hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c6dd53889ed24a20c2e564119c4f2587d1b0b030 | 12,891 | py | Python | Functions/learning_models.py | goyalpike/RK4_SinDy | 7a53b03611f28915244a86f11de6552e513d0dbb | [
"MIT"
] | null | null | null | Functions/learning_models.py | goyalpike/RK4_SinDy | 7a53b03611f28915244a86f11de6552e513d0dbb | [
"MIT"
] | null | null | null | Functions/learning_models.py | goyalpike/RK4_SinDy | 7a53b03611f28915244a86f11de6552e513d0dbb | [
"MIT"
] | null | null | null | """ Training of a network """
import torch
import sys
import torch_optimizer as optim_all
import numpy as np
from .modules import rk4th_onestep_SparseId, rk4th_onestep_SparseId_parameter
def learning_sparse_model(dictionary, Coeffs, dataloaders, Params,lr_reduction = 10, quite = False):
'''
Parameters
----------
dictionary : A function
It is a symbolic dictionary, containing potential candidate functions that describes dynamics.
Coeffs : float
Coefficients that picks correct features from the dictionary .
dataloaders : dataset
dataloaders contains the data that follows PyTorch framework.
Params : dataclass
Containing additional auxilary parameters.
lr_reduction : float, optional
The learning rate is reduced by lr_reduction after each iteration. The default is 10.
quite : bool, optional
It decides whether to print coeffs after each iteration. The default is False.
Returns
-------
Coeffs : float
Non-zero coefficients picks features from the dictionary and
also determines right coefficients in front of the features.
loss_track : float
tacking loss after each epoch and iteration.
'''
# Define optimizer
opt_func = optim_all.RAdam(Coeffs.parameters(), lr = Params.lr,weight_decay=Params.weightdecay)
# Define loss function
criteria = torch.nn.MSELoss()
# pre-allocate memory for loss_fuction
loss_track = np.zeros((Params.num_iter,Params.num_epochs))
#########################
###### Training #########
#########################
for p in range(Params.num_iter):
for g in range(Params.num_epochs):
Coeffs.train()
for y in dataloaders['train']:
opt_func.zero_grad()
loss_new = torch.autograd.Variable(torch.tensor([0.],requires_grad=True))
weights = 2**(-0.5*torch.linspace(0,0,1))
for i in range(y[0].shape[0]):
yi = y[0][i]
timesteps_i = torch.tensor(np.diff(y[1][i],axis=0)).float()
y_total = yi
##################################
# One forward step predictions
##################################
y_pred = rk4th_onestep_SparseId(y_total[:-1],dictionary,Coeffs,timestep = timesteps_i)
loss_new += criteria(y_pred,y_total[1:])
##################################
# One backward step predictions
##################################
y_pred_back = rk4th_onestep_SparseId(y_total[1:],dictionary, Coeffs,timestep = -timesteps_i)
loss_new += weights[0]*criteria(y_pred_back, y_total[:-1])
loss_new /= y[0].shape[0]
loss_track[p,g] += loss_new.item()
loss_new.backward()
opt_func.step()
sys.stdout.write("\r [Iter %d/%d] [Epoch %d/%d] [Training loss: %.2e] [Learning rate: %.2e]"
% (p+1,Params.num_iter,g+1,Params.num_epochs,loss_track[p,g],opt_func.param_groups[0]['lr']))
# Removing the coefficients smaller than tol and set gradients w.r.t. them to zero
# so that they will not be updated in the iterations
Ws = Coeffs.linear.weight.detach().clone()
Mask_Ws = (Ws.abs() > Params.tol_coeffs).type(torch.float)
Coeffs.linear.weight = torch.nn.Parameter(Ws * Mask_Ws)
if not quite:
print('\n')
print(Ws)
print('\nError in coeffs due to truncation: {}'.format((Ws - Coeffs.linear.weight).abs().max()))
print('Printing coeffs after {} iter after truncation'.format(p+1))
print(Coeffs.linear.weight)
print('\n'+'='*50)
Coeffs.linear.weight.register_hook(lambda grad: grad.mul_(Mask_Ws))
new_lr = opt_func.param_groups[0]['lr']/lr_reduction
opt_func = optim_all.RAdam(Coeffs.parameters(), lr = new_lr,weight_decay=Params.weightdecay)
return Coeffs, loss_track
def learning_sparse_model_parameter(dictionary, Coeffs, dataloaders, Params,lr_reduction = 10, quite = False):
'''
Here, we tailor sparse learning for parameter cases. The script is tested for a single parametes.
Parameters
----------
dictionary : A function
It is a symbolic dictionary, containing potential candidate functions that describes dynamics.
Coeffs : float
Coefficients that picks correct features from the dictionary .
dataloaders : dataset
dataloaders contains the data that follows PyTorch framework.
Params : dataclass
Containing additional auxilary parameters.
lr_reduction : float, optional
The learning rate is reduced by lr_reduction after each iteration. The default is 10.
quite : bool, optional
It decides whether to print coeffs after each iteration. The default is False.
Returns
-------
Coeffs : float
Non-zero coefficients picks features from the dictionary and
also determines right coefficients in front of the features.
loss_track : float
tacking loss after each epoch and iteration.
'''
# Define optimizer
opt_func = optim_all.RAdam(Coeffs.parameters(), lr = Params.lr,weight_decay=Params.weightdecay)
# Define loss functions
criteria = torch.nn.MSELoss()
# pre-allocate memory for loss_fuction
loss_track = np.zeros((Params.num_iter,Params.num_epochs))
#########################
###### Training #########
#########################
for p in range(Params.num_iter):
for g in range(Params.num_epochs):
Coeffs.train()
for y in dataloaders['train']:
opt_func.zero_grad()
loss_new = torch.autograd.Variable(torch.tensor([0.],requires_grad=True))
weights = 2**(-0.5*torch.linspace(0,0,1))
for i in range(y[0].shape[0]):
yi = y[0][i]
mui = y[2][i]
timesteps_i = torch.tensor(np.diff(y[1][i],axis=0)).float()
##########################
# One forward step predictions
y_pred = rk4th_onestep_SparseId_parameter(yi[:-1],mui[:-1],dictionary,Coeffs,timestep = timesteps_i)
loss_new += criteria(y_pred,yi[1:])
# One backward step predictions
y_pred_back = rk4th_onestep_SparseId_parameter(yi[1:],mui[:-1],dictionary, Coeffs,timestep = -timesteps_i)
loss_new += weights[0]*criteria(y_pred_back, yi[:-1])
loss_new /= y[0].shape[0]
loss_track[p,g] += loss_new.item()
loss_new.backward()
opt_func.step()
sys.stdout.write("\r [Iter %d/%d] [Epoch %d/%d] [Training loss: %.2e] [Learning rate: %.2e]"
% (p+1,Params.num_iter,g+1,Params.num_epochs,loss_track[p,g],opt_func.param_groups[0]['lr']))
# Removing the coefficients smaller than tol and set gradients w.r.t. them to zero
# so that they will not be updated in the iterations
Ws = Coeffs.linear.weight.detach().clone()
Mask_Ws = (Ws.abs() > Params.tol_coeffs).type(torch.float)
Coeffs.linear.weight = torch.nn.Parameter(Ws * Mask_Ws)
if not quite:
print('\n')
print(Ws)
print('\nError in coeffs due to truncation: {}'.format((Ws - Coeffs.linear.weight).abs().max()))
print('Printing coeffs after {} iter after truncation'.format(p+1))
print(Coeffs.linear.weight)
print('\n'+'='*50)
Coeffs.linear.weight.register_hook(lambda grad: grad.mul_(Mask_Ws))
new_lr = opt_func.param_groups[0]['lr']/lr_reduction
opt_func = optim_all.RAdam(Coeffs.parameters(), lr = new_lr,weight_decay=Params.weightdecay)
return Coeffs, loss_track
def learning_sparse_model_rational(dictionary, Coeffs_rational, dataloaders, Params,lr_reduction = 10, quite = False):
'''
Here, we tailor sparse learning for parameter cases. The script is tested for a single parametes.
Parameters
----------
dictionary : A function
It is a symbolic dictionary, containing potential candidate functions that describes dynamics.
Coeffs : float
Coefficients that picks correct features from the dictionary .
dataloaders : dataset
dataloaders contains the data that follows PyTorch framework.
Params : dataclass
Containing additional auxilary parameters.
lr_reduction : float, optional
The learning rate is reduced by lr_reduction after each iteration. The default is 10.
quite : bool, optional
It decides whether to print coeffs after each iteration. The default is False.
Returns
-------
Coeffs : float
Non-zero coefficients picks features from the dictionary and
also determines right coefficients in front of the features.
loss_track : float
tacking loss after each epoch and iteration.
'''
# Define optimizer
opt_func = optim_all.RAdam(Coeffs_rational.parameters(), lr = Params.lr,weight_decay=Params.weightdecay)
# Define loss function
criteria = torch.nn.MSELoss()
# pre-allocate memory for loss_fuction
loss_track = np.zeros((Params.num_iter,Params.num_epochs))
#########################
###### Training #########
#########################
for p in range(Params.num_iter):
for g in range(Params.num_epochs):
Coeffs_rational.train()
for y in dataloaders['train']:
opt_func.zero_grad()
loss_new = torch.autograd.Variable(torch.tensor([0.],requires_grad=True))
weights = 2**(-0.5*torch.linspace(0,0,1))
for i in range(y[0].shape[0]):
yi = y[0][i]
timesteps_i = torch.tensor(np.diff(y[1][i],axis=0)).float()
y_total = yi
##########################
# One forward step predictions
y_pred = rk4th_onestep_SparseId(y_total[:-1],dictionary,Coeffs_rational,timestep = timesteps_i)
loss_new += criteria(y_pred,y_total[1:])
# One backward step predictions
y_pred_back = rk4th_onestep_SparseId(y_total[1:],dictionary, Coeffs_rational,timestep = -timesteps_i)
loss_new += weights[0]*criteria(y_pred_back, y_total[:-1])
loss_new /= y[0].shape[0]
loss_track[p,g] += loss_new.item()
loss_new.backward()
opt_func.step()
sys.stdout.write("\r [Forced zero terms %d/%d] [Epoch %d/%d] [Training loss: %.2e] [Learning rate: %.2e]"
% (p,Params.num_iter,g+1,Params.num_epochs,loss_track[p,g],opt_func.param_groups[0]['lr']))
torch.save(Coeffs_rational,Params.save_model_path+'MM_model_coefficients_iter_{}.pkl'.format(p))
# Removing the coefficients smaller than tol and set gradients w.r.t. them to zero
# so that they will not be updated in the iterations
Ws_Num = Coeffs_rational.numerator.weight.detach().clone()
Ws_Den = Coeffs_rational.denominator.weight.detach().clone()
if len(Ws_Den[Ws_Den!=0]) == 0:
Adp_tol = torch.min(Ws_Num[Ws_Num!=0].abs().min()) + 1e-5
else:
Adp_tol = torch.min(Ws_Num[Ws_Num!=0].abs().min(), Ws_Den[Ws_Den!=0].abs().min()) + 1e-5
Mask_Ws_Num = (Ws_Num.abs() > Adp_tol).type(torch.float)
Mask_Ws_Den = (Ws_Den.abs() > Adp_tol).type(torch.float)
Coeffs_rational.numerator.weight = torch.nn.Parameter(Ws_Num * Mask_Ws_Num)
Coeffs_rational.denominator.weight = torch.nn.Parameter(Ws_Den * Mask_Ws_Den)
Coeffs_rational.numerator.weight.register_hook(lambda grad: grad.mul_(Mask_Ws_Num))
Coeffs_rational.denominator.weight.register_hook(lambda grad: grad.mul_(Mask_Ws_Den))
new_lr = opt_func.param_groups[0]['lr']/lr_reduction
opt_func = optim_all.RAdam(Coeffs_rational.parameters(), lr = new_lr,weight_decay=Params.weightdecay)
return Coeffs_rational, loss_track
| 44.298969 | 135 | 0.581258 |
c6e2e070ba03aa1892f65c8ab57f90a175c0ba2f | 31 | py | Python | flask/deploy.py | dcu-sharepoint/Browser-id | 4baeb18cb6bef26dad5a1a6fcf815ac1024203da | [
"MIT"
] | 1 | 2018-05-14T20:00:21.000Z | 2018-05-14T20:00:21.000Z | flask/deploy.py | zakybstrd21215/cross_browser | 4baeb18cb6bef26dad5a1a6fcf815ac1024203da | [
"MIT"
] | null | null | null | flask/deploy.py | zakybstrd21215/cross_browser | 4baeb18cb6bef26dad5a1a6fcf815ac1024203da | [
"MIT"
] | null | null | null | cp ./* ~/server/uniquemachine/
| 15.5 | 30 | 0.677419 |
c6e3e9d0abc03b1874ad93609b620dcead66d6e3 | 4,874 | py | Python | repairfiles.py | MrForg3t/sourcecodetrm | de9ce6eb1714d28998ef1f4a2ebc05cd7bf7d78f | [
"MIT"
] | null | null | null | repairfiles.py | MrForg3t/sourcecodetrm | de9ce6eb1714d28998ef1f4a2ebc05cd7bf7d78f | [
"MIT"
] | null | null | null | repairfiles.py | MrForg3t/sourcecodetrm | de9ce6eb1714d28998ef1f4a2ebc05cd7bf7d78f | [
"MIT"
] | null | null | null | from urllib import request
from os import path, system
from platform import system as osInfo
from time import sleep
from urllib import request
if __name__ == '__main__': repairFileMain()
sleep(7) | 33.156463 | 144 | 0.533032 |
c6e4a42a16095039958ecdd10b4a917bcf6aef59 | 581 | py | Python | resources/samd21flash.py | dotchetter/W.O.O.B.S | 6055020f21c462940e9477192c831d8ad0b2669e | [
"MIT"
] | null | null | null | resources/samd21flash.py | dotchetter/W.O.O.B.S | 6055020f21c462940e9477192c831d8ad0b2669e | [
"MIT"
] | 13 | 2020-11-10T12:29:46.000Z | 2020-11-20T00:04:02.000Z | resources/samd21flash.py | dotchetter/W.O.O.B.S | 6055020f21c462940e9477192c831d8ad0b2669e | [
"MIT"
] | null | null | null | import os
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-port")
parser.add_argument("-programmer")
parser.add_argument("-binary")
args = parser.parse_args()
port_norm = args.port
port_bootloader = f"{port_norm[0:3]}{int(port_norm[-1])+1}"
print("Issuing command to bootloader with 1200 baud")
os.system(f'cmd /k "mode {port_bootloader}:1200,n,8,1,p"')
print("Complete.\nFlashing device.")
os.system(f'cmd /k "{args.programmer}" --port={port_norm} -i -e -w -v -b {args.binary} -R') | 32.277778 | 95 | 0.666093 |
c6e54cd48762f141a1090fb8f2221a27cae5656e | 136 | py | Python | introduction/model_answer/python/09_tenka1_programmer_contest_1998.py | AAAR-Salmon/procon | d65865e7c7d98f7194f93610b4f06df8fff3332c | [
"MIT"
] | null | null | null | introduction/model_answer/python/09_tenka1_programmer_contest_1998.py | AAAR-Salmon/procon | d65865e7c7d98f7194f93610b4f06df8fff3332c | [
"MIT"
] | null | null | null | introduction/model_answer/python/09_tenka1_programmer_contest_1998.py | AAAR-Salmon/procon | d65865e7c7d98f7194f93610b4f06df8fff3332c | [
"MIT"
] | null | null | null | # None
a=[None] * 20
a[0]=a[1]=100
a[2]=200
for i in range(3,20):
a[i] = a[i-1] + a[i-2] + a[i-3]
print(a[19])
| 17 | 32 | 0.588235 |
c6e60e06fca1a3189ef7b894a20c3b5c14557fda | 41,045 | py | Python | test/ontic_type_test.py | neoinsanity/ontic | 2b313fb9fc45faf550791a797624c9997386c343 | [
"Apache-2.0"
] | 2 | 2017-11-06T12:01:20.000Z | 2021-03-01T23:52:41.000Z | test/ontic_type_test.py | neoinsanity/ontic | 2b313fb9fc45faf550791a797624c9997386c343 | [
"Apache-2.0"
] | 1 | 2016-12-02T04:04:03.000Z | 2016-12-02T04:04:03.000Z | test/ontic_type_test.py | neoinsanity/ontic | 2b313fb9fc45faf550791a797624c9997386c343 | [
"Apache-2.0"
] | 2 | 2015-06-26T22:24:57.000Z | 2016-12-01T02:15:36.000Z | """Test the basic functionality of the base and core data types."""
from datetime import date, time, datetime
from typing import NoReturn
from ontic import OnticType
from ontic import property
from ontic import type as o_type
from ontic.meta import Meta
from ontic.property import OnticProperty
from ontic.schema import Schema
from ontic.validation_exception import ValidationException
from test.utils import BaseTestCase
DEFAULT_CHILD_PROP = ChildOnticType(int_prop=99, str_prop='The Value')
| 37.111212 | 122 | 0.599903 |
c6e6312f6be52c69218d6689cca0b968307e1db4 | 46,788 | py | Python | resources.py | jajberni/BreederMap | 8a14d906a6af63dc2c27d77e43968c2e2794fa06 | [
"MIT"
] | null | null | null | resources.py | jajberni/BreederMap | 8a14d906a6af63dc2c27d77e43968c2e2794fa06 | [
"MIT"
] | null | null | null | resources.py | jajberni/BreederMap | 8a14d906a6af63dc2c27d77e43968c2e2794fa06 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.9.7)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x2a\xae\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\xd0\x00\x00\x01\xa8\x08\x06\x00\x00\x01\x81\x2b\x84\x3a\
\x00\x00\x2a\x75\x49\x44\x41\x54\x78\x9c\xed\xdd\x07\x7c\x13\xf5\
\xff\xc7\xf1\xb4\x4d\x37\x50\xa0\xd0\xb2\x0a\xa5\x14\x90\x51\x96\
\x80\xb2\x37\x82\x8a\x0c\x59\x8a\x80\x32\x14\x65\x08\x0a\x88\xac\
\x5a\xa6\x0c\x01\x41\x90\x21\x28\x7b\xca\x10\x91\x29\x53\x56\xd9\
\x32\x0a\x88\x2d\x65\xb7\x40\x19\x1d\x74\xd1\xfe\xb9\xf8\x4f\x7f\
\x2d\x4d\xd7\x35\xbd\xfb\xf6\xf2\x7a\x3e\x1e\x47\x92\xcb\xfa\x84\
\x6f\xde\xfd\x24\x97\x1b\xfa\xc4\xc4\x44\x9d\xd1\x8b\xf3\xd6\x3a\
\x8d\xb1\xb2\xb2\x4a\x90\x4e\xf5\xd2\x3f\xed\xc7\x74\x4f\x4c\xff\
\xe6\xb9\x9f\x5e\xed\x02\x94\x62\x59\x2f\x74\xd3\xf8\xe5\x56\x6a\
\x17\x92\xd3\x2c\x6b\x44\x2d\x41\xaa\x17\x7a\x21\x28\xa0\x71\x76\
\x1e\x30\x38\xf4\x56\xe5\x52\x6e\x25\x2e\x64\xe7\x31\x24\xb7\xee\
\xdf\x79\xa5\x44\xe1\x62\x97\xb3\xf3\x18\xde\xc5\xbd\x4e\x38\xd8\
\xd9\x47\x4a\xe7\x69\x2f\x5a\xa3\x4f\x48\x48\xb0\x49\x3e\xc3\x77\
\x68\x53\xd9\x0f\xe6\x37\x7d\xaf\xd9\x1f\x67\x58\xff\x06\x3a\x27\
\x47\xdb\x6c\x3f\x8e\xe5\x8c\xa8\xda\x05\x28\x85\x17\xaa\x35\xbc\
\x50\xad\xe1\x85\x6a\x8d\xde\xda\xda\xfa\x79\xf2\x19\xfb\x8f\x04\
\x99\xe5\x81\xcd\xf5\x38\x87\xfd\x83\x75\xb6\xb6\x36\x19\xdf\x30\
\x03\xa9\x46\xf4\x80\x99\x0a\x34\xd7\xe3\x1c\x39\x71\xc3\x2c\x8f\
\x93\xf4\xc5\x5b\xcb\x1f\xec\xa5\xd7\xa7\x4f\x7e\x41\xcd\x62\x72\
\x9a\xe5\xfc\x31\x52\xbb\x00\xa5\x24\xbd\x50\x8b\xc8\xa8\x96\x5f\
\xa4\x44\x7a\x7d\x96\xf5\xd6\xa5\xbd\x68\x88\x65\xbd\x75\x2d\x01\
\xed\x45\x4b\x2c\xab\xbd\xb0\x00\x5b\x63\x78\xa1\x5a\xc3\x0b\xd5\
\x1a\x5e\xa8\xd6\x58\xce\x0b\x7d\x79\x01\x76\xf2\x26\x9b\x1d\xe6\
\x7a\x9c\x69\x73\x0f\x99\xe5\x71\x2c\x67\x44\xa5\x7f\x8c\x5f\xba\
\xfb\xcd\xf8\xf2\xdf\x90\x47\xa1\x5e\xea\x96\x64\x3e\xae\xf9\x0a\
\xdc\xfe\x69\xd8\xec\x12\xd2\xf9\x14\x23\x3a\xff\x8b\xef\xca\xa8\
\x53\x52\xce\xb3\xac\xb7\xae\x25\x30\xbc\xd0\x6f\x7e\x99\xb2\xfb\
\xdc\xbf\x17\x9a\xab\x5d\x4c\x4e\x32\xbc\x50\xad\xbf\x48\x89\xe5\
\xbc\x75\x8d\x1b\x0f\x68\x71\xc3\x01\x89\x45\x6d\x3c\x60\x58\x0a\
\xf8\x2c\x26\x3a\xaf\xda\x85\xe4\xb4\x1f\x36\x2d\x5a\xac\x77\xb4\
\x77\x08\x57\xbb\x90\x9c\x36\xa0\x7d\xdf\xde\x6c\x3c\xa0\x35\x96\
\xf9\x42\xb3\xbb\xe1\x80\x68\x2a\x97\xae\xb0\xdf\x78\xde\x72\xda\
\x8b\xd6\x5f\xa4\xc4\xe4\x8f\x4c\x72\x7f\x33\x79\x79\xd1\x89\x68\
\x8f\x63\x99\x7f\x8c\xb4\x8c\x17\xaa\x35\xbc\x50\xad\xe1\x85\x6a\
\x4d\xaa\x17\x2a\xda\xc6\x03\xe6\x7a\x1c\xe1\x37\x1e\x30\xd7\xe3\
\xe8\xa5\x0f\xbc\x21\x61\xa1\x5e\xfd\x66\x7e\xf9\xaf\x59\x1e\x51\
\x30\xc3\xbb\x0e\xea\x58\xa7\x52\xad\x5f\x0d\x23\xea\x5e\xd0\x2d\
\x50\xeb\x4b\x19\x2c\xf7\x8f\x91\x56\x59\xce\x17\x6f\xb5\x8b\x50\
\x42\x42\x62\x82\x8d\x45\xbc\xd0\x9b\xa1\xb7\x2b\x5a\xc4\x0b\x2d\
\xe5\xee\x71\x9e\x05\xd8\x5a\x63\x79\x2f\x54\xab\x2d\xc6\x18\x4b\
\xcd\x2f\xd7\x35\xbe\x3e\xcb\x7b\xeb\x1a\x89\xb6\xe0\x99\x05\xd8\
\x59\xc4\x0b\xd5\x1a\x5e\xa8\xd6\xf0\x42\xb5\xc6\x72\x5f\xa8\x68\
\x1b\x0f\x98\xeb\x71\xf4\x5a\xdf\x62\x5f\xb2\xf4\xeb\x79\x85\x93\
\xbe\x78\x5f\xbd\x79\xed\xf5\xaf\x16\xfa\x1d\xcd\xd6\x23\x4a\xff\
\x5d\x02\x7d\x85\x1f\xdb\x63\x58\xeb\xea\x65\xab\xec\x90\xce\x27\
\xbd\x75\xcb\x79\x78\x1f\xd3\xf2\x92\x06\xcb\xfd\x63\xa4\x55\x96\
\xb3\x00\x5b\xab\xab\x98\x27\xf7\xd3\xb6\xe5\xb3\xf5\xc6\x75\xce\
\xb5\xac\xcf\x5b\xdd\x07\xe9\x93\x1f\x79\x40\xeb\x6f\x61\x2d\x4b\
\x5a\x08\x68\x9c\xc1\x60\xe6\x6e\xd2\xf8\xf9\xf6\x1c\xfe\x86\xc5\
\x74\x51\x4b\x50\xcd\xdb\x67\x57\x8a\xfd\x70\x4d\x5f\xfb\xc3\xda\
\xc3\x17\x8e\x77\x56\xb3\x28\x64\x9d\xb3\x83\xd3\xe3\x15\xa3\x16\
\x14\x90\xce\xa7\x48\xe8\xd0\x2e\x03\xba\x48\x93\x3a\x65\xc1\x1c\
\xf8\x93\xab\x31\x0c\xa8\xc6\xa4\x18\xd0\x39\x1b\x17\xfe\xbc\xf7\
\xcc\xa1\x0f\x55\xaa\x45\x58\x82\x2d\xec\x4b\x45\x5a\xaf\x68\xd6\
\x80\x49\x55\xa4\xf3\x16\xb1\x08\x25\xbb\x44\x1e\x4c\x49\x70\xc8\
\x4d\x9f\x4c\xaf\xa3\xd1\xb3\x4b\xf5\x9c\xaf\xe8\x25\x4b\xd7\x9e\
\x49\xf3\x3a\xd1\xea\xe9\xda\xae\x8a\xce\xde\x3e\xfb\xc7\x09\xc8\
\x8a\xf4\xea\x49\xb5\x23\xcb\x97\x79\x7a\x14\x30\x7b\x41\xd9\x21\
\x5a\x3d\x1e\xc5\x5d\x64\xef\x68\x33\x27\xf0\xa1\x48\x63\x18\x50\
\x8d\x61\x40\x35\x86\x01\xd5\x18\x06\x54\x63\x18\x50\x8d\x61\x40\
\x35\x86\x01\xd5\x98\x54\x3b\xbc\x7e\x99\xb9\xd6\x19\x35\x17\xd1\
\xea\x31\xd7\x0e\xb9\xcd\xc5\x22\x8e\xa8\x65\x29\x2c\xea\xc8\x61\
\x96\x82\x1e\xaa\x31\x0c\xa8\xc6\x30\xa0\x1a\xc3\x7a\xb9\x1a\x92\
\xf4\xa1\x28\x3c\x2a\xc2\x55\xed\x62\x90\x7d\xb3\x36\xcc\x5f\x6e\
\x18\xd0\xbc\x4e\x79\x1e\xaa\x5d\x0c\xb2\x6f\x70\xc7\x7e\xdd\xf9\
\xda\xa2\x31\x7c\x28\xd2\x18\x06\x54\x63\x18\x50\x8d\xe1\x6b\x8b\
\x86\x58\xcc\x11\xe6\x2d\x85\xe1\x20\x27\x19\xad\x97\x9b\x9d\x03\
\xc5\xcb\x95\xde\x4f\x64\xa2\xd5\x33\xac\x7f\x03\xc5\xd7\xcb\x4d\
\xaf\x1e\x7a\xa8\xc6\x30\xa0\x1a\xc3\x80\x6a\x0c\x03\xaa\x31\x0c\
\xa8\xc6\x30\xa0\x1a\xc3\x80\x6a\x0c\x03\xaa\x31\x0c\xa8\xc6\xb0\
\xa2\x75\x36\x09\xbb\xa2\xb5\x74\x2a\xcc\x32\x5d\xd1\x77\x3b\x22\
\xa0\x54\x3b\x6f\x4c\x3e\x13\xb9\x17\x3d\x54\x63\x18\x50\x8d\x49\
\x31\xa0\xd7\x6e\x07\xd5\x1c\x36\x7f\xec\x09\xb5\x8a\x81\x3c\xdd\
\x5b\x76\x19\xd1\xa1\xc1\xdb\x53\xa4\xf3\xac\xb1\xa0\x01\xcb\x77\
\xad\xfd\x56\x9a\x2c\xe6\xa8\xa9\x96\x42\xda\x4f\x63\x8a\xf5\x72\
\x49\x69\xee\xf5\xfd\xc0\xc9\x3e\x25\xdd\x4a\x5c\x48\x71\x10\x01\
\x23\x4b\x38\x54\x84\x16\x98\x3a\xd2\x45\x52\x42\xc7\x2c\x99\xb4\
\xef\x42\x50\x40\x63\x45\x2b\x82\xd9\xa4\x58\xb0\x10\x70\xe3\x6a\
\x3d\x06\x33\x77\xf3\x5b\x3a\x75\x67\xd2\x51\x21\x46\x2e\x1a\xff\
\x97\xda\x05\x21\x7b\xce\x5e\x3b\xdf\x52\x3a\xe5\x53\xae\xc6\x30\
\xa0\x1a\xc3\x6e\x6d\x34\x22\xd5\xaf\x2d\x0c\x6a\xee\x95\xfc\x57\
\x32\x7e\x3e\xd3\x18\x7a\xa8\xc6\x30\xa0\x1a\xc3\x80\x6a\x0c\x03\
\xaa\x31\x0c\xa8\xc6\xa4\x39\xa0\x22\x2c\xdb\x8d\x8c\x89\x72\x49\
\x4c\x48\xb4\xce\xe3\xe8\xfc\x48\xed\x5a\x24\xb1\x71\x71\x0e\x4f\
\xa3\x9e\x16\x2e\xe4\xe2\x7a\x53\xed\x5a\x2a\x97\xae\xb0\xdf\xd4\
\xfc\x14\x03\xca\xf7\xd0\xdc\x69\x83\xdf\x2f\xb6\x36\xd6\x36\xf1\
\xd2\x79\xc3\x80\x8e\x5b\x3a\x75\xc7\x99\x6b\xe7\xdf\x50\xb7\x2c\
\xc8\xd5\xd1\xf7\xc3\x38\x27\x07\xa7\x27\x2b\x47\x2d\xc8\xaf\xbf\
\xf3\xe0\x5e\x39\x06\x33\xf7\x8b\x8a\x8e\x72\x59\xbd\x77\xa3\x9f\
\xbe\xff\xf7\xc3\xae\xa8\x5d\x0c\xcc\x63\xdd\xbe\x4d\x63\xd3\xfd\
\x94\x5b\xbd\x72\x31\x5d\x95\x4a\xee\x4a\xd5\x63\x90\xde\xa1\x14\
\x25\x4a\x1f\x6e\x72\xc3\xd6\x0b\xba\xc8\xa8\xb8\x34\xaf\x17\xed\
\xf0\x97\xe9\x0e\x68\x7e\x17\x07\xe1\x0e\xef\xa8\x74\x3d\x36\x36\
\xe9\xaf\x5e\x25\xda\xff\x0f\xdf\x43\x35\x86\x01\xd5\x18\x06\x54\
\x63\x18\x50\x8d\x61\x40\x35\x86\x01\xd5\x18\x06\x54\x63\x18\x50\
\x8d\x61\x40\x35\x86\x01\xd5\x18\x06\x54\x63\x18\x50\x8d\x61\x40\
\x35\x86\x01\xd5\x18\x06\x54\x63\x18\x50\x8d\x61\x40\x35\x26\xdd\
\x01\xbd\x7e\x23\x4c\x97\x60\x62\x2f\x29\x6a\xda\x7f\x24\x48\xd1\
\xe7\x8b\x89\x49\x77\xef\xb3\x8a\xd7\x93\x91\x74\x07\x34\xe8\xe6\
\x63\xc3\x24\x92\x03\x82\xfd\x07\x8a\x56\x8f\x9e\x0d\x7d\xb5\x63\
\x5c\xaf\xaf\x9b\x1a\x12\x3a\xb1\xcf\xe8\x86\xa3\x7e\x9a\x70\x50\
\xed\x82\x20\x5f\xcb\x9a\x4d\x16\xfa\x94\xae\xb8\xcf\x30\xa0\x15\
\x4b\x95\x3f\x44\x52\x73\x2f\x36\xc9\xd7\x30\xbe\xb6\x68\x0c\x03\
\xaa\x31\x0c\xa8\xc6\x30\xa0\x1a\xc3\x80\x6a\x0c\x03\xaa\x31\x6c\
\x92\xaf\x01\x93\xfa\x8c\x69\x50\xa1\x54\x39\xc3\x2e\x72\x39\x2a\
\x84\x06\x8c\xfc\x69\xfc\xa1\x39\x83\xa6\x54\x2c\x51\xb8\x58\x80\
\x61\x40\x19\xcc\xdc\x6f\xe0\xec\xaf\x2e\x71\x98\x0f\x0d\x62\x40\
\x35\xc6\x30\xa0\xeb\xbf\xf9\xc5\xae\xd3\x37\x1f\xc6\xaa\x5d\x0c\
\xe4\xeb\xdc\xa4\xdd\x38\xe9\xd4\x30\xa0\x7a\x1b\x9b\xb8\x4f\xda\
\x7c\xf8\xd9\x82\xad\xbf\xcc\x53\xb7\x2c\xc8\x21\x7d\xc2\x7d\xaf\
\xe9\xbb\xbe\xd2\xf9\xa4\x3f\xb9\xad\x6a\x37\xfb\x51\x9a\xd4\x2b\
\x0b\xe6\x40\x0f\xd5\x18\x06\x54\x63\x18\x50\x8d\x61\x40\x35\x86\
\x01\xd5\x18\x06\x54\x63\x52\x0c\xe8\xea\x3f\x7f\x1d\xb7\x6e\xff\
\xe6\x31\x6a\x15\x83\xac\xab\x5d\xa1\xc6\x96\xaf\xdf\x1f\xd2\xce\
\x78\x99\x5f\x5b\x72\x39\xff\x80\xd3\x6d\xa5\xb1\x9b\x3d\xf0\xdb\
\x4a\x1e\x6e\xc5\x2f\xf1\x6b\x8b\x46\x0c\x9a\x33\xe2\xe2\x3a\xdf\
\x25\x0e\x7a\x06\x53\x3b\x3a\xfb\xf5\x8a\xe6\x43\x51\x86\xa4\xf7\
\x7b\xee\x59\xff\x3c\xdd\x01\x6d\x52\xcf\x4b\xd7\xb0\x8e\xa7\x42\
\xa5\xfc\xc7\x6f\xfa\xde\x74\xaf\xf7\x1d\xda\x54\xa1\x4a\xfe\x33\
\x73\xc1\x61\xdd\xd3\xf0\x98\x34\xaf\x57\xba\x1e\x49\x7a\xff\x47\
\x24\x54\x63\x18\x50\x8d\x61\x40\x35\x86\x01\xd5\x18\x06\x54\x63\
\x18\x50\x8d\x61\x40\x35\x86\x01\xd5\x18\x06\x54\x63\x18\x50\x8d\
\x61\x40\x35\x86\x01\xd5\x18\x06\x54\x63\x18\x50\x8d\x61\x40\x35\
\x86\x01\xd5\x18\x06\x54\x63\x18\x50\x8d\x49\x77\x40\xf7\x1d\x0e\
\x34\x4c\x22\xc9\x68\x15\x15\xa5\x89\x56\x8f\xbe\x6b\xd3\x0e\xbe\
\x6b\xf6\x6e\xf4\x53\xbb\x10\x64\x5f\x1e\x47\xe7\x30\x7d\x97\x26\
\xed\xc7\x31\xa0\xe9\xc9\x3d\x6b\xfd\x2d\x1f\x39\xdf\xd5\xf0\x27\
\x57\xda\x1d\xca\xc1\x73\x47\xba\xcd\xdc\xf0\xe3\x0a\xb5\x8b\x12\
\x8f\xf8\x83\xd9\xfc\xd5\x46\x8b\xfb\xb7\xeb\xd3\x47\x3a\x9f\xd4\
\x43\x1b\x56\xad\xbb\x52\x9a\xa4\xf3\x73\x37\xff\xf4\xd3\x9e\x53\
\x07\x7a\xab\x55\xa0\x81\x71\xf5\x6f\xf1\xff\x3f\x55\x51\xb7\x52\
\xed\x0d\xc3\xba\x0e\xec\xf4\xf2\x7c\x93\x1f\x8a\xa4\xd1\x36\x8e\
\x38\x72\x17\xbe\xb6\x68\x0c\x03\xaa\x31\x0c\xa8\xc6\x30\xa0\x1a\
\xc3\x80\x6a\x0c\x3b\x40\xd6\x00\x93\x07\xe2\x61\x30\x73\x2f\x69\
\xec\x8c\x83\xca\x26\xf9\x1a\x61\x1c\x54\x7a\xa8\xc6\x30\xa0\x1a\
\x63\x18\xd0\x0f\x5a\x74\x1e\xb9\x62\xf7\xba\x49\x6a\x17\x03\xf9\
\x6c\xac\x6d\xe2\xa4\x53\xc3\x80\xbe\xdb\xb0\xcd\x64\x06\x34\x77\
\xdb\xe0\xf7\x8b\x9d\x74\xaa\x4f\x4c\xe3\x18\xdb\x97\x6f\xfc\x53\
\xf7\xeb\x45\xe3\x0e\x2b\x5a\x15\x60\xa1\xa4\x15\x4d\xa4\x75\x13\
\x5e\x9e\x9f\xea\x33\xd1\xe8\x25\x13\xf7\x5f\x0c\xba\xdc\x48\x99\
\xb2\x00\x48\xa4\x95\x86\x8c\x2b\x0e\xa5\x79\xc0\x66\x16\x2f\x00\
\xea\x4b\xb5\xdc\x4f\x92\x90\x98\x60\xa3\x5e\x49\x00\x92\x5b\xf4\
\xfb\xb2\x39\x7d\xdf\xee\x31\x30\x29\xa0\xd6\x56\xd6\xcf\xd5\x2c\
\x08\xc0\xff\x94\x2d\xe1\x75\x42\x3a\x4d\xf1\x11\xb7\x47\xcb\xae\
\x5f\x2d\xdb\xb5\x66\x8a\x3a\x25\x01\x90\xd8\xd9\xda\x3d\x6b\x5c\
\xad\xfe\x32\xe9\x7c\xca\xef\xa0\x0d\xde\x9a\x6a\x98\xf8\x2e\x0a\
\xa8\x62\xce\xa0\x29\x15\x4a\x14\x2e\x76\xd9\x78\xd9\xe4\x2f\xdb\
\xc9\x97\x22\x05\xde\xb9\x5e\xe3\xec\xbf\x17\x5a\xc4\xc6\xc5\x3a\
\x29\x51\x20\x60\x29\xac\xac\xac\x12\xca\x14\x2b\x7d\xaa\x66\xf9\
\x6a\xbf\xa7\x75\x9b\x0c\x57\x3d\xf1\x2a\xe6\x79\x5a\x9a\xcc\x5b\
\x1a\x80\xcc\x60\xdd\x30\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\
\x40\x01\x81\x11\x50\x40\x60\x19\x06\xf4\xdc\xb5\x0b\x2d\xe6\x6d\
\x59\xbc\x30\xf4\xf1\x03\x4f\x05\xea\x01\x2c\x4a\xdf\xb7\x7a\x0c\
\x7c\xf3\xf5\x16\x3f\xa4\x75\x7d\xaa\x80\x9e\xbc\x72\xf6\xad\x89\
\x2b\xbe\x4b\xf3\x77\x19\x00\xe6\xb3\x68\xdb\xb2\x39\xd2\x64\xbc\
\xdc\xba\x76\xf3\x79\x1f\xb7\xe9\xd9\xdf\x78\x39\x29\xa0\xdf\xae\
\x9a\xb5\xe9\x78\xc0\xa9\x76\x0a\xd7\x07\x20\x99\xed\xfe\x7b\x3e\
\x93\x26\x69\xe7\xd5\x49\xfb\x3b\x66\xd5\x3e\x40\x2c\x11\xcf\x22\
\x0b\x4a\xb9\x64\x21\x11\x20\x30\x7d\x42\x82\xfc\xed\x40\x9b\xd4\
\xf3\xd2\x35\xac\xe3\x69\xc6\x72\xc4\x94\x9d\x03\xb1\xf8\x0e\x6d\
\x6a\xc6\x4a\xc4\x34\x73\xc1\x61\xdd\xd3\xf0\x18\x59\xf7\x1d\xd6\
\xbf\x81\xce\xc9\xd1\xd6\xcc\x15\x89\x47\xee\x7b\x88\x0e\x0a\x08\
\x8c\x80\x02\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\
\x80\x02\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\
\x02\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\
\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\
\xd3\x5b\x5b\xcb\x3f\xb2\xf6\xbe\xc3\x81\x86\x09\x69\xcb\xce\xee\
\x52\x2c\xc1\xb4\xb9\x87\xd4\x2e\x41\x68\x86\x0e\x5a\xd4\xb5\xc8\
\x3f\x77\x1f\xde\x2b\xab\x76\x31\xc8\xad\xa4\x9d\x42\x5a\x65\x78\
\x2b\x64\x9d\x21\xa0\xf3\x06\x4f\x2b\x77\xe5\xe6\xb5\x3a\x23\x16\
\xfa\x1d\x51\xbb\x20\xe4\x46\x84\xd3\xdc\x9a\x56\x6f\xf0\xcb\xc0\
\x0e\x1f\x7f\x94\xf4\x1d\xb4\xbc\x87\xf7\x51\xe3\x91\xb5\xd9\x4f\
\x2e\xa0\x8e\x85\x5f\xce\xf4\x2c\x9c\xbf\x50\xb0\xf1\xb2\xc9\x85\
\x44\xc6\xa0\x1a\xed\x39\x75\xa0\xf7\xe2\x3f\x96\x7f\x1f\x1d\x1b\
\xe3\x9c\xd3\x05\x02\x96\xe2\xfd\x66\x1d\xc7\x74\x6a\xdc\x76\x42\
\x7a\xb7\xc9\xd4\x52\xdc\xe6\xaf\x36\x5a\x2c\x4d\xe6\x29\x0b\x40\
\x66\xf1\x33\x0b\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\
\xc0\x08\x28\x20\xb0\x74\x03\x1a\x17\x1f\x6f\xbf\x6c\xd7\xea\xa9\
\x27\x2e\x9f\x69\xf3\x24\xf2\xa9\x9b\xf1\x67\x16\x67\x07\xa7\xc7\
\x8a\x54\x07\x68\x4c\x64\x74\x54\x7e\xe9\xd4\x56\xaf\x8f\x71\xb4\
\x73\x0c\xaf\xe8\x59\xfe\x60\x87\x86\x6d\xbe\x2d\x5b\xdc\xeb\x84\
\xa9\xdb\xa7\x0a\x68\x74\x6c\x74\x9e\xf7\xc6\xf7\x0d\xcf\xcc\x93\
\x00\x90\x47\x6a\x7e\x71\xf1\xe1\xf6\xc7\x2e\x9d\xec\x20\x4d\xc6\
\xf9\x3d\xdf\xe8\x3a\xbc\x5d\xfd\xb7\xa6\x19\x2f\xa7\x08\xe8\x8c\
\xf5\xf3\x56\x1d\xfa\xfb\xe8\x7b\x4a\x16\x0a\xe0\x7f\x96\xee\x5c\
\x33\x55\x9a\x8c\x2b\x0b\xa5\x08\x28\xe1\x04\xc4\xf0\xdb\x91\x1d\
\x43\xde\xa9\xdb\x6a\x26\x0b\x89\x00\x01\x49\xcb\x7c\xa4\xd3\x14\
\x01\x75\x71\xce\x17\x6a\xbc\x02\x80\x7a\xba\xb7\xe8\xfc\xb5\x74\
\x9a\x22\xa0\xbf\x8c\x98\xeb\xfe\xeb\xc1\xad\x5f\xaf\xd8\xbd\x6e\
\x92\x3a\x65\x01\x96\x2d\x7f\x1e\x97\x90\x9f\xbf\xfa\xa1\x88\xf1\
\x72\xaa\x8f\xb8\xef\x36\x6c\x33\x59\x9a\xa4\xf3\xdf\xff\xba\x60\
\xe9\xfe\xb3\x7f\xf5\x50\xb2\x40\xc0\xd2\xb8\x17\x70\x0b\x9c\xd8\
\x67\x54\x23\xd7\x7c\x05\x6f\xbd\x7c\x5d\xba\xdf\x41\x3f\x7f\xf7\
\x93\x9e\xd2\x94\x73\xa5\x01\x48\x0f\x0b\x89\x00\x81\x11\x50\x40\
\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\x65\x3a\xa0\x5b\
\x8f\xec\x18\x1c\x70\xe3\x6a\xfd\x53\x57\xcf\xbd\x19\x1b\x17\xeb\
\x98\x93\x45\x01\x5a\x56\xdd\xdb\x67\x67\x99\xe2\x5e\x27\x3b\x36\
\x7a\x67\x92\xbd\xad\x5d\x54\x7a\xb7\x35\x19\xd0\x1f\xb7\x2c\x59\
\xb0\xeb\xe4\xbe\x8f\x73\xa6\x3c\x68\x12\xfb\xae\xce\xb4\x33\xd7\
\xce\xbf\x21\x4d\x1b\x0e\x6c\x19\x95\x7c\xbe\xb4\x03\x79\x69\x1f\
\xd5\xc9\xe7\x25\x05\x34\xfe\x79\xbc\x5d\xa7\x6f\x3e\x8a\x51\xaa\
\x48\x68\x0c\xe1\xcc\x36\xe9\xe8\x0e\xc6\x7d\x52\xcf\xec\x3f\xb1\
\x9a\x67\x91\x92\xe7\x0c\x01\x9d\xbc\x6a\xe6\x66\xff\x80\xd3\x6d\
\xd5\x2d\x0f\xb9\x17\xed\xd3\xdc\x86\xcc\x1d\x75\x56\xfa\xf8\x6b\
\x08\x28\xe1\x44\xf6\x10\xce\x9c\x10\x13\x17\xeb\xa4\x4f\x48\x48\
\xb0\x91\xfb\x00\x4d\xea\x79\xe9\x1a\xd6\xf1\x34\x63\x49\x62\xca\
\xce\x11\xca\x7c\x87\x36\x35\x63\x25\x62\x9a\xb9\xe0\xb0\xee\x69\
\xb8\xbc\x6f\x47\xc3\xfa\x37\xd0\x39\x39\xda\x9a\xb9\x22\xf1\xc8\
\x7d\x0f\xf1\x33\x0b\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\
\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\
\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\
\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\x08\
\x28\x20\x30\x02\x0a\x08\x4c\x6f\x6d\x6d\xfd\x5c\xee\x9d\xf7\x1d\
\x0e\x34\x4c\x48\x5b\x76\x76\x97\x62\x09\xa6\xcd\x3d\xa4\x76\x09\
\x42\xa3\x83\xc2\x0c\xd8\xab\x5f\x4e\x31\x04\x74\xd3\xf8\xe5\x56\
\x1d\xc6\xf6\x78\x9e\x98\x98\x68\xad\x76\x41\xc8\x8d\x08\x67\x4e\
\x90\x72\x99\xd4\x41\x37\x8e\x5b\x66\xd8\xbb\xdf\xfb\x13\xfa\x3e\
\x7d\x16\x13\x9d\x57\xbd\xb2\x00\xcb\xb6\x7a\xcc\xa2\xbc\x0e\x76\
\x0e\x11\xd2\xf9\x54\x1f\x71\x57\x8d\x5e\x94\xcf\x78\xfe\xcc\x3f\
\x7f\xb7\x1a\xb7\x6c\xda\x76\x25\x8b\x13\x19\x1f\xe4\x90\x13\x3e\
\x7e\xbb\x67\xff\xd6\xaf\x35\x9f\x67\xea\xba\x74\xbf\x83\x56\x2f\
\x5b\x65\x87\xd4\x66\x73\xa6\x2c\x00\x19\x61\x21\x11\x20\x30\x02\
\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\
\x08\xcc\x64\x40\xfd\x96\x4e\xdd\x79\xf6\xda\xf9\x96\x4a\x17\x03\
\x58\xba\x97\x7f\xd6\x4c\x11\xd0\xab\xb7\xfe\x7d\xed\xab\x05\xdf\
\x1c\x53\xb6\x24\x00\x46\xed\xc7\x74\x4f\x7c\xb5\x5c\xd5\x3f\x46\
\x77\x1f\xfa\x96\x74\x39\x45\x40\x09\x27\xa0\xbe\x53\x57\xcf\xbd\
\x79\xee\xdf\x8b\xcd\xab\x96\xa9\xb4\x27\x29\xa0\xac\x7f\x0b\x88\
\xe3\xd0\xdf\x47\xdf\x4b\x11\x50\x47\x7b\x87\x70\x35\x0b\x02\xf0\
\x3f\x03\xda\xf7\xe9\x2d\x9d\xa6\xf8\x88\x2b\x7d\x41\x95\x3e\x03\
\xab\x53\x12\x00\x49\xf2\x05\x45\xa9\x96\xe2\x1a\xaf\x9c\xbc\x72\
\xe6\x66\xff\xcb\xa7\xdb\x2a\x59\x18\x60\xa9\xec\xf4\x76\xcf\xd6\
\xfa\x2e\x76\x7a\x79\xbe\x3e\x31\x31\xe3\x86\xc9\x86\xdc\x80\xf9\
\x59\x59\x59\x25\x64\x74\x9b\x34\x57\x54\x98\xb2\xfa\xfb\x8d\xc7\
\x2e\x9d\x6c\x6f\xde\x92\x00\xbc\xcc\xc5\x39\x5f\xe8\xe2\xe1\xb3\
\x8b\xdb\x58\xdb\xc4\xbf\x7c\x5d\xaa\x80\x3e\x8e\x78\xe2\xfe\xd1\
\x94\x01\xf7\x94\x29\x0d\xc0\x93\xc8\xa7\x6e\x1d\x7d\x3f\x8c\xeb\
\xde\xb2\xcb\x88\x0e\x0d\xde\x9e\x92\xfc\xba\x54\x01\x25\x9c\x80\
\x3a\x96\xef\x5a\xfb\xad\xb4\xab\x93\x37\x5f\x6b\x3e\xd7\x38\x2f\
\x45\x40\x59\x82\x0b\xa8\x6b\xd1\xef\x4b\x7f\x30\x19\xd0\xbb\x0f\
\x43\xbc\xd5\x29\x09\x40\x72\xd3\xd6\xcc\x59\x3f\xac\xeb\xc0\x4e\
\xd2\xf9\xa4\x80\xfe\xbc\x7d\xe5\x0c\xf5\x4a\x02\x60\x74\xe4\xa2\
\x7f\x47\xe3\xf9\xa4\x80\x9e\xb8\x72\xa6\x8d\x3a\xe5\x00\x48\x0b\
\xdb\x83\x02\x02\x4b\x0a\xa8\x77\xf1\xd2\x27\xaf\xdd\x0e\xaa\xa9\
\x66\x31\x00\x52\x4a\x0a\x68\xe7\xc6\xed\xc7\x4d\x5a\x39\xe3\x37\
\x35\x8b\x01\xa0\xd3\x95\x28\x5c\x2c\xc0\x78\x3e\x29\xa0\xb5\x5e\
\xa9\xbe\x55\x9d\x72\x00\x24\x37\xa9\xcf\x98\x06\xc6\xf3\x29\xbe\
\x83\x7e\xf3\xe1\x57\x2d\xbf\xf9\x65\xca\x2e\xe5\x4b\x02\x20\xa9\
\xe6\xed\xb3\x2b\xaf\x53\x9e\x87\xc6\xcb\x29\x02\x5a\xb5\x4c\xe5\
\xdd\x3d\x5b\xbd\x37\x6c\xe9\x8e\xd5\xd3\x94\x2f\x0d\xb0\x6c\x45\
\x0a\xba\xfd\xeb\xdb\x73\xf8\x1b\xc9\xe7\xa5\x5a\x8a\xdb\xae\xde\
\x9b\xd3\x5f\x7b\xe5\xd5\xcd\x9f\xcd\x1a\xfa\x8f\x72\xa5\x01\x96\
\xed\xa3\xd6\xdd\xbe\x78\xa7\x6e\xab\x99\x2f\xcf\x37\xf9\x33\x4b\
\x51\x57\xf7\x6b\xc6\xed\x42\xf7\x9f\xfd\xab\xfb\xbd\xb0\x50\xd6\
\x32\x02\xcc\x2c\x8f\x63\x9e\x87\x75\x2a\xd5\xfa\xd5\x35\x5f\x81\
\xdb\x69\xdd\x26\xc3\xdf\x41\x1b\x57\xab\xbf\xdc\xbc\x65\x01\xc8\
\x2c\x56\x54\x00\x04\x46\x40\x01\x81\x11\x50\x40\x60\x04\x14\x10\
\x18\x01\x05\x04\x46\x40\x01\x81\x11\x50\x40\x60\x04\x14\x10\x18\
\x01\x05\x04\x46\x40\x01\x81\x11\x50\x40\x60\x59\x0e\xe8\xbd\xb0\
\xd0\x32\x0f\x9e\x3c\xf4\xc8\x89\x62\x72\x9b\xd0\x47\x0f\x4a\xe9\
\x74\x89\xd6\x6e\x05\x0a\x07\xa9\x5d\x8b\x88\x1e\x85\x3f\x2e\xfa\
\xf4\x59\x84\xab\x9b\x4b\xa1\x60\x0e\x6f\xa9\xd3\xd9\xea\x6d\x63\
\xca\x7b\x78\x1f\xcd\xca\x7d\x32\x0c\xe8\xe2\x3f\x56\xcc\xfa\xfd\
\xe8\xce\xcf\xe5\x97\xa5\x21\xd2\x6e\xbd\xad\x32\xbc\x15\x90\x29\
\xd2\xc6\xd9\x9f\xbf\xfb\x49\x8f\xfc\x79\x5c\x42\xd2\xba\x8d\xc9\
\x80\xde\x7e\x70\xb7\xfc\xe7\x73\xbe\x3e\xff\x3c\xe1\xb9\x6d\xce\
\x95\x97\x0b\x11\x4e\x98\xd1\xd9\x6b\xe7\x5b\x1a\x0f\xb5\x22\x05\
\xd5\xd4\x96\x63\x26\x8f\xcd\x22\x1d\x40\x49\x89\x02\x01\xfc\xe7\
\xfb\x5f\x17\x2c\x93\xa6\x35\x63\x17\x3b\xdb\xdb\xda\x45\x19\xe7\
\x27\x05\x34\x36\x2e\xd6\xb1\xcb\xb8\xde\x51\xa6\xef\x0e\x40\x09\
\x5d\xc7\xf5\x8e\x9c\xd6\xcf\xaf\xb6\x77\x71\xaf\x13\xd2\xe5\xa4\
\x80\x12\x4e\x40\x0c\xc3\xe6\xfb\xfa\xcf\xff\x62\x86\x97\x7b\x81\
\xc2\x41\x86\x80\x76\x18\xdb\xe3\xb9\xda\x45\x01\xf8\x9f\x7e\x33\
\xbe\x08\x94\x76\x3b\xa4\xbf\xff\xe4\x61\x49\x0e\x71\x0f\x88\x67\
\xe3\xc1\xad\x23\xf4\xb3\xd6\xff\xb8\x42\xed\x42\x00\xa4\xb6\x7c\
\xf7\xba\xc9\xfa\x4b\xc1\x57\x1a\x64\x7c\x53\x00\x6a\xc8\xd6\xaa\
\x7e\x3d\xbb\x54\x37\x57\x1d\xc2\x0a\xbc\x1e\xa6\x3b\x74\x3c\x58\
\xd6\x7d\x9b\xd4\xf3\xd2\x95\x2c\xe1\x62\xe6\x8a\xc4\xb3\x74\xed\
\x19\xd9\xf7\xb5\x84\xf7\x50\x76\xfe\x7f\xb2\x15\x50\x4f\x8f\x02\
\xd9\xb9\x7b\xae\x10\xf6\x24\x5a\xf6\x7d\x0b\x17\x72\xb6\x88\xff\
\xa3\xec\xb0\x84\xff\x1f\xbd\xde\x46\x17\x1f\x2f\x6f\x39\x2c\x2b\
\xcb\x03\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\
\x02\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\
\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\
\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\
\xa0\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\
\x80\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\
\xc0\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\
\x08\x28\x20\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\x08\
\x28\x20\x30\x02\x0a\x08\x2c\x5b\x01\xdd\x7f\x24\xc8\x5c\x75\x08\
\xeb\x5e\x48\xb8\xec\xfb\x5e\xbc\x1c\xa2\x0b\xb9\x1f\x61\xc6\x6a\
\xb4\xc7\x12\xde\x43\xf1\xf1\xcf\x65\xdf\x57\x9f\xd7\x29\xcf\xc3\
\xf0\xa8\x08\x57\x39\x77\x3e\x60\x01\xff\xb9\xd9\x71\xf1\x4a\xa8\
\xda\x25\x08\x8f\xf7\x50\xfa\xf4\x0d\xab\xd4\x5d\xb9\xed\xd8\xae\
\x41\x6a\x17\x02\x20\x35\x7d\x9f\xb7\xba\x7f\x4e\x40\x01\xf1\x0c\
\xeb\x3a\xb0\x93\xe1\x3b\xa8\x77\xf1\xd2\x27\xaf\xdd\x0e\xaa\xa9\
\x76\x41\x00\xfe\xa7\x6e\xa5\xda\x1b\x0c\x01\x9d\xd6\x6f\x5c\xad\
\xf6\x63\xba\x27\xaa\x5d\x10\x80\xff\x6c\x1a\xbf\xdc\x4a\x3a\xd5\
\x27\x9f\xf1\xfe\x84\xbe\x4f\x9f\xc5\x44\xe7\x55\xaf\x2c\x00\xc6\
\x70\x4a\x52\xfc\xcc\xb2\x6a\xf4\xa2\x7c\x1b\x0f\x6e\x1d\xb1\x7c\
\xf7\xba\xc9\xca\x97\x25\x30\xe9\xb3\x85\x55\x86\xb7\x02\xb2\xa5\
\x9a\xb7\xcf\x2e\xdf\x9e\xc3\xdf\x48\x3e\x2f\xd5\xef\xa0\x1d\x1a\
\xb6\xf9\x56\x9a\x16\xfe\xbe\xf4\x87\xed\xc7\xf7\xf4\x57\xae\x3c\
\x81\x11\x4e\xe4\xa0\x4a\x9e\xaf\x1c\x9c\xd0\x7b\x54\x23\x53\xd7\
\xa5\xb9\xa2\xc2\xc7\x6f\xf7\x1c\x20\x4d\x4f\x23\xc3\x0b\xcf\xdb\
\xb2\x78\xe1\xf1\x80\x53\xed\x72\xac\x42\xc0\xc2\x94\x2b\x51\xe6\
\x78\xdf\x17\xf9\x92\x16\xd0\xa6\x77\xbb\x0c\xd7\x24\xca\xe7\x9c\
\xf7\xfe\x88\xf7\x07\xb7\x37\x5f\x69\x00\x32\x8b\x75\x71\x01\x81\
\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\x11\
\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\x11\x50\
\x40\x60\x04\x14\x10\x18\x01\x05\x04\x96\x6e\x40\x8f\x5c\xf4\xef\
\x28\x6d\x72\x76\xfd\xde\x8d\xaa\xcf\x62\x9e\xe5\x7d\x9e\x90\x60\
\xb8\xbd\xb3\x83\xd3\x63\x45\xaa\x03\x34\x26\x32\x3a\x2a\xbf\x95\
\x95\x55\xa2\xa3\x9d\x43\x78\xe1\xfc\x85\x82\x1b\x54\xa9\xb3\xfa\
\xdd\x86\x6d\xd2\xdc\xfe\xda\x64\x40\xd7\x1f\xd8\x32\x6a\xd5\x9e\
\x0d\x13\xd2\x7b\x12\x33\xd4\x0a\x58\xa4\xc4\xc4\x44\xab\xa8\x98\
\x67\xf9\x82\x43\x6e\xfa\x04\xef\xbe\xe9\xb3\x62\xf7\xba\x49\x65\
\x4b\x78\xf9\x4f\xfd\xc4\xef\xb5\x97\x6f\x9b\x2a\xa0\x23\x7f\x1a\
\x7f\x28\x20\xf8\x6a\x7d\x65\x4a\x05\x20\xf9\xe7\x56\x60\x6d\x69\
\xbf\x60\xc9\x77\x77\x22\x49\x11\xd0\x95\xbb\xd7\x4f\x24\x9c\x80\
\x7a\x3a\xfa\xf6\x8c\xdb\xe0\xb7\xd4\xd6\x78\x39\x45\x40\x37\x1c\
\xfc\x6d\xa4\xf2\x25\x01\x30\x92\x96\xf3\x9c\x0f\xbc\xd4\xd4\xc7\
\xab\xe2\x5e\xe9\x72\x52\x40\xb7\x1d\xdb\x35\x50\xbd\xb2\x00\x18\
\x8d\x5f\x36\xed\x8f\x75\xdf\xfc\xec\x20\x9d\x4f\x0a\xe8\xd6\x23\
\x3b\x86\xa8\x57\x12\x00\xa3\xb8\xe7\xf1\xf6\xc6\xf3\x49\x01\x0d\
\x79\x74\xbf\xb4\x3a\xe5\x00\x48\x4b\x52\x40\xed\x6d\xed\xa2\x62\
\xe2\x62\x9d\xd4\x2c\x06\x40\x4a\x49\x01\x6d\x54\xb5\xde\x8a\x5d\
\x27\xf7\x7d\xac\x66\x31\x00\x52\x4a\x0a\xe8\xa7\x6d\x7b\x7d\x42\
\x40\x01\xf5\x75\x6a\xdc\x36\x69\x25\xa1\x14\x3f\xb3\xb8\xe5\x2f\
\x74\x3d\xf4\xf1\x03\x4f\xc5\x2b\x02\x90\xe4\xfd\x66\x1d\xc7\x18\
\xcf\xa7\x08\xe8\x82\x2f\x67\x96\xe6\x28\x67\x80\x7a\xe6\x0d\x9e\
\x56\x2e\xf9\xe5\x54\xab\xfa\x49\xab\x1a\x11\x52\x40\x79\xf3\x86\
\x4c\x2f\x5b\xb4\xa0\xfb\xb5\xe4\xf3\x4c\xae\x2c\x2f\x85\xf4\x7c\
\xe0\xa5\x26\xdf\xae\x9a\xb5\x59\x5a\xa9\x57\x99\xf2\x00\xcb\xd4\
\xb1\x51\xdb\x89\xdd\x9a\x77\x1c\x6d\xea\xba\x34\x37\x37\xf3\xf1\
\xaa\xb8\x6f\xe5\xe8\x85\x2e\x39\x57\x16\x80\x8c\xb0\xc1\x36\x20\
\x30\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\
\x02\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\x08\x28\x20\x30\x02\
\x0a\x08\x8c\x80\x02\x02\x23\xa0\x80\xc0\x32\x15\xd0\x9b\xa1\xb7\
\x2b\x6e\xf7\xdf\xf3\x59\xe0\x9d\xeb\x35\xae\xdc\xbc\x56\x27\xa7\
\x8b\x12\x9d\xb4\x2d\x9e\x55\x86\xb7\x02\x52\x93\x76\x8a\x50\xa9\
\x74\x85\x03\xaf\x57\xac\xb9\xb1\xf6\x2b\x35\x7e\xcb\xe8\xf6\x69\
\x06\xf4\x49\xe4\x53\xb7\x81\xb3\xbf\xba\x14\x1e\x15\xe1\x6a\xde\
\x12\x73\x3f\xc2\x99\x16\xfe\x74\x65\x44\xda\x63\x49\xe8\x99\x43\
\x9e\xfb\xce\x1c\xea\x69\x9c\x37\xb6\xc7\xf0\x56\xd5\xcb\xfa\xec\
\x34\x75\xfb\x54\x01\x95\xb6\xff\xfc\x60\xc2\x27\x8f\x13\x75\x89\
\xfc\x4f\x23\x8b\x78\xcb\xc8\x31\x6e\xd9\xd4\x1d\xd2\xe9\x94\x8f\
\x7d\xeb\x94\xf3\xf0\x3e\x96\xfc\xba\x14\x01\x9d\xb7\x65\xf1\xc2\
\xdd\x27\xf7\xf7\x55\xb2\x38\x00\xff\xf9\x6a\xa1\xdf\x51\xf7\x02\
\x85\x83\xe6\x7f\x31\xc3\xcb\x38\x2f\x29\xa0\x63\x96\x4c\xda\x77\
\x21\x28\xa0\xb1\x2a\x95\x01\x30\x90\x76\x20\xff\xfe\x84\xbe\x4f\
\x57\x8d\x5e\x64\xd8\x93\x89\x21\xa0\xeb\xf6\x6f\x1e\x43\x38\x01\
\x31\x3c\x8b\x89\xce\x3b\xf8\x87\x91\x7f\xcf\x1a\x30\xa9\x8a\x21\
\xa0\xab\xff\xfc\x75\x9c\xda\x45\x01\xf8\x1f\xe9\xe0\xbe\xd2\x02\
\x5a\xfd\xca\x74\x8e\xa4\x0d\x40\x3d\xd2\xc1\xb4\xf5\xbf\x1f\xdd\
\x31\x58\xed\x42\x00\xa4\x76\xeb\xfe\x9d\x0a\xfa\xe8\xd8\x18\x67\
\xb5\x0b\x01\x60\x5a\xb6\x56\xf5\xf3\x1d\xda\xd4\x5c\x75\x08\xeb\
\xf4\x85\xbb\xba\xad\x3b\x02\x64\xdd\xb7\x73\x5b\x1f\x5d\x85\xb2\
\x85\xcd\x5c\x91\x78\xfc\xa6\xef\x95\x7d\x5f\x4b\x78\x0f\x4d\x9c\
\x75\x40\x17\x1f\xff\x5c\xd6\x7d\x59\x17\x17\x10\x18\x01\x05\x04\
\x46\x40\x01\x81\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\
\x40\x01\x81\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\
\x01\x81\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\
\x81\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\
\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\x11\
\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\x11\x50\
\x40\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\x11\x50\x40\
\x60\x04\x14\x10\x18\x01\x05\x04\x46\x40\x01\x81\x11\x50\x40\x60\
\x04\x14\x10\x58\xb6\x02\xea\x37\x7d\xaf\xb9\xea\xd0\xa4\x75\x5b\
\xce\xab\x5d\x82\xf0\x78\x0f\xa5\x4f\x5f\xbc\x50\xd1\x2b\xb7\x1f\
\xdc\x2d\xaf\x76\x21\x00\x52\xd3\x77\x6d\xda\xc1\xf7\xbb\x75\x73\
\xd7\xa8\x5d\x08\x80\x94\x8a\x17\x2a\x76\x59\x5f\xdf\xe7\xf5\xb5\
\x04\x14\x10\xcf\xf4\x4f\xc7\xd5\x34\x7c\x07\x9d\xd0\x7b\x54\xa3\
\xd1\x8b\x27\x1e\x50\xbb\x20\x00\xff\x79\xb5\x5c\xb5\x6d\x0e\x76\
\xf6\x91\x86\x80\x56\xf2\x7c\xe5\x60\xb7\xe6\x1d\x47\xaf\xdc\xb3\
\x61\x82\xda\x85\x89\x27\xf1\xc5\x64\xa5\x76\x11\xb0\x20\xc5\x5c\
\x8b\x5c\x1d\xdd\xfd\xcb\xb7\xa5\xf3\x49\x4b\x71\x3b\x36\x6a\x3b\
\xb1\xa8\x6b\x91\x7f\xa6\xaf\xfd\x61\xad\x7a\xa5\x89\x88\x70\x42\
\x39\x8d\xaa\xd6\x5b\x31\xb8\x63\xbf\xee\xc6\xcb\x29\x7e\x66\xa9\
\x57\xf9\xb5\x75\xd2\xd4\x7b\xea\xa0\xdb\x61\xe1\x8f\x8a\x29\x5f\
\x1e\x60\xb9\x56\x8c\x5a\x50\xc0\xd9\xc1\xe9\x71\xf2\x79\x26\x7f\
\x07\x5d\x3c\x7c\x76\x71\xe9\x74\xc2\xf2\xe9\xdb\x4e\x5d\x3d\xf7\
\xa6\x02\xb5\x41\x0b\xf8\x36\x90\x65\xa5\xdc\x3d\xce\x4f\xee\x3b\
\xb6\x9e\xa3\xbd\x43\xb8\xa9\xeb\xd3\x5d\x51\x61\x74\xf7\xa1\x6f\
\x19\xcf\xff\x71\x6c\xf7\x80\xbf\x2e\x1c\xeb\x12\x74\x37\xb8\x7a\
\x74\x6c\x8c\xb3\xb9\x0b\x85\x06\xfc\x7f\x38\xc9\x69\xda\xbc\x8a\
\x79\x9e\xae\x53\xb1\xe6\xc6\xb7\xeb\xbc\xf1\xbd\x83\x9d\x43\x44\
\x46\xb7\xcf\xf4\x9a\x44\x6f\xbe\xde\xe2\x07\x69\xca\x5e\x79\x00\
\xb2\x82\x75\x71\x01\x81\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\
\x04\x46\x40\x01\x81\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\
\x46\x40\x01\x81\x11\x50\x40\x60\x04\x14\x10\x18\x01\x05\x04\x96\
\x66\x40\x67\xac\x9b\xbb\xfa\xd0\xf9\x63\x5d\x95\x2c\x06\xb0\x44\
\x85\x5c\x5c\x6f\x2e\x1a\x3a\xab\xa4\xa9\xeb\x52\x05\x34\xe2\x59\
\x44\xc1\xee\x93\x3e\x7d\x98\xf3\x65\x01\x90\x3c\x78\xf2\xd0\xa3\
\xfd\x98\xee\x89\xbd\x5a\x77\xfb\xa2\x4d\xdd\x56\x33\x93\x5f\x97\
\x2a\xa0\x84\x13\x50\xc7\x92\xed\x2b\x67\xd8\xdb\xd9\x47\xb6\xac\
\xd9\x64\xa1\x71\x5e\x8a\x80\x4a\x29\x56\xbe\x2c\x00\x46\x3f\x6e\
\x59\xb2\xc0\x64\x40\x43\x1e\xdd\x2f\xad\x4e\x49\x00\x92\x93\x76\
\xdc\x37\xb4\xcb\x80\x2e\xd2\xf9\xa4\x80\x2e\xd9\xbe\x62\x66\xda\
\x77\x01\xa0\x94\xc3\x17\x8e\x77\x4e\x15\x50\xff\x80\xd3\x6d\xd5\
\x2b\x09\x80\x29\xfc\x0e\x0a\x08\x2c\x29\xa0\x5e\x45\x3d\x4f\x07\
\xde\xbd\x5e\x43\xcd\x62\x00\xa4\x94\x14\xd0\x2e\x4d\xdb\x8f\x9b\
\xbc\x72\xe6\x66\x15\x6b\x01\xa0\xfb\xef\xd0\x0f\xc6\xf3\x49\x01\
\xad\xfd\x4a\x8d\x2d\xea\x94\x03\x20\xb9\x49\x7d\xc7\x34\x30\x9e\
\x4f\xf1\x1d\x54\xda\x51\xb5\xb4\x37\x79\xe5\x4b\x02\x20\xf1\x29\
\x5d\x61\x9f\x8b\x73\xbe\x50\xe3\xe5\x14\x01\x7d\xb5\x5c\xd5\x3f\
\x3e\x68\xd1\x79\xe4\x8a\xdd\xeb\x26\x29\x5f\x1a\x60\xd9\x0a\xb9\
\xb8\xde\x18\xd7\x6b\x64\xd3\xe4\xf3\x52\x2d\xc5\x7d\xb7\x61\x9b\
\xc9\xd2\xc7\xdd\x41\x73\x46\x5c\x54\xae\x34\xc0\xb2\x75\x6f\xd9\
\x65\x44\x87\x06\x6f\x4f\x79\x79\xbe\x3e\x31\x51\xde\xda\x7d\x77\
\xc3\x42\xbc\x4f\x04\x9c\x7e\xe7\x52\xf0\x95\x06\x37\x42\x6f\x55\
\xbe\xfb\x30\xc4\x3b\xdb\x55\x02\x00\xa0\x80\x3c\x8e\xce\x61\x45\
\x0a\xba\xff\x5b\xa6\x98\xe7\xa9\x6a\xde\x3e\xbb\x5f\xaf\x58\x73\
\x63\x56\x1f\x23\x4b\xeb\x29\x48\x2b\xdb\x6f\x3d\xb2\x63\x48\x56\
\x9f\x04\x00\x00\x91\x44\x3c\x8b\x2c\x78\xed\x76\xa0\x34\xd5\xda\
\x79\x62\x6f\x3f\xe3\x7c\xf7\x82\x6e\x81\xbd\x5a\x77\x1b\x52\xfb\
\x95\x1a\xbf\x65\xf4\x18\x19\x36\xd0\xc7\x11\x4f\x8a\xf8\x2d\x9d\
\xba\xe3\xfa\xbd\x1b\x55\xb3\x5b\x30\x00\x00\x22\x0b\x09\x0b\xf5\
\x9a\xbc\x72\xa6\x61\xad\xa0\xce\x4d\xda\x8f\x7b\xaf\x69\x07\xdf\
\xb4\x6e\x9b\x6e\x03\x5d\xf2\xc7\x8a\x99\x5b\x8f\xee\x1c\x6c\xe6\
\xfa\x00\x00\x10\xde\xba\x7d\x9b\xc6\x4a\xd3\xf4\x4f\xc7\xd5\x2c\
\x53\xac\xf4\xa9\x97\xaf\x4f\xb3\x81\x4e\x59\xfd\xfd\xc6\x63\x97\
\x4e\xb6\xcf\xd9\xf2\x00\x00\x10\xdb\xd0\x1f\xc7\x9e\x94\x56\x83\
\x97\xd6\xb4\x4d\x3e\xdf\x64\x03\xdd\x72\xf8\x8f\x2f\x69\x9e\x00\
\x00\xfc\x67\xe2\x8a\xef\xb6\xae\x1d\xbb\xd8\xd9\x56\x6f\x1b\x6d\
\x9c\x67\xb2\x81\x6e\x3b\xba\x6b\x90\x72\x65\x01\x00\x20\xb6\xc4\
\xc4\x44\xeb\x6d\xc7\x76\x0f\x68\x57\xff\xcd\xe9\xc6\x79\xa9\x1a\
\x68\x78\x54\x84\xeb\xfd\x27\x0f\x4d\x1e\x20\x02\x00\x00\x4b\x15\
\x74\x2f\xb8\x5a\xf2\xcb\xa9\x1a\x68\x5e\xa7\x3c\x0f\x1d\xec\x1c\
\x22\xa2\x63\xa3\xf3\x28\x56\x15\x00\x00\x82\x73\xcb\x5f\x28\x38\
\xf9\x65\x93\x8b\x70\x9b\x54\xaf\xbf\x74\xfb\xf1\x3d\xfd\x95\x29\
\x09\x00\x00\xf1\x35\xaa\x5a\x6f\x45\xf2\xcb\x26\x1b\xe8\xc7\x6f\
\xf7\x1c\x70\xe2\xf2\x99\x77\xa4\xc3\x89\x2a\x53\x16\x00\x00\xe2\
\x7a\xbf\x59\xc7\x31\x25\x0a\x17\x0b\x48\x3e\x2f\xcd\xcd\x58\x16\
\x0d\x9d\x55\x72\xd0\xec\x11\x17\x6f\xde\xbf\x5d\x31\xe7\x4b\x03\
\x00\x40\x4c\x69\xee\x0b\x37\xbd\x3b\xcd\x1e\xf4\x6d\xa5\x6d\xc7\
\x76\x0d\xfc\x69\xdb\xf2\xd9\x39\x57\x1a\x00\x00\xe2\x29\x52\xd0\
\xed\xdf\x89\x7d\x46\x37\x2c\x98\xb7\xc0\x1d\x53\xd7\x67\xb8\x2b\
\xbf\xb7\x5e\x6f\x39\x47\x9a\x0e\x9c\x3b\xfc\xc1\xbc\x2d\x4b\x16\
\xc6\xc6\xc5\x3a\x9a\xbf\x4c\x00\x00\xc4\x20\xed\x30\x61\x60\x87\
\x8f\x3f\x4a\x7e\xec\x32\x53\x32\xbd\x33\x79\xe9\xc7\x53\xe3\x0f\
\xa8\x01\xc1\x57\xeb\xed\x3d\x73\xf0\xa3\xe3\x01\xa7\xda\x49\x9b\
\xbd\x64\xb7\x58\x00\x00\xd4\xe2\x5d\xbc\xf4\xc9\x7a\x95\x5f\x5f\
\xdb\xaa\x76\xd3\xf9\xd2\x56\x28\x99\xbd\x5f\x96\x8e\xc6\x62\x54\
\xa1\x54\xb9\xc3\xd2\xd4\xbf\x5d\x9f\x3e\x72\xee\x0f\x00\x40\x6e\
\x27\xab\x81\x02\x00\x60\xe9\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\
\x40\x06\x1a\x28\x00\x00\x32\xd0\x40\x01\x00\x90\x81\x06\x0a\x00\
\x80\x0c\x34\x50\x00\x00\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\
\x00\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\
\x00\x32\x98\xb5\x81\xde\x0c\xbd\x5d\xf1\xec\xb5\xf3\x2d\xef\x86\
\x85\x78\x3f\x8e\x78\x52\x24\xec\xe9\xa3\x62\x41\xf7\x6e\x54\xe3\
\x10\x68\x90\x24\xbe\x98\xac\xd4\x2e\x02\x80\x45\x71\xcb\x5f\xe8\
\x7a\xf1\x42\x45\xaf\xe4\xcf\x9b\xff\x9e\x6b\xbe\x82\xb7\xca\x96\
\xf0\xf2\xaf\x5a\xa6\xf2\x1e\x7b\x5b\xbb\xa8\xec\x3e\xb6\xac\x06\
\xfa\x24\xf2\xa9\xdb\x1f\xc7\x76\x0f\xd8\xee\xbf\xe7\x33\x0e\x67\
\x86\xcc\xa2\x79\x22\x6b\xf8\xc8\x85\xec\x0b\x7d\xfc\xc0\x53\x9a\
\xd2\xbb\x4d\xed\x0a\x35\xb6\xb4\xaa\xd5\xfc\xc7\xea\x65\x7d\x76\
\x66\xe5\xb1\x33\xd5\x40\xa3\x62\x9e\xe5\x5b\xba\x63\xf5\xb4\xdd\
\x27\xf7\xf7\x4d\xd4\x25\xf2\x8e\x06\x90\x33\x52\xf4\x4c\xfe\xd4\
\x40\x19\xfe\x01\xa7\xdb\x4a\x93\xf1\x72\x51\xd7\x22\xff\x0c\x7e\
\xf7\x93\x1e\xe5\x3c\xbc\x8f\xa5\x77\xbf\x74\x1b\xe8\xbc\x2d\x8b\
\x17\x4a\x4d\xd3\x5c\x45\x02\x40\xba\xe8\x99\x10\xc0\xdd\x87\xf7\
\xca\x7e\xb5\xd0\xef\xa8\x74\xde\xbd\x40\xe1\xa0\x51\x1f\x7c\xf9\
\xb6\x87\x5b\xf1\x4b\x2f\xdf\x2e\x55\x03\x8d\x7f\x1e\x6f\xe7\xb7\
\x74\xea\xce\x0b\x41\x01\x8d\x15\xa8\x13\x00\x00\x61\x85\x3c\xba\
\x5f\x7a\xd0\x9c\x11\x17\x1d\xed\x1d\xc2\x27\xf5\x19\xd3\xc0\xb3\
\x48\xc9\x73\xc6\xeb\x52\x34\xd0\x75\xfb\x37\x8f\x59\xfd\xe7\xaf\
\xe3\x94\x2f\x11\x00\x00\x71\x3d\x8b\x89\xce\x3b\x64\xee\xa8\xb3\
\xa5\xdc\x3d\xce\x4f\xeb\xe7\x57\xcb\x56\x6f\x1b\x93\xd4\x40\x27\
\xaf\x9a\xb9\x39\xf9\x32\x60\x00\x50\x0e\x2b\x0c\x21\x77\x08\x0e\
\xb9\xe9\xd3\x7d\x52\xbf\xb0\x45\x43\xbf\x2f\x69\x68\xa0\x2b\xf7\
\x6c\x98\x40\xf3\x04\xa0\x1e\x9a\x27\x72\x8f\x98\xb8\x58\xa7\x91\
\x3f\x8d\x3f\x64\x68\xa0\xbf\x1f\xdd\x31\x58\xe5\x7a\x00\x00\xc8\
\x35\x6e\xdd\xbf\x53\xc1\xd0\x40\xa3\x63\x63\x9c\xd5\x2e\x06\x80\
\x25\x63\x11\x2e\x72\x1f\x7d\x42\x42\x82\x8d\x1a\x4f\xdc\xa4\x9e\
\x97\xae\x61\x1d\x4f\x35\x9e\x1a\x66\x76\xfa\xc2\x5d\xdd\xd6\x1d\
\x01\x8a\x3f\x6f\xe7\xb6\x3e\xba\x0a\x65\x0b\x2b\xfe\xbc\x30\xbf\
\x99\x0b\x0e\xeb\x9e\x86\xc7\x28\xfe\xbc\xc3\xfa\x37\xd0\x39\x39\
\xda\x2a\xfe\xbc\x30\xbf\x89\xb3\x0e\xe8\xe2\xe3\x9f\x2b\xfa\x9c\
\xec\x0b\x17\x00\x00\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\
\x06\x1a\x28\x00\x00\x32\xd0\x40\x01\x00\x90\x81\x06\x0a\x00\x80\
\x0c\x34\x50\x00\x00\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\x00\
\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\x00\
\x32\xd0\x40\x01\x00\x90\x81\x06\x0a\x00\x80\x0c\x34\x50\x00\x00\
\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\x00\x19\x68\xa0\x00\x00\
\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\x00\x32\xd0\x40\x01\x00\
\x90\x81\x06\x0a\x00\x80\x0c\x34\x50\x00\x00\x64\xa0\x81\x02\x00\
\x20\x03\x0d\x14\x00\x00\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\
\x40\x06\x1a\x28\x00\x00\x32\xd0\x40\x01\x00\x90\x81\x06\x0a\x00\
\x80\x0c\x34\x50\x00\x00\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\
\x00\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\
\x00\x32\xd0\x40\x01\x00\x90\x41\x6f\x6d\x6d\xfd\x5c\x8d\x27\xde\
\x77\x38\xd0\x30\x01\x72\xad\xdb\x72\x5e\xed\x12\x60\x36\x89\x2f\
\x26\x2b\xc5\x9f\x75\xda\xdc\x43\x8a\x3f\x27\xb4\xc3\xf0\x0d\xd4\
\xab\x98\xe7\xe9\xc0\x3b\xd7\x6b\xa8\x5d\x0c\x00\x00\xb9\x85\xa1\
\x81\x7e\xd4\xea\xfd\x2f\xc7\x2c\x99\xb4\x4f\xed\x62\x00\x58\x2a\
\xe5\xbf\x7d\x02\xd9\xe1\xe3\x55\x71\xaf\xa1\x81\x56\x2e\x5d\x61\
\xff\xe0\x8e\xfd\xba\xcf\xda\x30\x7f\xb9\xda\x45\x01\x00\x20\xb2\
\x92\xee\x25\x2e\xf8\x7d\x38\xa2\x45\xd2\x4a\x44\x8d\xaa\xd6\x5b\
\x21\x75\xd4\xcf\xe7\x7c\x7d\x3e\xe2\x59\x64\x41\x35\x8b\x03\x00\
\x40\x44\x5d\x9b\x76\xf0\xed\xd2\xa4\xfd\x38\xe9\x7c\x8a\xb5\x70\
\x0b\xe6\x2d\x70\x67\xf9\xc8\xf9\xae\x67\xaf\x9d\x6f\xe9\xb7\x74\
\xea\x4e\x75\xca\x43\xee\xa3\xce\x0a\x20\x00\xa0\x94\xba\x95\x6a\
\x6f\x18\xd6\x75\x60\xa7\xe4\xf3\x4c\x6e\xc6\x52\xcd\xdb\x67\xd7\
\xa6\xf1\xcb\xad\x42\xc2\x42\xbd\xa6\xaf\x9b\xbb\xe6\xda\xed\xc0\
\x5a\xca\x94\x88\xdc\x89\xe6\x09\x40\x7b\x1c\xec\x1c\x22\x7a\xbd\
\xd9\x6d\x48\x8b\x57\x1b\xff\x64\xea\xfa\x74\xb7\x03\x75\x2f\xe8\
\x16\x38\xad\x9f\x5f\x6d\xe9\xbc\xd4\x4c\x97\xed\x5e\xfb\xed\x91\
\x0b\xfe\x9d\xd2\xbb\x0f\x2c\x1c\x5f\x46\x01\xe4\x62\xc5\x0b\x15\
\xbd\xd2\xa9\x71\xdb\x09\xd2\xcf\x9a\x19\xdd\x36\xd3\x3b\x52\x90\
\x9a\xe9\xb0\x2e\x03\x3b\xeb\xba\xfc\x77\x39\x32\x3a\x2a\xff\x9f\
\xa7\x0e\xf4\xfa\xeb\xc2\xb1\x2e\xff\xdc\x0a\xac\x2d\xbf\x5c\x68\
\xca\xff\x37\x4f\xfa\x28\xb2\x8b\xf7\x10\x72\x5a\xfe\x3c\x2e\xf7\
\xea\x55\x7e\x6d\x7d\xf3\x17\xdf\x30\x3d\x8b\x78\xfc\x9d\xd5\xfb\
\xcb\xde\x13\x91\xb3\x83\xd3\xe3\x77\xea\xb5\x9e\x21\x4d\x2f\x5f\
\x17\x74\x37\xb8\xda\x8d\xd0\x5b\x95\x6f\x3f\xb8\x5b\x3e\xec\xe9\
\xe3\x62\x8f\x23\x9e\xb8\x87\x3f\x8b\x70\x7d\x14\xfe\xb8\xe8\x93\
\xc8\xa7\x6e\xb1\x71\xb1\x8e\x72\x9f\x17\xb9\x43\xaa\x3f\x7c\xc6\
\xbf\x86\x2f\x9f\x02\x69\x78\xf9\xed\xc1\x5b\x06\x99\xe1\xe2\x9c\
\x2f\xf4\x45\x63\x0c\x91\x9a\xa3\x4b\x9e\x17\xe7\x9d\x5d\x42\x0a\
\xe7\x77\xbd\x51\xa6\x78\xe9\x93\x5e\x45\x3d\xcf\xd8\xdb\xda\x45\
\x99\xeb\xb9\x72\x64\x57\x7e\xa5\x8b\x96\x3a\x2b\x4d\x39\xf1\xd8\
\x00\x00\x88\x80\x7d\xe1\x02\x00\x20\x03\x0d\x14\x00\x00\x19\x68\
\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\x00\x32\xd0\
\x40\x01\x00\x90\x81\x06\x0a\x00\x80\x0c\x34\x50\x00\x00\x64\xa0\
\x81\x02\x00\x20\x03\x0d\x14\x00\x00\x19\x68\xa0\x00\x00\xc8\x40\
\x03\x05\x00\x40\x86\x2c\x37\xd0\x7b\x61\xa1\x65\xb6\x1e\xd9\x3e\
\x64\xf7\xa9\x03\xbd\xe3\xe2\xe3\x1c\x72\xa2\x28\x00\x00\x94\xe4\
\x55\xb4\xd4\x99\x56\xb5\x9b\xfd\xd8\xa2\x66\x93\x45\x99\xbd\x4f\
\xa6\x1a\x68\x4c\x5c\x8c\xd3\x8c\xf5\xf3\x56\xf9\x07\x9c\x6e\x2b\
\xbf\x3c\x00\x00\xc4\x14\x78\x37\xb8\xfa\xbc\x2d\x4b\x16\x4a\x93\
\x74\xf9\x93\x36\x1f\x7e\x26\x35\xd4\xf4\xee\x93\x61\x03\x9d\xb0\
\x7c\xfa\xb6\x53\x57\xcf\xbd\x69\xae\x22\x01\x00\x10\xdd\x82\xad\
\xbf\xcc\x93\xa6\x81\xed\xfb\x7e\xd4\xb4\x46\xc3\x5f\x4c\xdd\x26\
\xcd\x06\x1a\x78\xe7\x7a\x8d\x2f\x7f\x1c\x73\x2a\xc7\xaa\x03\x00\
\x40\x70\x73\x36\x2d\xfa\xf9\x78\xc0\xa9\x76\x5f\x77\x1b\xd2\xee\
\xe5\xeb\x4c\x36\xd0\xfb\x8f\x1f\x96\xa4\x79\x02\x00\xa0\xd3\xf9\
\x5f\x3e\xdd\x76\xe6\xfa\x79\x2b\x87\x74\xfa\xac\x5b\xf2\xf9\x26\
\x1b\xe8\xe2\x3f\x56\xcc\x52\xa4\x2a\x00\x00\x72\x81\x83\x7f\x1f\
\x7d\xbf\xf9\xab\x8d\x17\xfb\x78\x55\xdc\x6b\x9c\x97\xaa\x81\x26\
\xea\x12\xad\x8e\x07\x9c\x6c\xaf\x6c\x69\x00\x00\x88\xed\x78\xc0\
\xa9\xb6\xe9\x36\xd0\x67\x31\xd1\x79\x95\x2d\x09\x00\x00\xf1\x3d\
\x8b\x4d\xd9\x1f\x53\x35\x50\x27\x7b\xc7\xa7\xa5\x8b\x94\x3c\x1b\
\x74\xef\x46\x35\xc5\xaa\x02\x00\x40\x70\x15\x4b\x95\xfb\x2b\xf9\
\x65\x93\xbf\x81\x76\x6a\xdc\x6e\xc2\xd4\x35\xb3\x37\x28\x53\x12\
\x00\x00\x62\x2b\x52\xd0\xed\xdf\x66\x35\x1a\x2d\x49\x3e\xcf\x64\
\x03\xad\x53\xa9\xd6\xaf\xef\x35\x7d\xd7\x77\xf5\xde\x5f\xfd\x94\
\x29\x0d\x00\x00\x71\x4d\xec\x3d\xba\xd1\xcb\xf3\xd2\xdc\x0e\xb4\
\x73\x93\x76\xe3\x4a\x15\xf1\xf8\xfb\xdb\x55\xb3\x36\xe5\x6c\x59\
\x00\x00\x88\xa9\x42\xc9\x72\x87\x27\xf5\x1d\x53\xdf\xd4\x75\xe9\
\xee\x89\xe8\xb5\x0a\xaf\x6e\xde\x34\x7e\xb9\xd5\x9a\xbd\x1b\xfd\
\xd6\xee\xdb\x34\x36\x67\xca\x03\x00\x40\x2c\xf9\xf3\xb8\x84\x8c\
\xfa\xe0\xcb\xb7\xbd\x8b\x97\x3e\x99\xd6\x6d\x32\xb5\x2f\xdc\xae\
\x4d\x3b\xf8\x4a\xd3\xf5\x7b\x37\xab\xac\xfa\x73\xfd\x84\x13\x97\
\xcf\xb4\x31\x5f\x99\x00\x00\x88\xa1\x4b\x93\xf6\x7e\x2f\xfa\xdd\
\x37\x99\xb9\x6d\x96\x8e\xc6\xe2\x59\xc4\xe3\xef\x91\xdd\xbe\x78\
\x27\xf9\xbc\x2b\x37\xaf\xbd\x7e\x23\xe4\xa6\xcf\x83\x27\x61\x1e\
\x91\xd1\x51\xf9\xe3\xe2\xe3\xec\xf5\x7a\x7d\x6c\x56\x1e\x17\x00\
\x00\xa5\x58\xe9\xac\x12\xf2\x3a\xe5\x09\x73\x2f\x50\x38\xc8\xab\
\xa8\xe7\xe9\x92\xee\x25\x2e\xc8\x79\x9c\x6c\x1f\x0f\xb4\xbc\x87\
\xf7\x31\x69\xca\xee\xe3\x00\x00\x90\x9b\x70\x40\x6d\x00\x00\x64\
\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\x00\x19\x68\xa0\x00\x00\xc8\
\x40\x03\x05\x00\x40\x06\x1a\x28\x00\x00\x32\xd0\x40\x01\x00\x90\
\x81\x06\x0a\x00\x80\x0c\x34\x50\x00\x00\x64\xa0\x81\x02\x00\x20\
\x03\x0d\x14\x00\x00\x19\x14\x69\xa0\xf7\xc2\x42\xcb\x3c\x78\xf2\
\xd0\x43\x89\xe7\x42\xee\x10\x19\x13\xe5\x12\x19\x15\x95\xff\x59\
\x5c\x74\x5e\x47\x5b\xfb\x48\x27\x07\xa7\xc7\x79\x1c\x9d\x1f\xa9\
\x5d\x17\x72\x87\xd8\xb8\x38\x87\xc8\xe8\xc8\x02\x91\xd1\x51\x2e\
\xcf\x13\x13\xf4\xce\xf6\x8e\x4f\x9c\x1d\x9c\x1f\x3b\xda\x3b\x84\
\xab\x5d\x1b\xc4\x60\xab\xb7\x8d\x29\xe5\xee\xf1\xb7\x83\x9d\x7d\
\x64\x4e\x3d\x47\xb6\x1b\xe8\xb9\x6b\x17\x5a\x9c\xbc\x7a\xf6\x2d\
\xff\x80\x53\x6d\x43\x1f\x3f\xf0\x34\x43\x4d\xd0\x8a\x44\x9d\x74\
\xd8\x03\x00\x10\x4e\x35\x6f\x9f\x5d\xb5\xca\x57\xdf\x5a\xb7\x72\
\xed\xf5\xd2\xb1\x3f\xe5\x3c\x46\x96\x1a\xe8\xed\x07\x77\xcb\x6f\
\x3c\xb8\x75\xc4\x81\x73\x47\xba\x3d\x4f\x78\x6e\x2b\xe7\x09\x61\
\x41\x68\x9e\xc8\x22\x3e\x73\x41\x29\x67\xaf\x9d\x6f\x29\x4d\x8b\
\xb6\x2d\x9b\x63\x9c\xf7\xe2\x1b\xeb\xf9\x76\xf5\xdf\x9c\xd6\xa0\
\x4a\x9d\xd5\x36\xd6\x36\xf1\x19\x3d\x46\x86\x0d\xf4\xe4\x95\xb3\
\x6f\xcd\xdd\xfc\xd3\xe2\xc7\x11\x4f\xdc\xb3\x5b\x30\x00\xa4\x87\
\xe6\x09\x35\x05\x87\xdc\xf4\xf9\xfe\xd7\x05\xcb\xa4\x49\xba\xdc\
\xba\x76\xf3\x79\x3d\x5b\xbd\x37\xcc\xde\xd6\x2e\xca\xd4\xed\x4d\
\x36\xd0\xd8\xb8\x58\xc7\x6f\x57\xcd\xda\x74\xe6\xda\xf9\x37\x72\
\xb2\x58\x00\x00\x44\xb5\xdd\x7f\xcf\x67\xd2\xe4\xe4\xe0\xf4\xc4\
\xef\xc3\xaf\x5a\x78\x17\xf7\x3a\x91\xfc\xfa\x54\x0d\x54\x6a\x9c\
\xc7\x03\x4e\xb5\x53\xac\x42\x00\x00\x04\x16\x15\x1d\xe5\x32\x6c\
\xbe\xaf\x7f\x1e\x47\xe7\xb0\xe9\x9f\x8e\xaf\xe9\x5e\xa0\x70\x90\
\x34\x3f\xa9\x81\xde\x79\x70\xaf\xdc\x80\xd9\xc3\x03\x12\x13\x13\
\xad\xd5\x2b\x13\x00\x00\x31\x45\x3c\x8b\x2c\xd8\x6f\xc6\x17\x81\
\x9d\x9b\xb4\x1f\xf7\x5e\xd3\x0e\xbe\x86\x06\x7a\xff\xc9\xc3\x92\
\xfd\xbf\x1f\x76\x45\xed\xe2\x00\x00\x10\xdd\xba\x7d\x9b\xc6\xda\
\xeb\x6d\x9f\x19\x1a\xe8\xac\xf5\x3f\xae\x50\xbb\x20\x00\x00\x72\
\x8b\xe5\xbb\xd7\x4d\x36\x34\xd0\x4b\xc1\x57\x1a\xa8\x5d\x0c\x00\
\x00\xb9\x89\x6a\xbb\xf2\xab\x5e\xb9\x98\xae\x4a\x25\xb6\x8c\xd1\
\x82\xc0\xeb\x61\xba\x43\xc7\x83\x15\x7f\xde\x26\xf5\xbc\x74\x25\
\x4b\xb8\x28\xfe\xbc\x30\xbf\x0d\x5b\x2f\xe8\x22\xa3\xe2\x14\x7f\
\xde\xae\xed\xaa\xe8\xec\xed\x6d\x14\x7f\x5e\x98\xdf\xd2\xb5\x67\
\x14\x7f\x4e\x7d\x42\x42\x82\x2a\xef\x9e\xfc\x2e\x0e\x3a\x4f\x8f\
\x02\x6a\x3c\x35\xcc\x2c\xec\x49\xb4\x2a\xcf\x5b\xb8\x90\x33\xef\
\x21\x8d\xb0\xb1\x51\x67\xdd\x45\x8f\xe2\x2e\x3a\x27\x47\xf6\x09\
\xa3\x05\x7a\xbd\x8d\x2e\x3e\xfe\xb9\xb2\xcf\xa9\xe8\xb3\x01\x00\
\xa0\x11\x34\x50\x00\x00\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\
\x00\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\
\x00\x32\xd0\x40\x01\x00\x90\x81\x06\x0a\x00\x80\x0c\x34\x50\x00\
\x00\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\x00\x19\x68\xa0\x00\
\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\x00\x32\xd0\x40\x01\
\x00\x90\x81\x06\x0a\x00\x80\x0c\x34\x50\x00\x00\x64\xa0\x81\x02\
\x00\x20\x03\x0d\x14\x00\x00\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\
\x00\x40\x06\x1a\x28\x00\x00\x32\xd0\x40\x01\x00\x90\x81\x06\x0a\
\x00\x80\x0c\x34\x50\x00\x00\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\
\x00\x00\x19\x68\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\
\x00\x00\x32\xd0\x40\x01\x00\x90\x81\x06\x0a\x00\x80\x0c\x34\x50\
\x00\x00\x64\xa0\x81\x02\x00\x20\x03\x0d\x14\x00\x00\x19\xf4\xd6\
\xd6\xd6\xcf\xd5\x78\xe2\xeb\x37\xc2\x74\x09\x89\x89\x6a\x3c\x35\
\xcc\xec\x5e\x48\xb8\x2a\xcf\x7b\xf1\x72\x88\x2e\xe4\x7e\x84\x2a\
\xcf\x0d\xf3\x8a\x89\x51\xe5\xcf\x90\xee\xb0\x7f\xb0\xce\xd6\xd6\
\x46\x95\xe7\x86\x79\xc5\xc7\x2b\xff\x1e\x32\x7c\x03\x75\xb2\x77\
\x7c\x1a\x15\xf3\x2c\x9f\x92\x4f\x1c\x74\xf3\xb1\x61\x02\xe4\xba\
\x78\x25\x54\xed\x12\x90\xcb\x1d\x39\x71\x43\xed\x12\x90\x8b\x19\
\x1a\x68\xed\x0a\xaf\x6e\xde\x7f\xf6\xaf\x1e\x6a\x17\x03\x00\x40\
\x6e\x61\x68\xa0\x03\xda\xf5\xe9\xe3\x7f\xf9\x74\xdb\xa8\xe8\x28\
\x17\xb5\x0b\x02\x00\x40\x74\xbd\x5a\x77\x1b\x62\x68\xa0\x36\x36\
\x36\x71\x4b\x86\xcf\x29\xf6\xc9\x77\x43\x82\x9e\x44\x3e\x75\x53\
\xbb\x30\x00\x96\x46\x5a\x1f\xc2\x4a\xed\x22\x80\x4c\xe9\xf7\xce\
\x87\x9f\xbe\x51\xab\xd9\xfc\xa4\xb5\x70\xed\x6d\xed\xa2\x7e\x19\
\x31\xd7\x7d\xe7\x89\x3f\xfb\xcd\xff\xed\x97\x1f\xd5\x2c\x0e\x80\
\xa5\xa1\x79\x42\x7c\x5e\x45\x4b\x9d\x99\xd0\x7b\x74\x23\x47\x7b\
\x07\xc3\x9a\x93\xa9\x36\x63\x91\xba\xaa\x34\xfd\x76\x78\xfb\x17\
\x3f\xef\x58\xf5\x9d\xf2\x25\x02\x00\x20\x8e\x32\xc5\x4a\x9f\xf2\
\xed\x39\xfc\x8d\xbc\x4e\x79\x1e\x26\x9f\x9f\xe6\x76\xa0\xef\xd4\
\x6b\x3d\x43\x9a\x1e\x3e\x7d\x54\xfc\x87\x4d\x8b\x96\x9c\xbd\x76\
\xbe\x65\xce\x97\x89\x5c\x8d\xa5\x70\x00\x34\x22\x9f\x53\xde\x07\
\x9f\xb6\xed\xf5\xc9\xeb\x15\x6b\x6e\x4c\xeb\x36\x19\xee\x48\xc1\
\x35\x5f\x81\xdb\x52\xe7\x35\x5e\xfe\xf3\xf4\xc1\x8f\x36\x1d\xda\
\x36\xfc\xf6\x83\x3b\xaf\x98\xab\x50\x68\x04\xcd\x13\x40\x2e\x25\
\xfd\x8c\xd9\xac\x46\xa3\x25\xed\x1b\xbc\x35\xb5\x90\x8b\xeb\xcd\
\xcc\xdc\x27\xcb\x7b\x22\x6a\x56\xa3\xe1\xcf\xd2\x64\xbc\x1c\x1d\
\x1b\x9d\xe7\xf4\x3f\x7f\xb7\x3a\x71\xf9\x4c\x9b\x93\x57\xce\xb4\
\x89\x78\x16\x59\x20\xab\x8f\x09\x00\x80\x52\x7c\xbc\x2a\xee\xad\
\x51\xb6\xea\xf6\x3a\x2f\xbe\x5d\xba\x17\x74\x0b\x94\xfb\x38\xd9\
\xde\x95\x9f\x83\x9d\x43\x44\xdd\x4a\xb5\x37\x48\x53\x76\x1f\x0b\
\x00\x80\xdc\x82\x7d\xe1\x02\x00\x20\x03\x0d\x14\x00\x00\x19\x68\
\xa0\x00\x00\xc8\x40\x03\x05\x00\x40\x06\x1a\x28\x00\x00\x32\xd0\
\x40\x01\x00\x90\x81\x06\x0a\x00\x80\x0c\x34\x50\x00\x00\x64\xa0\
\x81\x02\x00\x20\x03\x0d\x14\x00\x00\x19\x68\xa0\x00\x00\xc8\x40\
\x03\x05\x00\x40\x06\xd9\x0d\xf4\xc1\x93\x87\x1e\x27\x2e\x9f\x79\
\xe7\x52\xf0\x95\x06\x37\x42\x6e\x56\x96\x8e\x1b\x1a\x1d\x1b\xe3\
\x9c\xe6\x1d\x38\xd4\x15\x00\x40\x49\x89\x69\x5f\xe5\x92\x27\xdf\
\x7d\xf7\x02\x85\x03\xa5\x83\x65\x57\xf3\xae\xbc\xfb\xd5\x72\xd5\
\xb6\x65\xf5\xe1\xb3\xd4\x40\x97\xef\x5e\x37\x79\xe3\xc1\xad\x23\
\xb2\xfa\x24\x00\x00\x88\x24\xec\xe9\xa3\x62\xd2\x14\x10\x7c\xb5\
\xfe\xef\x47\x77\x7e\x6e\x9c\x5f\xbc\x50\xd1\xcb\xbd\xde\xfc\x60\
\x48\x8d\xb2\x55\x76\x64\xf4\x18\x19\x36\xd0\xf0\xa8\x08\xd7\x71\
\xcb\xa6\x6d\xbf\x76\x3b\xb0\x56\x76\x0b\x06\x00\x40\x64\xb7\x1f\
\xdc\x7d\x65\xfc\x8b\x9e\x27\x9d\x7f\xbf\x59\xc7\x31\x9d\x1a\xb7\
\x9d\x90\xd6\x6d\xd3\x6d\xa0\x4b\x77\xae\x99\xba\xf9\xaf\x6d\xc3\
\xcc\x5d\x20\x00\x00\xa2\x5b\xf5\xe7\x86\xf1\xd2\x34\xb3\xff\xc4\
\x6a\x9e\x45\x4a\x9e\x7b\xf9\xfa\x34\x1b\xe8\xb4\x35\x73\xd6\x1f\
\xb9\xe8\xdf\x31\x67\xcb\x03\x00\x40\x6c\x43\xe6\x8e\x3a\x3b\xb6\
\xc7\xb0\x56\xd5\xcb\x56\xd9\x99\x7c\xbe\xc9\x06\xba\xf5\xe8\xce\
\xc1\x34\x4f\x00\x00\xfe\x33\x71\xc5\x77\xbf\xaf\x19\xbb\xd8\x59\
\x6f\xa3\x8f\x35\xce\x33\xdd\x40\x8f\xec\x18\xac\x58\x55\x00\x00\
\x08\xee\x79\x42\x82\xfe\x8f\xe3\xbb\x07\xbc\x53\xb7\xf5\x0c\xe3\
\xbc\x54\x0d\x54\x5a\x69\xe8\xfe\xe3\x07\xa5\x94\x2d\x0d\x00\x00\
\xb1\x05\xde\x09\xae\x9e\xfc\x72\xaa\x06\x9a\xd7\x29\xcf\x43\x7b\
\x5b\xfb\xc8\x98\xb8\x74\xb6\xe9\x04\x00\xc0\xc2\x14\x72\x29\x78\
\x33\xf9\x65\x93\x8b\x70\x9b\x54\xaf\xbf\x6c\x87\xff\x9f\x9f\x2a\
\x53\x12\x00\x00\xe2\x6b\x58\xb5\xee\xca\xe4\x97\x4d\x36\xd0\x4f\
\xda\x7c\xf8\x99\x7f\xc0\xe9\xb6\x61\xe1\x8f\x8a\x29\x53\x16\x00\
\x00\xe2\xea\xda\xb4\xc3\x37\x25\xdd\x4a\x5c\x4c\x3e\x2f\xcd\xcd\
\x58\x16\x0f\x9f\x5d\xbc\xff\xac\x61\x57\xee\x3c\xbc\x57\x2e\xe7\
\x4b\x03\x00\x40\x4c\xdd\x9a\x77\x1c\xdd\xb1\x51\xdb\x89\x2f\xcf\
\x4f\x77\x47\x0a\x73\x07\x4f\x2b\xff\xdb\xe1\xed\x5f\xfc\xbc\x63\
\xd5\x77\x39\x57\x1a\x00\x00\xe2\x29\xe4\xe2\x7a\x63\x52\xdf\x31\
\x0d\x0a\xbf\x38\x35\x75\x7d\x86\xbb\xf2\x7b\xa7\x5e\xeb\x19\xd2\
\xf4\xe7\xe9\x03\xbd\xe6\x6d\x5e\xb2\x30\x21\x31\xc1\xc6\xfc\x65\
\x02\x00\x20\x86\xaa\xde\x95\x77\x0f\x6c\xdf\xb7\x97\x6b\xbe\x82\
\xb7\xd2\xbb\x5d\xa6\x77\x26\xdf\xac\x46\xa3\x25\xd2\x24\x9d\x3f\
\x1f\x14\xd0\xe4\xcf\x53\x07\x7a\x1d\xbb\x74\xb2\x3d\x6b\xeb\x02\
\x00\x72\xb3\x92\x6e\x25\x2e\x34\xa8\x5a\x67\x75\xab\x5a\xcd\xe6\
\xe7\x71\x74\x0e\xcb\xec\xfd\xfe\x0f\x0b\x73\x2b\xcb\x04\x87\x68\
\x0e\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x0b\
\x0a\x17\x74\xe0\
\x00\x62\
\x00\x72\x00\x65\x00\x65\x00\x64\x00\x65\x00\x72\x00\x5f\x00\x6d\x00\x61\x00\x70\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x30\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x30\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x7f\xfd\xcd\xc3\xb8\
"
qt_version = QtCore.qVersion().split('.')
if qt_version < ['5', '8', '0']:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
qInitResources()
| 62.634538 | 103 | 0.727131 |
c6e972384085a17d4254d8b48954d37e8355bbe9 | 5,503 | py | Python | api/telegram.py | ongzhixian/python-apps | 11a0d0ce656a7e9d7bdff18dd29feaa2bb436ae6 | [
"MIT"
] | null | null | null | api/telegram.py | ongzhixian/python-apps | 11a0d0ce656a7e9d7bdff18dd29feaa2bb436ae6 | [
"MIT"
] | null | null | null | api/telegram.py | ongzhixian/python-apps | 11a0d0ce656a7e9d7bdff18dd29feaa2bb436ae6 | [
"MIT"
] | null | null | null | import json
import logging
import os
import pdb
import re
from helpers.app_helpers import *
from helpers.page_helpers import *
from helpers.jinja2_helpers import *
from helpers.telegram_helpers import *
#from main import *
#from flask import request
################################################################################
# Setup helper functions
################################################################################
################################################################################
# Setup routes
################################################################################
| 35.050955 | 123 | 0.623296 |
c6e9c16512d69ea6fa5eab9288773894d5292bcf | 102 | py | Python | garage/utils/LED-on.py | 1337DS/SmartGarage | 1be4ad010653fc358e59417a26cd34e2146bdbf7 | [
"Apache-2.0"
] | 1 | 2022-02-09T10:36:43.000Z | 2022-02-09T10:36:43.000Z | garage/utils/LED-on.py | 1337DS/SmartGarage | 1be4ad010653fc358e59417a26cd34e2146bdbf7 | [
"Apache-2.0"
] | null | null | null | garage/utils/LED-on.py | 1337DS/SmartGarage | 1be4ad010653fc358e59417a26cd34e2146bdbf7 | [
"Apache-2.0"
] | null | null | null | import RPi.GPIO as GPIO
# Use Broadcom (BCM) channel numbering rather than physical board pin numbers.
GPIO.setmode(GPIO.BCM)
# Configure BCM pin 26 as a digital output (presumably the LED pin, per the
# script name "LED-on" -- TODO confirm wiring).
GPIO.setup(26, GPIO.OUT)
# Drive the pin high to switch the LED on; the state persists after the
# script exits because no GPIO.cleanup() is called.
GPIO.output(26, GPIO.HIGH)
| 12.75 | 26 | 0.735294 |
c6eb3b19d050576ce9764d0276a806ecdcc82b5f | 2,456 | py | Python | experiments/bayesopt/run_direct_surrogate.py | lebrice/RoBO | 0cb58a1622d3a540f7714b239f0cedf048b6fd9f | [
"BSD-3-Clause"
] | 455 | 2015-04-02T06:12:13.000Z | 2022-02-28T10:54:29.000Z | experiments/bayesopt/run_direct_surrogate.py | lebrice/RoBO | 0cb58a1622d3a540f7714b239f0cedf048b6fd9f | [
"BSD-3-Clause"
] | 66 | 2015-04-07T15:20:55.000Z | 2021-06-04T16:40:46.000Z | experiments/bayesopt/run_direct_surrogate.py | lebrice/RoBO | 0cb58a1622d3a540f7714b239f0cedf048b6fd9f | [
"BSD-3-Clause"
] | 188 | 2015-04-14T09:42:34.000Z | 2022-03-31T21:04:53.000Z | import os
import sys
import DIRECT
import json
import numpy as np
from hpolib.benchmarks.ml.surrogate_svm import SurrogateSVM
from hpolib.benchmarks.ml.surrogate_cnn import SurrogateCNN
from hpolib.benchmarks.ml.surrogate_fcnet import SurrogateFCNet
# Command line: <run_id> <benchmark-name>.
run_id = int(sys.argv[1])
benchmark = sys.argv[2]
# Optimisation budget kept for the offline evaluation below.
n_iters = 50
n_init = 2
output_path = "./experiments/RoBO/surrogates"
# Instantiate the surrogate benchmark requested on the command line.
if benchmark == "svm_mnist":
    b = SurrogateSVM(path="/ihome/kleinaa/devel/git/HPOlib/surrogates/")
elif benchmark == "cnn_cifar10":
    b = SurrogateCNN(path="/ihome/kleinaa/devel/git/HPOlib/surrogates/")
elif benchmark == "fcnet_mnist":
    b = SurrogateFCNet(path="/ihome/kleinaa/devel/git/HPOlib/surrogates/")
else:
    # Fail fast with a clear message instead of a NameError on ``b`` later.
    raise ValueError("unknown benchmark %r; expected one of "
                     "svm_mnist, cnn_cifar10, fcnet_mnist" % benchmark)
info = b.get_meta_information()
# Observations gathered during the run; presumably the ``wrapper`` objective
# passed to DIRECT.solve (defined elsewhere in this file, not shown in this
# chunk) appends each evaluated configuration/value here -- TODO confirm.
X = []
y = []
# Dimension and bounds of the function
bounds = b.get_meta_information()['bounds']
dimensions = len(bounds)
lower = np.array([i[0] for i in bounds])
upper = np.array([i[1] for i in bounds])
start_point = (upper-lower)/2
# Run DIRECT; maxT is generously doubled so the maxf function-evaluation
# budget is the binding limit.
x, _, _ = DIRECT.solve(wrapper,
                       l=[lower],
                       u=[upper],
                       maxT=n_iters*2,
                       maxf=n_iters)
# Keep only the first n_iters evaluations so runs stay comparable.
X = X[:n_iters]
y = y[:n_iters]
fvals = np.array(y)
# Incumbent bookkeeping: best configuration/value seen so far at each step.
incs = []
incumbent_val = []
curr_inc_val = sys.float_info.max
inc = None
# Track the running best (incumbent) configuration after every step.
for step, fval in enumerate(fvals):
    if fval < curr_inc_val:
        curr_inc_val = fval
        inc = X[step]
    incumbent_val.append(curr_inc_val)
    incs.append(inc)
# Offline Evaluation
test_error = []
runtime = []
cum_cost = 0
results = dict()
for i, inc in enumerate(incs):
    # Test performance of the incumbent after step i. Use a dedicated name
    # here: the original code re-bound ``y`` (the list of observed function
    # values), so results["y"] below ended up holding only the last test
    # error instead of the observations.
    err = b.objective_function_test(np.array(inc))["function_value"]
    test_error.append(err)
    # Compute the time it would have taken to evaluate this configuration
    c = b.objective_function(np.array(X[i]))["cost"]
    cum_cost += c
    runtime.append(cum_cost)
# Estimate the runtime as the optimization overhead + estimated cost
results["runtime"] = runtime
results["test_error"] = test_error
results["method"] = "direct"
results["benchmark"] = benchmark
results["run_id"] = run_id
results["incumbents"] = incs
results["incumbent_values"] = incumbent_val
results["X"] = X
results["y"] = y
p = os.path.join(output_path, benchmark, "direct")
os.makedirs(p, exist_ok=True)
# ``with`` guarantees the JSON file is flushed and closed (the original
# handle was leaked).
with open(os.path.join(p, '%s_run_%d.json' % (benchmark, run_id)), 'w') as fh:
    json.dump(results, fh)
| 24.078431 | 74 | 0.678339 |
c6eb612c8a8c4eac0f2f977fa8c04f601c65f1a7 | 1,197 | py | Python | calls/delete_call_feedback_summary.py | mickstevens/python3-twilio-sdkv6-examples | aac0403533b35fec4e8483de18d8fde2d783cfb2 | [
"MIT"
] | 1 | 2018-11-23T20:11:27.000Z | 2018-11-23T20:11:27.000Z | calls/delete_call_feedback_summary.py | mickstevens/python3-twilio-sdkv6-examples | aac0403533b35fec4e8483de18d8fde2d783cfb2 | [
"MIT"
] | null | null | null | calls/delete_call_feedback_summary.py | mickstevens/python3-twilio-sdkv6-examples | aac0403533b35fec4e8483de18d8fde2d783cfb2 | [
"MIT"
] | null | null | null | # *** Delete Call Feedback Summary ***
# Code based on https://www.twilio.com/docs/voice/api/call-quality-feedback
# Download Python 3 from https://www.python.org/downloads/
# Download the Twilio helper library from https://www.twilio.com/docs/python/install
import os
from twilio.rest import Client
# from datetime import datetime | not required for this example
import logging
#write requests & responses from Twilio to log file, useful for debugging:
# Append-mode file logging at DEBUG level; the Twilio client logs its HTTP
# traffic through the standard logging framework, so it lands here too.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s',
                    filename='/usr/local/twilio/python3/sdkv6x/calls/logs/call_feedback.log',
                    filemode='a')
# Your Account Sid and Auth Token from twilio.com/console & stored in Mac OS ~/.bash_profile in this example
# NOTE: environment variable *names* must not include the shell "$" prefix;
# "$VAR" is shell expansion syntax. With the "$" included, os.environ.get()
# always returned None and authentication against Twilio failed.
account_sid = os.environ.get('TWILIO_ACCOUNT_SID')
auth_token = os.environ.get('TWILIO_AUTH_TOKEN')
client = Client(account_sid, auth_token)
# A list of call feedback summary parameters & their permissable values, comment out (#) those lines not required:
# FSe6b77c80b547957f8ab7329b5c0b556c
# Delete the feedback summary with the given "FS..." SID; replace the
# placeholder with a real SID before running. Presumably raises
# TwilioRestException on failure -- TODO confirm against the SDK docs.
client.calls \
    .feedback_summaries("FSxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") \
    .delete()
| 44.333333 | 114 | 0.734336 |
c6f0d37f8bd7df7e6ea000ba0009d2402adc88b8 | 8,523 | py | Python | z42/z42/web/boot/css_js.py | jumploop/collection_python | f66f18dc5ae50fce95679e0f4aee5e28b2543432 | [
"MIT"
] | null | null | null | z42/z42/web/boot/css_js.py | jumploop/collection_python | f66f18dc5ae50fce95679e0f4aee5e28b2543432 | [
"MIT"
] | null | null | null | z42/z42/web/boot/css_js.py | jumploop/collection_python | f66f18dc5ae50fce95679e0f4aee5e28b2543432 | [
"MIT"
] | null | null | null | # coding:utf-8
import _env
from os.path import join, dirname, abspath, exists, splitext
from os import walk, mkdir, remove, makedirs
from collections import defaultdict
from hashlib import md5
from glob import glob
from base64 import urlsafe_b64encode
import envoy
import os
from tempfile import mktemp
from json import dumps
from z42.web.lib.qbox.uploader import QINIU
import re
from z42.config import QINIU as _QINIU, DEBUG
from extract import extract_map
# for k, v in CSS_IMG2URL.iteritems():
# txt = txt.replace(k, v)
# Output directory for built assets, named after the QINIU CDN host.
BULID = '/tmp/%s'%_QINIU.HOST
BULID_EXIST = set(glob(BULID + '/*'))
# Source path -> content hash; presumably filled by run() (defined earlier
# in this file, not shown in this chunk) -- TODO confirm.
PATH2HASH = {}
if not exists(BULID):
    mkdir(BULID)
    # Python 2 octal literal: world-writable build directory.
    os.chmod(BULID, 0777)
#with open(join(_env.PREFIX, 'js/_lib/google_analytics.js'), 'w') as google_analytics:
#    google_analytics.write(
#        """_gaq=[['_setAccount', '%s'],['_trackPageview']];""" % GOOGLE_ANALYTICS)
CSS_IMG2URL = {}
#@import url(ctrl/main.css);
#@import url(ctrl/zsite.css);
#@import url(ctrl/feed.css);
# Build stylesheets, then scripts.
run('css')
run('js')
# for i in BULID_EXIST - set(BULID + '/' + i for i in PATH2HASH.itervalues()):
#     if i.endswith('.css') or i.endswith('.js'):
#         print 'remove', i
#         remove(i)
# Group (asset name, content hash) pairs by their first-level directory
# under PREFIX (Python 2 code: iteritems()).
init = defaultdict(list)
for file_name, hash in PATH2HASH.iteritems():
    dirname, file_name = file_name[len(_env.PREFIX) + 1:].split('/', 1)
    init[dirname].append((file_name.rsplit('.', 1)[0], hash))
# Emit a _hash_.py module per asset directory mapping asset names to either
# a local debug URL or the content-hashed CDN URL.
for suffix, flist in init.iteritems():
    with open(join(_env.PREFIX, suffix, '_hash_.py'), 'w') as h:
        h.write("""#coding:utf-8\n
import _env
__HASH__ = {
""")
        # One dict entry per asset; the trailing "#..." comment in the
        # generated file holds the identifier-safe variable name.
        for name, hash in flist:
            h.write(
                """    "%s" : '%s', #%s\n""" % (
                    name,
                    hash,
                    name.rsplit('.', 1)[0].replace(
                        '.', '_').replace('-', '_').replace('/', '_')
                )
            )
        h.write('}')
        # Footer: when the generated module is imported it materialises one
        # module-level variable per asset (debug path or CDN URL).
        h.write("""
from z42.config import DEBUG, HOST, QINIU
from os.path import dirname,basename,abspath
__vars__ = vars()
def _():
    for file_name, hash in __HASH__.iteritems():
        if DEBUG:
            suffix = basename(dirname(__file__))
            value = "/%s/%s.%s"%(suffix, file_name, suffix)
        else:
            value = "//%s/%s"%(QINIU.HOST, hash)
        name = file_name.replace('.', '_').replace('-', '_').replace('/', '_')
        __vars__[name] = value
_()
del __vars__["_"]
""")
| 30.010563 | 96 | 0.516837 |
c6f1e3f027d95fbea317bf8aa4166e874befc948 | 5,693 | py | Python | controllers/transactions_controller.py | JeremyCodeClan/spentrack_project | 455074446b5b335ea77933c80c43745fcad1171c | [
"MIT"
] | null | null | null | controllers/transactions_controller.py | JeremyCodeClan/spentrack_project | 455074446b5b335ea77933c80c43745fcad1171c | [
"MIT"
] | null | null | null | controllers/transactions_controller.py | JeremyCodeClan/spentrack_project | 455074446b5b335ea77933c80c43745fcad1171c | [
"MIT"
] | null | null | null | from flask import Blueprint, Flask, render_template, request, redirect
from models.transaction import Transaction
import repositories.transaction_repository as transaction_repo
import repositories.merchant_repository as merchant_repo
import repositories.tag_repository as tag_repo
transactions_blueprint = Blueprint("transactions", __name__) | 40.664286 | 129 | 0.657298 |
c6f1fc0edc1a1464fe8ec814304b412c4369a1d8 | 86,261 | py | Python | Welcomer 6.20/modules/core.py | TheRockettek/Welcomer | 60706b4d6eec7d4f2500b3acc37530e42d846532 | [
"MIT"
] | 12 | 2019-09-10T21:31:51.000Z | 2022-01-21T14:31:05.000Z | Welcomer 6.20/modules/core.py | TheRockettek/Welcomer | 60706b4d6eec7d4f2500b3acc37530e42d846532 | [
"MIT"
] | null | null | null | Welcomer 6.20/modules/core.py | TheRockettek/Welcomer | 60706b4d6eec7d4f2500b3acc37530e42d846532 | [
"MIT"
] | 1 | 2021-09-17T09:03:54.000Z | 2021-09-17T09:03:54.000Z | import asyncio
import copy
import csv
import io
import math
from math import inf
import os
import sys
import time
import traceback
import logging
from importlib import reload
from datetime import datetime
import logging
import aiohttp
import discord
import requests
import json
import ujson
from discord.ext import commands
from rockutils import rockutils
import uuid
import handling
def should_cache(self, guildinfo):
    """Decide whether a guild's config needs to stay cached.

    True when any cache-relevant section has its ``'e'`` flag set
    (presumably "enabled" -- TODO confirm) or any role-react rule exists.
    Evaluation short-circuits in the same order as the original chain.
    """
    if guildinfo['a']['e']:
        return True
    if len(guildinfo['rr']) > 0:
        return True
    if guildinfo['tr']['e']:
        return True
    if guildinfo['am']['e']:
        return True
    return guildinfo['s']['e']
def get_emote(self, name, fallback=":grey_question:"):
    """Look up a custom emote string by name.

    On first use (no ``bot.emotes`` cache yet) the cache is loaded from
    cfg/emotes.json, or rebuilt from the configured emote server / other
    shards when the file is missing or empty. Returns ``fallback`` for
    unknown emote names.
    """
    if getattr(self.bot, "emotes", None) is None:
        # ``data`` must exist even when load_json raises below; the
        # original left it unbound on that path, so the ``if not data``
        # check crashed with UnboundLocalError.
        data = None
        try:
            data = rockutils.load_json("cfg/emotes.json")
        except Exception as e:
            exc_info = sys.exc_info()
            traceback.print_exception(*exc_info)
            rockutils.prefix_print(
                f"Failed to retrieve emotes.json: {e}",
                prefix_colour="light red")
        if not data:
            guild = self.bot.get_guild(
                self.bot.config['bot']['emote_server'])
            if guild:
                emotes = self.bot.serialiser.emotes(guild)
                if emotes[0]:
                    emotelist = {}
                    for emote in emotes:
                        emotelist[emote['name']] = emote['str']
                    rockutils.save_json("cfg/emotes.json", emotelist)
                else:
                    # Ask the shard that owns the emote server to dump the
                    # emotes, then poll until the file shows up on disk.
                    self.bot.blocking_broadcast(
                        "emotesdump", "*", args="", timeout=10)
                    while not os.path.exists("cfg/emotes.json"):
                        try:
                            data = rockutils.load_json("cfg/emotes.json")
                        except BaseException:
                            pass
                # NOTE(review): ``emotelist`` is unbound when the broadcast
                # branch above ran (only ``data`` was re-read) -- this looks
                # like a latent NameError; confirm intended behaviour.
                setattr(self.bot, "emotes", emotelist)
            else:
                setattr(self.bot, "emotes", data)
        else:
            # File loaded successfully: cache it on the bot. The original
            # never set the attribute on this path, so the return below
            # raised AttributeError even though the load succeeded.
            setattr(self.bot, "emotes", data)
    # # sometimes will save it as a list with a table inside, precaution
    # if type(self.bot.emotes) == list:
    #     setattr(self.bot, "emotes", self.bot.emotes[0])
    return self.bot.emotes.get(name, fallback)
def setup(bot):
    """Extension entry point: attach the WelcomerCore cog and initialise
    per-feature guild caches plus bot-level state.

    Presumably called by discord.py's extension loader with the bot
    instance -- TODO confirm.
    """
    # One cache sub-dict per feature; existingdict() only creates a
    # sub-dict when its key is missing, so a reload keeps existing entries.
    caches = [
        "prefix",
        "guilddetails",
        "rules",
        "analytics",
        "channels",
        "serverlock",
        "staff",
        "tempchannel",
        "autorole",
        "rolereact",
        "leaver",
        "freerole",
        "timeroles",
        "namepurge",
        "welcomer",
        "stats",
        "automod",
        "borderwall",
        "customcommands",
        "music",
        "polls",
        "logging",
        "moderation",
        "activepunishments"
    ]
    for name in caches:
        existingdict(bot.cache, name, {})
    core = WelcomerCore(bot)
    # Expose the cog's callables directly on the bot object, skipping
    # event handlers ("on_*") except on_message_handle.
    for key in dir(core):
        if not ("on_" in key[:3] and key != "on_message_handle"):
            value = getattr(core, key)
            # '"_" not in key[0]' skips names *starting* with an underscore.
            if callable(value) and "_" not in key[0]:
                setattr(bot, key, value)
                if not hasattr(bot, key):
                    print(f"I called set for {key} but its not set now")
    # Drop the built-in help command (presumably replaced by a custom one
    # elsewhere -- TODO confirm) and register the cog.
    bot.remove_command("help")
    bot.add_cog(core)
    # Bot-level state; chunk/lock caches are preserved across reloads.
    if not hasattr(bot, "chunkcache"):
        setattr(bot, "chunkcache", {})
    if not hasattr(bot, "lockcache"):
        setattr(bot, "lockcache", {})
    setattr(bot, "ranonconnect", False)
    setattr(bot, "cachemutex", False)
    setattr(bot, "serialiser", DataSerialiser(bot))
    setattr(bot, "emotes", rockutils.load_json("cfg/emotes.json"))
    # Default per-user and per-guild configuration templates.
    default_data = rockutils.load_json("cfg/default_user.json")
    setattr(bot, "default_user", default_data)
    default_data = rockutils.load_json("cfg/default_guild.json")
    setattr(bot, "default_guild", default_data)
    bot.reload_data("cfg/config.json", "config")
    # Hot-reload the message-handling module alongside this extension.
    reload(handling)
| 42.222712 | 327 | 0.48417 |
c6f49b93679334772aa9bf531c4d72e0b150e6e1 | 1,225 | py | Python | evalml/tests/data_checks_tests/test_utils.py | Mahesh1822/evalml | aa0ec2379aeba12bbd0dcaaa000f9a2a62064169 | [
"BSD-3-Clause"
] | null | null | null | evalml/tests/data_checks_tests/test_utils.py | Mahesh1822/evalml | aa0ec2379aeba12bbd0dcaaa000f9a2a62064169 | [
"BSD-3-Clause"
] | 1 | 2022-02-19T12:59:09.000Z | 2022-02-19T12:59:09.000Z | evalml/tests/data_checks_tests/test_utils.py | Mahesh1822/evalml | aa0ec2379aeba12bbd0dcaaa000f9a2a62064169 | [
"BSD-3-Clause"
] | null | null | null | import pytest
from evalml.data_checks import DataCheckActionCode
from evalml.data_checks.utils import handle_data_check_action_code
from evalml.problem_types import ProblemTypes
| 34.027778 | 99 | 0.755102 |
c6f503162b0ef4701efc6276ebdf2a288cdafb1f | 3,480 | py | Python | figures/bothspectra.py | DanielAndreasen/Paper-updated-nir-linelist | a4094a1d73a58c1ee1597c6df8a11b0b9ce17777 | [
"MIT"
] | null | null | null | figures/bothspectra.py | DanielAndreasen/Paper-updated-nir-linelist | a4094a1d73a58c1ee1597c6df8a11b0b9ce17777 | [
"MIT"
] | null | null | null | figures/bothspectra.py | DanielAndreasen/Paper-updated-nir-linelist | a4094a1d73a58c1ee1597c6df8a11b0b9ce17777 | [
"MIT"
] | null | null | null | from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('ticks')
sns.set_context('paper', font_scale=1.7)
from plot_fits import get_wavelength, dopplerShift
from scipy.interpolate import interp1d
plt.rcParams['xtick.direction'] = 'in'
"""
Compare the spectrum of Arcturus with 10 Leo, plus have some Fe lines
identified.
"""
if __name__ == '__main__':
    # Wavelength windows (Angstrom) to inspect; only the second one is
    # actually plotted (see the ``i != 1`` filter below).
    regions = [[10000, 10100], [10130, 10230], [12200, 12300]]
    # Fe line positions (first column of the MOOG line list).
    lines = np.loadtxt('Felines.moog', usecols=(0,))
    wArcturus = get_wavelength(fits.getheader('ArcturusSummer.fits'))
    fArcturus = fits.getdata('ArcturusSummer.fits')
    w10Leo1 = get_wavelength(fits.getheader('10LeoYJ.fits'))
    f10Leo1 = fits.getdata('10LeoYJ.fits')
    w10Leo2 = get_wavelength(fits.getheader('10LeoH.fits'))
    f10Leo2 = fits.getdata('10LeoH.fits')
    w10Leo3 = get_wavelength(fits.getheader('10LeoK.fits'))
    f10Leo3 = fits.getdata('10LeoK.fits')
    # Shift each 10 Leo band to the rest frame (radial velocities in km/s).
    f10Leo1, w10Leo1 = dopplerShift(w10Leo1, f10Leo1, -82.53)
    f10Leo2, w10Leo2 = dopplerShift(w10Leo2, f10Leo2, -81.82)
    f10Leo3, w10Leo3 = dopplerShift(w10Leo3, f10Leo3, -81.37)
    for i, region in enumerate(regions):
        if i != 1:
            continue
        # Pick the 10 Leo band that fully covers this region; skip the
        # region when no band does.
        if (w10Leo1[0] <= region[0]) and (w10Leo1[-1] >= region[1]):
            w10Leo = w10Leo1
            f10Leo = f10Leo1
        elif (w10Leo2[0] <= region[0]) and (w10Leo2[-1] >= region[1]):
            w10Leo = w10Leo2
            f10Leo = f10Leo2
        elif (w10Leo3[0] <= region[0]) and (w10Leo3[-1] >= region[1]):
            w10Leo = w10Leo3
            f10Leo = f10Leo3
        else:
            continue
        # Boolean masks restricting both spectra and the line list to the
        # current region.
        i1 = (region[0] <= wArcturus) & (wArcturus <= region[1])
        i2 = (region[0] <= w10Leo) & (w10Leo <= region[1])
        i3 = (region[0] <= lines) & (lines <= region[1])
        w1, f1 = wArcturus[i1], fArcturus[i1]
        w2, f2 = w10Leo[i2], f10Leo[i2]
        plines = lines[i3]
        # Common wavelength grid: start at the later start and stop at the
        # earlier end so both interpolators stay within range.
        w0 = w1[0] if w1[0] != min((w1[0], w2[0])) else w2[0]
        wn = w1[-1] if w1[-1] != max((w1[-1], w2[-1])) else w2[-1]
        interp1 = interp1d(w1, f1, kind='linear')
        interp2 = interp1d(w2, f2, kind='linear')
        w = np.linspace(w0, wn, len(w1))
        f1 = interp1(w)
        f2 = interp2(w)
        fig = plt.figure(figsize=(12, 5))
        ax = fig.add_subplot(111)
        ax.tick_params('y', labelcolor='w', left='off')
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        # 10 Leo is offset downwards so the two spectra do not overlap.
        ax.plot(w, f1, label='Arcturus')
        ax.plot(w, f2-0.15, label='10 Leo')
        ax.plot(w, f1-f2+0.15, label='Difference')
        # Annotate Fe lines, alternating label height to avoid collisions
        # (with manual tweaks for lines 6 and 7).
        for j, line in enumerate(plines):
            if j%2 == 0:
                dy = -0.02
            else:
                dy = 0.02
            if j == 6:
                dy = 0.02
            elif j == 7:
                dy = -0.02
            # get_ymin is presumably defined elsewhere in this file (not
            # visible in this chunk) -- TODO confirm.
            ymin = get_ymin(line, (w1, f1), (w2, f2))
            plt.vlines(line, ymin, 1.04+dy, linestyles='dashed')
            plt.text(line, 1.04+dy, 'Fe')
        ax.set_xlabel(r'Wavelength [$\AA$]')
        ax.set_ylabel('Normalized flux')
        y1, _ = plt.ylim()
        plt.ylim(y1, 1.15)
        plt.legend(loc='best', frameon=False)
        plt.tight_layout()
        # plt.savefig('bothspectra.pdf')
        plt.show()
| 31.926606 | 70 | 0.561494 |
c6f5b57e9157f7c17bb6f3082af0b5d89d425e82 | 298 | py | Python | main.py | pesikj/DataAnalysisUsingPython | 00269a7a7b5388fbbdcf3ddadd951a80a07f9c3a | [
"MIT"
] | null | null | null | main.py | pesikj/DataAnalysisUsingPython | 00269a7a7b5388fbbdcf3ddadd951a80a07f9c3a | [
"MIT"
] | null | null | null | main.py | pesikj/DataAnalysisUsingPython | 00269a7a7b5388fbbdcf3ddadd951a80a07f9c3a | [
"MIT"
] | null | null | null | from statistical_hypothesis_testing.plots import plots_z_test
from statistical_hypothesis_testing.tails import Tail
#plots_z_test.create_critical_region_plot(alphas=[0.1, 0.05, 0.01], tails=Tail.RIGHT_TAILED)
plots_z_test.create_p_value_plot(0.5109,alpha=0.05,lang='cs', tails=Tail.RIGHT_TAILED)
| 42.571429 | 92 | 0.842282 |
c6f5b6dd280b07a2399dbf6e91ec39c3acaaae3c | 3,471 | py | Python | projects/migrations/0001_initial.py | Zefarak/illidius_plan | 78dd9cc4da374ff88fc507e4870712d87e9ff6c3 | [
"MIT"
] | 1 | 2019-02-18T14:31:57.000Z | 2019-02-18T14:31:57.000Z | projects/migrations/0001_initial.py | Zefarak/illidius_plan | 78dd9cc4da374ff88fc507e4870712d87e9ff6c3 | [
"MIT"
] | null | null | null | projects/migrations/0001_initial.py | Zefarak/illidius_plan | 78dd9cc4da374ff88fc507e4870712d87e9ff6c3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-21 04:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
| 47.547945 | 141 | 0.589455 |
c6f6ce9055d1d8634c3084a055d492122c9b4918 | 1,818 | py | Python | EnumLasso/paper/paper_thaliana.py | t-basa/LassoVariants | ead33ac83de19865a9553dbdda9a28aa5c781e44 | [
"MIT"
] | 12 | 2016-11-30T04:39:18.000Z | 2021-09-11T13:57:37.000Z | EnumLasso/paper/paper_thaliana.py | t-basa/LassoVariants | ead33ac83de19865a9553dbdda9a28aa5c781e44 | [
"MIT"
] | 2 | 2018-03-05T19:01:09.000Z | 2019-10-10T00:30:55.000Z | EnumLasso/paper/paper_thaliana.py | t-basa/LassoVariants | ead33ac83de19865a9553dbdda9a28aa5c781e44 | [
"MIT"
] | 6 | 2017-08-19T17:49:51.000Z | 2022-01-09T07:41:22.000Z | # -*- coding: utf-8 -*-
"""
@author: satohara
"""
import sys
sys.path.append('../')
import codecs
import numpy as np
import pandas as pd
from EnumerateLinearModel import EnumLasso
# data - x
fn = './data/call_method_32.b'
df = pd.read_csv(fn, sep=',', header=None)
data_id_x = np.array([int(v) for v in df.ix[1, 2:]])
gene_id = df.ix[2:, :1].values
gene_id = np.array([[int(v[0]), int(v[1])] for v in gene_id])
data = df.ix[2:, 2:].values
data[data=='-'] = 0
data[data=='A'] = 1
data[data=='T'] = 2
data[data=='G'] = 3
data[data=='C'] = 4
count = np.c_[np.sum(data == 1, axis=1), np.sum(data == 2, axis=1), np.sum(data == 3, axis=1), np.sum(data == 4, axis=1)]
c = np.argmax(count, axis=1) + 1
x = data.copy()
for i in range(data.shape[1]):
x[:, i] = 1 - (data[:, i] - c == 0)
# data - y
fn = './data/phenotype_published_raw.tsv'
with codecs.open(fn, 'r', 'Shift-JIS', 'ignore') as file:
df = pd.read_table(file, delimiter='\t')
y = df.ix[:, 41].values
# data - reordering, remove nan
idx = np.argsort(data_id_x)
x = x[:, idx]
idx = ~np.isnan(y)
x = x[:, idx].T
y = y[idx]
# data - training & test split
seed = 0
r = 0.8
np.random.seed(seed)
idx = np.random.permutation(x.shape[0])
m = int(np.round(x.shape[0] * r))
xte = x[idx[m:], :]
yte = y[idx[m:]]
x = x[idx[:m], :]
y = y[idx[:m]]
# EnumLasso
rho = 0.1
delta = 0.05
mdl = EnumLasso(rho=rho, warm_start=True, enumtype='k', k=50, delta=delta, save='paper_thaliana.npy', modeltype='regression', verbose=True)
mdl.fit(x, y)
print()
print('--- Enumerated Solutions ---')
print(mdl)
# evaluate
print('--- Mean Square Error / # of Non-zeros ---')
for i in range(len(mdl.obj_)):
a = mdl.a_[i]
b = mdl.b_[i]
z = xte.dot(a) + b
mse = np.mean((z - yte)**2)
print('Solution %3d: MSE = %f / NNZ = %d' % (i+1, mse, a.nonzero()[0].size))
| 24.90411 | 139 | 0.593509 |
c6f74625e459f6cfa2aca2f74b48bf8881d4641b | 8,309 | py | Python | lib/backup_service_client/models/bucket.py | sumedhpb/testrunner | 9ff887231c75571624abc31a3fb5248110e01203 | [
"Apache-2.0"
] | 14 | 2015-02-06T02:47:57.000Z | 2020-03-14T15:06:05.000Z | lib/backup_service_client/models/bucket.py | sumedhpb/testrunner | 9ff887231c75571624abc31a3fb5248110e01203 | [
"Apache-2.0"
] | 3 | 2019-02-27T19:29:11.000Z | 2021-06-02T02:14:27.000Z | lib/backup_service_client/models/bucket.py | sumedhpb/testrunner | 9ff887231c75571624abc31a3fb5248110e01203 | [
"Apache-2.0"
] | 155 | 2018-11-13T14:57:07.000Z | 2022-03-28T11:53:22.000Z | # coding: utf-8
"""
Couchbase Backup Service API
This is REST API allows users to remotely schedule and run backups, restores and merges as well as to explore various archives for all there Couchbase Clusters. # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
def to_str(self):
    """Returns the string representation of the model"""
    model_as_dict = self.to_dict()
    return pprint.pformat(model_as_dict)
def __repr__(self):
    """For `print` and `pprint`"""
    # Reuse the model's pretty-printed string form for its repr.
    representation = self.to_str()
    return representation
def __eq__(self, other):
    """Returns true if both objects are equal"""
    # Only Bucket instances can compare equal; compare attribute dicts.
    if isinstance(other, Bucket):
        return self.__dict__ == other.__dict__
    return False
def __ne__(self, other):
    """Returns true if both objects are not equal"""
    # Negate the (possibly reflected) equality comparison.
    are_equal = self == other
    return not are_equal
| 25.965625 | 178 | 0.563846 |
c6f93b1caf13cee134c81078e57fec4a501c2e10 | 1,618 | py | Python | funciones/app.py | christophermontero/estima-tu-proyecto | 19f533be203c9ac2c4383ded5a1664dd1d05d679 | [
"MIT"
] | 2 | 2021-05-29T16:57:17.000Z | 2021-06-13T18:39:24.000Z | funciones/app.py | christophermontero/estima-tu-proyecto | 19f533be203c9ac2c4383ded5a1664dd1d05d679 | [
"MIT"
] | 22 | 2021-05-22T18:23:40.000Z | 2021-12-18T21:09:59.000Z | funciones/app.py | christophermontero/estima-tu-proyecto | 19f533be203c9ac2c4383ded5a1664dd1d05d679 | [
"MIT"
] | null | null | null | from flask import Flask, jsonify, request
from db import db_session, init_db
from model import Funcion
# Flask application serving the transaction/merchant endpoints.
app = Flask(__name__)
# Emit compact JSON responses (no pretty-printing) even in debug mode.
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = False
# Presumably initialises the database schema at import time (from the db
# module) -- TODO confirm.
init_db()
if __name__ == "__main__":
    # NOTE(review): debug=True combined with host 0.0.0.0 exposes the
    # Werkzeug debugger to the whole network -- fine for local development
    # only; confirm this is never deployed as-is.
    app.run(host="0.0.0.0", debug=True)
| 25.68254 | 93 | 0.685414 |
c6f9a9602db33208c1f896b22af13200b9be42d9 | 309 | py | Python | onnx_script/check_onnx_model.py | abyssss52/pytorch-image-models | 6ed4124c610a73fc849e7e9567bc36cf5bf38ceb | [
"Apache-2.0"
] | null | null | null | onnx_script/check_onnx_model.py | abyssss52/pytorch-image-models | 6ed4124c610a73fc849e7e9567bc36cf5bf38ceb | [
"Apache-2.0"
] | null | null | null | onnx_script/check_onnx_model.py | abyssss52/pytorch-image-models | 6ed4124c610a73fc849e7e9567bc36cf5bf38ceb | [
"Apache-2.0"
] | null | null | null | import onnx
# Load the ONNX model
model = onnx.load("./mobilenetv2_new.onnx")
# model = onnx.load("../FaceAnti-Spoofing.onnx")
# Check that the IR is well formed
onnx.checker.check_model(model)
# Print a human readable representation of the graph.
# (printable_graph() only *returns* the string; the original call discarded
# its result, so nothing human-readable was ever shown.)
print(onnx.helper.printable_graph(model.graph))
# Raw protobuf dump of the graph, kept from the original script.
print(model.graph)
| 25.75 | 52 | 0.76699 |
c6fa00680fcfe377a498032a4d31cbf4682bc376 | 1,071 | py | Python | 2015/07/puzzle2.py | jsvennevid/adventofcode | c6d5e3e3a166ffad5e8a7cc829599f49607a1efe | [
"MIT"
] | null | null | null | 2015/07/puzzle2.py | jsvennevid/adventofcode | c6d5e3e3a166ffad5e8a7cc829599f49607a1efe | [
"MIT"
] | null | null | null | 2015/07/puzzle2.py | jsvennevid/adventofcode | c6d5e3e3a166ffad5e8a7cc829599f49607a1efe | [
"MIT"
] | null | null | null | import re
wires = {}
for i in open('day7.txt'):
set = re.match(r'([a-z0-9]+) -> ([a-z]+)',i)
if set:
wires[set.group(2)] = set.group(1)
op1 = re.match(r'(NOT) ([a-z0-9]+) -> ([a-z]+)',i)
if op1:
wires[op1.group(3)] = [op1.group(1), op1.group(2)]
op2 = re.match(r'([a-z0-9]+) (AND|OR|LSHIFT|RSHIFT) ([a-z0-9]+) -> ([a-z]+)',i)
if op2:
wires[op2.group(4)] = [op2.group(2), op2.group(1), op2.group(3)]
wires['b'] = str(visit('a', {}))
print 'a:', visit('a', {})
| 31.5 | 80 | 0.5845 |
c6fa99e51df1893798f6cb4d6c3cbd2091fbf05a | 7,167 | py | Python | src/visualization/plot_grid.py | davimnz/boa | 0546ad4df0ecabec1fd3beb1264cd0930dce13a9 | [
"MIT"
] | null | null | null | src/visualization/plot_grid.py | davimnz/boa | 0546ad4df0ecabec1fd3beb1264cd0930dce13a9 | [
"MIT"
] | null | null | null | src/visualization/plot_grid.py | davimnz/boa | 0546ad4df0ecabec1fd3beb1264cd0930dce13a9 | [
"MIT"
] | null | null | null | import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from math import cos, radians
def shift_position(pos, x_shift, y_shift) -> dict:
    """
    Moves nodes' position by (x_shift, y_shift)
    """
    shifted = {}
    for node, (x, y) in pos.items():
        shifted[node] = (x + x_shift, y + y_shift)
    return shifted
def convert_to_2d(latitude, longitude, center_latitude=50.0):
    """
    Converts (lat, long) to (x, y) using approximation for small areas.
    """
    earth_radius_km = 6373.0
    # Equirectangular approximation: shrink the longitude axis by the
    # cosine of the reference latitude.
    longitude_scale = cos(radians(center_latitude))
    return (earth_radius_km * longitude * longitude_scale,
            earth_radius_km * latitude)
def plot_stock_grid(data, position, supply_site_code,
                    sku_code, balance=False) -> None:
    """
    Plots a map containing the amount of stock in each location of a given
    grid: Hub, Depot or Distributor.

    :param data: stock table; must contain the columns indexed below.
    :param position: geopositioning table with 'code'/'latitude'/'longitude'.
    :param supply_site_code: hub (supply site) code identifying the grid.
    :param sku_code: SKU whose stock is plotted.
    :param balance: when True plot the optimised stock ('x_opt') instead of
        the observed 'Closing Stock'.
    """
    # Restrict the table to the requested grid and SKU.
    grid_table = data[(data['Supply Site Code'] == supply_site_code)]
    grid_table = grid_table[(grid_table['SKU'] == sku_code)]
    stock_mean = []
    positions = {}
    labels = {}
    colors = []
    color_dict = {"DEP": "#3f60e1",
                  "DIST": "#60e13f",
                  "HUB": "#e13f60",
                  "DEPOT": '#3f60e1'}
    # Column positions; itertuples() rows are offset by one because index 0
    # holds the DataFrame index.
    location_index = grid_table.columns.to_list().index('Location Code')
    if balance:
        stock_index = grid_table.columns.to_list().index('x_opt')
    else:
        stock_index = grid_table.columns.to_list().index('Closing Stock')
    type_index = grid_table.columns.to_list().index('Location Type')
    reorder_index = grid_table.columns.to_list().index('Reorder Point (Hl)')
    for row in grid_table.itertuples():
        location_code = row[location_index + 1]
        # Stock as a fraction of the reorder point, rounded to two decimals.
        stock = round(100 * row[stock_index + 1]
                      / row[reorder_index + 1]) / 100
        stock_mean.append(stock)
        # NOTE(review): 'type' shadows the builtin of the same name.
        type = row[type_index + 1]
        if location_code == supply_site_code:
            color = color_dict["HUB"]
            colors.append(color)
        else:
            color = color_dict[type]
            colors.append(color)
        # Project (lat, long) to planar drawing coordinates.
        # NOTE(review): 'latitude'/'longitude' appear to be one-element
        # Series here; confirm networkx accepts Series-based coordinates.
        position_row = position[position['code'] == location_code]
        latitude = position_row['latitude']
        longitude = position_row['longitude']
        position_2d = convert_to_2d(latitude, longitude)
        positions[location_code] = position_2d
        labels[location_code] = stock
    # Draw the numeric labels slightly above the nodes.
    positions_nodes = shift_position(positions, 0, 500)
    # Debug output: average relative stock over the grid.
    print(np.mean(stock_mean))
    grid = nx.Graph()
    for key, value in labels.items():
        grid.add_node(key, stock=value)
    nx.draw_networkx(grid, pos=positions, with_labels=False,
                     node_size=350, node_color=colors)
    nx.draw_networkx_labels(grid, pos=positions_nodes,
                            labels=labels, font_size=16)
    # Widen the y range a little so labels are not clipped.
    ylim = plt.ylim()
    plt.ylim(0.99 * ylim[0], 1.01 * ylim[1])
    # Legend (Portuguese labels).
    # NOTE(review): 'Depsito' looks mojibake-damaged ('Depósito'?) --
    # confirm the file's encoding before changing the literal.
    dep_legend = mpatches.Patch(color=color_dict["DEP"], label='Depsito')
    dist_legend = mpatches.Patch(color=color_dict["DIST"], label='CDD')
    hub_legend = mpatches.Patch(color=color_dict["HUB"], label="Hub")
    plt.legend(handles=[dep_legend, dist_legend, hub_legend], fontsize=20)
    plt.axis('off')
    plt.show()
def plot_exchange_map(data, exchange, position,
                      supply_site_code, sku_code) -> None:
    """
    Plots the optimal exchange map for a given grid.

    :param data: stock table (used for location types/colours).
    :param exchange: transfer table with 'Origin'/'Destiny'/'Amount'.
    :param position: geopositioning table.
        NOTE(review): currently unused inside this function.
    :param supply_site_code: hub (supply site) code identifying the grid.
    :param sku_code: SKU whose exchanges are plotted.
    """
    # Restrict both tables to the requested grid and SKU.
    exchange_table = exchange[(
        exchange['Supply Site Code'] == supply_site_code)]
    exchange_table = exchange_table[(exchange_table['SKU'] == sku_code)]
    grid_table = data[(data['Supply Site Code'] == supply_site_code)]
    grid_table = grid_table[(grid_table['SKU'] == sku_code)]
    # 'Hub' is an alias node used when the hub is a transfer *destination*.
    labels = {'Hub': 'Hub'}
    colors = {}
    color_dict = {"DEP": "#3f60e1", "DIST": "#60e13f", "HUB": "#e13f60"}
    # itertuples() rows are offset by one (index 0 is the DataFrame index).
    location_index = grid_table.columns.to_list().index('Location Code')
    type_index = grid_table.columns.to_list().index('Location Type')
    for row in grid_table.itertuples():
        location_code = row[location_index + 1]
        # NOTE(review): 'type' shadows the builtin of the same name.
        type = row[type_index + 1]
        if location_code == supply_site_code:
            color = color_dict["HUB"]
            colors[location_code] = color
        else:
            color = color_dict[type]
            colors[location_code] = color
        labels[location_code] = location_code
    grid = nx.DiGraph()
    for key, value in labels.items():
        grid.add_node(key, stock=value)
    nodes_with_edges = []
    origin_index = exchange_table.columns.to_list().index('Origin')
    destiny_index = exchange_table.columns.to_list().index('Destiny')
    amount_index = exchange_table.columns.to_list().index('Amount')
    # One directed, weighted edge per transfer.
    for row in exchange_table.itertuples():
        origin = row[origin_index + 1]
        destiny = row[destiny_index + 1]
        amount = round(row[amount_index + 1])
        # "Available" stock originates at the supply site itself.
        if origin == "Available":
            origin = supply_site_code
        # Transfers back into the supply site are drawn to the 'Hub' alias
        # so they do not collapse onto the origin node.
        if destiny == supply_site_code:
            destiny = 'Hub'
            colors['Hub'] = colors[supply_site_code]
        grid.add_edge(origin, destiny, weight=amount)
        nodes_with_edges.append(origin)
        nodes_with_edges.append(destiny)
    layout = nx.planar_layout(grid)
    # Offset for node-name labels so they do not overlap the nodes.
    layout_label = shift_position(layout, -0.03, 0.03)
    # Only draw nodes taking part in at least one exchange.
    nodes_with_edges = list(set(nodes_with_edges))
    nodes_colors = []
    nodes_labels = {}
    for node in nodes_with_edges:
        nodes_colors.append(colors[node])
        nodes_labels[node] = labels[node]
    nx.draw_networkx(grid, layout, node_color=nodes_colors,
                     nodelist=nodes_with_edges, with_labels=False,
                     arrowsize=20, node_size=400)
    grid_edge_labels = nx.get_edge_attributes(grid, 'weight')
    nx.draw_networkx_edge_labels(grid, layout,
                                 edge_labels=grid_edge_labels)
    nx.draw_networkx_labels(grid, pos=layout_label, labels=nodes_labels)
    # Legend (Portuguese labels; see the encoding NOTE in plot_stock_grid).
    dep_legend = mpatches.Patch(color=color_dict["DEP"], label='Depsito')
    dist_legend = mpatches.Patch(color=color_dict["DIST"], label='CDD')
    hub_legend = mpatches.Patch(color=color_dict["HUB"], label="Hub")
    plt.legend(handles=[dep_legend, dist_legend, hub_legend], fontsize=20)
    plt.axis('off')
    plt.show()
if __name__ == "__main__":
    # Input tables are ';'-separated with ',' as the decimal mark.
    unbalanced = pd.read_csv('data/data.csv', delimiter=';', decimal=',')
    balanced = pd.read_csv('output/distribution_output_cvxopt.csv',
                           delimiter=';', decimal=',')
    position = pd.read_csv('data/geopositioning.csv',
                           delimiter=';', decimal=',')
    exchange = pd.read_csv('output/exchanges_output.csv',
                           delimiter=';', decimal=',')
    # choose which grid to plot. The grid cannot be scenario 0
    supply_site_code = 'PL-1721'
    sku_code = 85023
    # plots unbalanced grid, balanced grid, and exchange map
    plot_stock_grid(unbalanced, position, supply_site_code, sku_code)
    plot_stock_grid(balanced, position, supply_site_code,
                    sku_code, balance=True)
    plot_exchange_map(unbalanced, exchange, position,
                      supply_site_code, sku_code)
| 34.960976 | 76 | 0.635412 |
c6fb2216661678548d14f34f7328e08d3f4c59ba | 1,254 | py | Python | my_project/urls.py | stripathi669/codepal-sample-login | f553cc7f7794dd20197b1df336ed7953ac7a62dc | [
"MIT"
] | 2 | 2017-04-23T08:54:09.000Z | 2017-12-19T17:51:38.000Z | my_project/urls.py | stripathi669/codepal-sample-login | f553cc7f7794dd20197b1df336ed7953ac7a62dc | [
"MIT"
] | null | null | null | my_project/urls.py | stripathi669/codepal-sample-login | f553cc7f7794dd20197b1df336ed7953ac7a62dc | [
"MIT"
] | 1 | 2019-10-01T17:51:13.000Z | 2019-10-01T17:51:13.000Z | """my_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.views.generic import TemplateView
from rest_framework_jwt.views import obtain_jwt_token
from registration.views import register_user_via_facebook, get_user_details
# URL routing table: Django admin, JWT auth, the registration API
# endpoints and a static home page.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Exchange username/password credentials for a JWT.
    url(r'^api-token-auth/', obtain_jwt_token),
    # Url for facebook signup
    url(r'^api/v1/user/register/facebook', register_user_via_facebook),
    # Url to fetch user details
    url(r'^api/v1/user/get/account', get_user_details),
    # Home page rendered from the home.html template.
    url(r'^$', TemplateView.as_view(template_name='home.html')),
]
| 32.153846 | 79 | 0.725678 |
c6fb42ccff41d5e02e75ca92305085547bd5ee39 | 3,870 | py | Python | datascripts/make_placescsv.py | NCI-NAACCR-Zone-Design/Louisiana | 42fb1d05c47ae01401ee3ac3cc68ff5e4f5d5c07 | [
"MIT"
] | null | null | null | datascripts/make_placescsv.py | NCI-NAACCR-Zone-Design/Louisiana | 42fb1d05c47ae01401ee3ac3cc68ff5e4f5d5c07 | [
"MIT"
] | 1 | 2020-03-05T23:20:38.000Z | 2020-03-10T18:03:31.000Z | datascripts/make_placescsv.py | NCI-NAACCR-Zone-Design/Louisiana | 42fb1d05c47ae01401ee3ac3cc68ff5e4f5d5c07 | [
"MIT"
] | null | null | null | #!/bin/env python3
from osgeo import ogr
import os
import csv
import settings
if __name__ == '__main__':
PlacesIntersector().run()
print("DONE")
| 39.896907 | 156 | 0.62093 |
c6fd01691eb418ac4d1818fca0bd68461092ddaa | 580 | py | Python | Google/google_organic_results/google_organic_ads/google_regular_ads/serpapi_scrape_google_ads.py | dimitryzub/blog-posts-archive | 0978aaa0c9f0142d6f996b81ce391930c5e3be35 | [
"CC0-1.0"
] | null | null | null | Google/google_organic_results/google_organic_ads/google_regular_ads/serpapi_scrape_google_ads.py | dimitryzub/blog-posts-archive | 0978aaa0c9f0142d6f996b81ce391930c5e3be35 | [
"CC0-1.0"
] | null | null | null | Google/google_organic_results/google_organic_ads/google_regular_ads/serpapi_scrape_google_ads.py | dimitryzub/blog-posts-archive | 0978aaa0c9f0142d6f996b81ce391930c5e3be35 | [
"CC0-1.0"
] | null | null | null | # scrapes both regular and shopping ads (top, right blocks)
# Query Google through SerpApi and pretty-print any ad results as JSON.
from serpapi import GoogleSearch
import json, os
# Search parameters; the SerpApi key is read from the API_KEY env variable.
params = {
    "api_key": os.getenv("API_KEY"),
    "engine": "google",
    "q": "buy coffee",
    "gl": "us",
    "hl": "en"
}
search = GoogleSearch(params)
results = search.get_dict()
# Regular (text) ads, when present in the response.
if results.get("ads", []):
    for ad in results["ads"]:
        print(json.dumps(ad, indent=2))
# Shopping ads; note the "not found" message covers only this block,
# not the regular-ads block above.
if results.get("shopping_results", []):
    for shopping_ad in results["shopping_results"]:
        print(json.dumps(shopping_ad, indent=2))
else:
    print("no shopping ads found.")
| 22.307692 | 59 | 0.639655 |
c6fd244b6ad93e904d3cfe0db3dd28977bc63c93 | 3,316 | py | Python | tomomibot/commands/start.py | adzialocha/tomomibot | ed3964223bd63340f28d36daa014865f61aaf571 | [
"MIT"
] | 28 | 2018-07-26T09:47:32.000Z | 2022-01-24T10:38:13.000Z | tomomibot/commands/start.py | adzialocha/tomomibot | ed3964223bd63340f28d36daa014865f61aaf571 | [
"MIT"
] | null | null | null | tomomibot/commands/start.py | adzialocha/tomomibot | ed3964223bd63340f28d36daa014865f61aaf571 | [
"MIT"
] | 5 | 2018-08-11T08:07:23.000Z | 2021-12-23T14:47:40.000Z | import click
from tomomibot.cli import pass_context
from tomomibot.runtime import Runtime
from tomomibot.utils import check_valid_voice, check_valid_model
from tomomibot.const import (INTERVAL_SEC, INPUT_DEVICE, OUTPUT_CHANNEL,
INPUT_CHANNEL, OUTPUT_DEVICE, SAMPLE_RATE,
THRESHOLD_DB, NUM_CLASSES_SOUNDS,
SEQ_LEN, TEMPERATURE,
PENALTY, VOLUME, OSC_ADDRESS, OSC_PORT)
| 39.011765 | 77 | 0.596803 |
059afd391bdb4d5d0ce5e8f183cba9cadeed7065 | 3,451 | py | Python | state/GameState.py | philippehenri-gosselin/tankgame | ceabbee7c348bfd4c95d2ee2ae0015d6d761154b | [
"X11"
] | 4 | 2020-09-15T02:00:39.000Z | 2021-05-11T17:23:28.000Z | state/GameState.py | philippehenri-gosselin/tankgame | ceabbee7c348bfd4c95d2ee2ae0015d6d761154b | [
"X11"
] | null | null | null | state/GameState.py | philippehenri-gosselin/tankgame | ceabbee7c348bfd4c95d2ee2ae0015d6d761154b | [
"X11"
] | null | null | null | """
MIT License
Copyrights 2020, Philippe-Henri Gosselin.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the Software), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
The Software is provided as is, without warranty of any kind, express or
implied, including but not limited to the warranties of merchantability, fitness
for a particular purpose and noninfringement. In no event shall the authors or
copyright holders be liable for any claim, damages or other liability, whether
in an action of contract, tort or otherwise, arising from, out of or in
connection with the software or the use or other dealings in the Software.
Except as contained in this notice, the name of Philippe-Henri Gosselin shall
not be used in advertising or otherwise to promote the sale, use or other
dealings in this Software without prior written authorization from
Philippe-Henri Gosselin.
"""
from .Unit import Unit
from pygame.math import Vector2
def findUnit(self,position):
"""
Returns the index of the first unit at position, otherwise None.
"""
for unit in self.units:
if int(unit.position.x) == int(position.x) \
and int(unit.position.y) == int(position.y):
return unit
return None
def findLiveUnit(self,position):
"""
Returns the index of the first live unit at position, otherwise None.
"""
unit = self.findUnit(position)
if unit is None or unit.status != "alive":
return None
return unit
    def addObserver(self,observer):
        """
        Register a game state observer.
        Every registered observer is notified when something happens
        (see the GameStateObserver class).
        """
        self.observers.append(observer)
| 34.51 | 85 | 0.654593 |
059b0412d51d78feb8e9b2b1008cb427fb6c0e11 | 5,516 | py | Python | Bot/commands_handling/group_commands.py | DogsonPl/bot_for_messenger | 2d6664b52b59696dc82efb3d361b7700ebb3960b | [
"MIT"
] | 19 | 2021-03-11T12:59:00.000Z | 2022-02-12T18:50:58.000Z | Bot/commands_handling/group_commands.py | DogsonPl/bot_for_messenger | 2d6664b52b59696dc82efb3d361b7700ebb3960b | [
"MIT"
] | null | null | null | Bot/commands_handling/group_commands.py | DogsonPl/bot_for_messenger | 2d6664b52b59696dc82efb3d361b7700ebb3960b | [
"MIT"
] | 4 | 2021-03-10T23:07:13.000Z | 2021-09-28T18:55:30.000Z | import fbchat
import random as rd
from .logger import logger
from ..bot_actions import BotActions
from ..sql import handling_group_sql
BOT_WELCOME_MESSAGE = """ Witajcie, jestem botem
Jeli chcesz zobaczy moje komendy napisz !help"""
| 41.787879 | 134 | 0.676215 |
059f84fb457661f2a82136d2fab085f6c614dd8f | 1,100 | py | Python | util/file_parsing.py | LindaSt/BT-graph-creation | a6aa4d0ca42db4744150f11f17aea7e98d391319 | [
"MIT"
] | 1 | 2022-03-09T07:28:14.000Z | 2022-03-09T07:28:14.000Z | util/file_parsing.py | LindaSt/BT-graph-creation | a6aa4d0ca42db4744150f11f17aea7e98d391319 | [
"MIT"
] | null | null | null | util/file_parsing.py | LindaSt/BT-graph-creation | a6aa4d0ca42db4744150f11f17aea7e98d391319 | [
"MIT"
] | null | null | null | import os
import xml.etree.ElementTree as ET
| 36.666667 | 106 | 0.59 |
05a1b225db67c9294be8ffcb48b01e142b5fd38c | 51,802 | py | Python | python source files/trainer.py | barneyga/A-Recurrent-Model-of-Approximate-Enumeration | 8a0ca5094a2e180939c25e55f376f30dfa1095bd | [
"MIT"
] | null | null | null | python source files/trainer.py | barneyga/A-Recurrent-Model-of-Approximate-Enumeration | 8a0ca5094a2e180939c25e55f376f30dfa1095bd | [
"MIT"
] | 1 | 2021-12-08T00:52:53.000Z | 2021-12-08T00:52:53.000Z | python source files/trainer.py | barneyga/A-Recurrent-Model-of-Approximate-Enumeration | 8a0ca5094a2e180939c25e55f376f30dfa1095bd | [
"MIT"
] | null | null | null | import os
import time
import shutil
import pickle
import torch
import torch.nn.functional as F
from tqdm import tqdm
from torch.optim.lr_scheduler import ReduceLROnPlateau
from tensorboard_logger import configure, log_value
import pandas as pd
from model import RecurrentAttention
from stop_model import StopRecurrentAttention
from utils import AverageMeter
    def load_checkpoint(self, best=False):
        """Load the best copy of a model.
        This is useful for 2 cases:
        - Resuming training with the most recent model checkpoint.
        - Loading the best validation model to evaluate on the test data.
        Args:
            best: if set to True, loads the best model.
            Use this if you want to evaluate your model
            on the test data. Else, set to False in which
            case the most recent version of the checkpoint
            is used.
        """
        print("[*] Loading model from {}".format(self.model_dir))
        # "<name>_ckpt.pth.tar" is the most recent save;
        # "<name>_model_best.pth.tar" is the best-validation copy.
        filename = self.model_name + "_ckpt.pth.tar"
        if best:
            filename = self.model_name + "_model_best.pth.tar"
        model_path = os.path.join(self.model_dir, filename)
        model = torch.load(model_path)
        # load variables from checkpoint
        self.start_epoch = model["epoch"]
        self.best_valid_acc = model["best_valid_acc"]
        self.model.load_state_dict(model["model_state"])
        self.optimizer.load_state_dict(model["optim_state"])
        if best:
            print(
                "[*] Loaded {} checkpoint @ epoch {} "
                "with best valid acc of {:.3f}".format(
                    filename, model["epoch"], model["best_valid_acc"]
                )
            )
        else:
            print("[*] Loaded {} checkpoint @ epoch {}".format(filename, model["epoch"]))
    def save_checkpoint(self, state, is_best):
        """Saves a checkpoint of the model.
        If this model has reached the best validation accuracy thus
        far, a separate file with the suffix `best` is created.
        Args:
            state: dict with epoch, best_valid_acc and the model/optimizer
                state dicts (the keys load_checkpoint reads back).
            is_best: True when this checkpoint is the best seen so far.
        """
        filename = self.model_name + "_ckpt.pth.tar"
        model_path = os.path.join(self.model_dir, filename)
        torch.save(state, model_path)
        # Keep a copy under "<name>_model_best.pth.tar" for later evaluation.
        if is_best:
            filename = self.model_name + "_model_best.pth.tar"
            shutil.copyfile(model_path, os.path.join(self.model_dir, filename))
def load_checkpoint(self, best=False):
"""Load the best copy of a model.
This is useful for 2 cases:
- Resuming training with the most recent model checkpoint.
- Loading the best validation model to evaluate on the test data.
Args:
best: if set to True, loads the best model.
Use this if you want to evaluate your model
on the test data. Else, set to False in which
case the most recent version of the checkpoint
is used.
"""
print("[*] Loading model from {}".format(self.model_dir))
filename = self.model_name + "_ckpt.pth.tar"
if best:
filename = self.model_name + "_model_best.pth.tar"
model_path = os.path.join(self.model_dir, filename)
model = torch.load(model_path)
# load variables from checkpoint
self.start_epoch = model["epoch"]
self.best_valid_acc = model["best_valid_acc"]
self.model.load_state_dict(model["model_state"])
self.optimizer.load_state_dict(model["optim_state"])
if best:
print(
"[*] Loaded {} checkpoint @ epoch {} "
"with best valid acc of {:.3f}".format(
filename, model["epoch"], model["best_valid_acc"]
)
)
else:
print("[*] Loaded {} checkpoint @ epoch {}".format(filename, model["epoch"]))
def train_one_epoch(self, epoch):
"""
Train the model for 1 epoch of the training set.
An epoch corresponds to one full pass through the entire
training set in successive mini-batches.
This is used by train() and should not be called manually.
"""
self.model.train()
batch_time = AverageMeter()
losses = AverageMeter()
accs = AverageMeter()
tic = time.time()
with tqdm(total=self.num_train) as pbar:
for i, (x, y) in enumerate(self.train_loader):
self.optimizer.zero_grad()
x, y = x.to(self.device), y.to(self.device, dtype=torch.int64)
plot = False
if (epoch % self.plot_freq == 0) and (i == 0):
plot = True
# initialize location vector and hidden state
self.batch_size = x.shape[0]
#h_t, l_t, s_t = self.reset()
h_t, l_t = self.reset()
# save images
imgs = []
imgs.append(x[0:9])
# extract the glimpses
locs = []
l_log_pi = []
#s_log_pi = []
baselines = []
log_probas = []
#stop_signals = []
for t in range(self.num_glimpses):
# forward pass through model
#h_t, l_t, s_t, b_t, log_ps, l_p, s_p = self.model(x, l_t, h_t, s_t, t == self.num_glimpses - 1)
h_t, l_t, b_t, log_ps, l_p = self.model(x, l_t, h_t, t == self.num_glimpses - 1)
# store
locs.append(l_t[0:9])
baselines.append(b_t)
l_log_pi.append(l_p)
#s_log_pi.append(s_p)
log_probas.append(log_ps)
#stop_signals.append(s_t)
# # last iteration
# h_t, l_t, b_t, log_probas, p = self.model(x, l_t, h_t, last=True)
# log_pi.append(p)
# baselines.append(b_t)
# locs.append(l_t[0:9])
# convert list to tensors and reshape
baselines = torch.stack(baselines).transpose(1, 0)
l_log_pi = torch.stack(l_log_pi).transpose(1, 0)
#s_log_pi = torch.stack(s_log_pi).transpose(1, 0)
log_probas = torch.stack(log_probas).transpose(1, 0)
#stop_signals = torch.stack(stop_signals).transpose(1, 0).squeeze(2)
#process stop signals
#up_through_stop = stop_signals
#count = torch.arange(self.batch_size)
#num_steps = torch.sum(stop_signals, dim=1).long()
#up_through_stop[count,num_steps] += 1
#extract log_probas at first stop signal
#log_probas = log_probas[count,num_steps,:]
#clip histories after stop signal
#baselines = baselines * up_through_stop
#l_log_pi = l_log_pi * up_through_stop
#s_log_pi = s_log_pi * up_through_stop
# calculate reward
predicted = torch.max(log_probas, 2)[1]
repeat_y = y.unsqueeze(1).repeat(1, self.num_glimpses)
R = (predicted.detach() == repeat_y).float()
#R = R.unsqueeze(1).repeat(1, self.num_glimpses)
#mask = (torch.arange(R.size(1), device=num_steps.device)==num_steps.unsqueeze(1))
#R = mask*R #Reward of 1 at first stop signal
#R = R - stop_signals * self.hesitation_penalty
# compute losses for differentiable modules
#loss_action = F.nll_loss(log_probas, y)
loss_action = F.nll_loss(log_probas.reshape(self.batch_size * self.num_glimpses, -1), repeat_y.reshape(self.batch_size*self.num_glimpses))
loss_baseline = F.mse_loss(baselines, R)
# compute reinforce loss
# summed over timesteps and averaged across batch
adjusted_reward = R - baselines.detach()
loss_reinforce = torch.sum(-l_log_pi * adjusted_reward, dim=1) #+ torch.sum(-s_log_pi * adjusted_reward, dim=1)
loss_reinforce = torch.mean(loss_reinforce, dim=0)
# sum up into a hybrid loss
loss = loss_action + loss_baseline + loss_reinforce * 0.01
# compute accuracy
correct = (predicted[:,-1] == y).float()
acc = 100 * (correct.sum() / len(y))
# store
losses.update(loss.item(), x.size()[0])
accs.update(acc.item(), x.size()[0])
# compute gradients and update SGD
loss.backward()
self.optimizer.step()
# measure elapsed time
toc = time.time()
batch_time.update(toc - tic)
pbar.set_description(
(
"{:.1f}s - loss: {:.3f} - acc: {:.3f}".format(
(toc - tic), loss.item(), acc.item()
)
)
)
pbar.update(self.batch_size)
# dump the glimpses and locs
if plot:
imgs = [g.cpu().data.numpy().squeeze() for g in imgs]
locs = [l.cpu().data.numpy() for l in locs]
pickle.dump(
imgs, open(self.plot_dir + "g_{}.p".format(epoch + 1), "wb")
)
pickle.dump(
locs, open(self.plot_dir + "l_{}.p".format(epoch + 1), "wb")
)
# log to tensorboard
if self.use_tensorboard:
iteration = epoch * len(self.train_loader) + i
log_value("train_loss", losses.avg, iteration)
log_value("train_acc", accs.avg, iteration)
return losses.avg, accs.avg
def save_checkpoint(self, state, is_best):
"""Saves a checkpoint of the model.
If this model has reached the best validation accuracy thus
far, a seperate file with the suffix `best` is created.
"""
filename = self.model_name + "_ckpt.pth.tar"
model_path = os.path.join(self.model_dir, filename)
torch.save(state, model_path)
if is_best:
filename = self.model_name + "_model_best.pth.tar"
shutil.copyfile(model_path, os.path.join(self.model_dir, filename))
def load_checkpoint(self, best=False):
"""Load the best copy of a model.
This is useful for 2 cases:
- Resuming training with the most recent model checkpoint.
- Loading the best validation model to evaluate on the test data.
Args:
best: if set to True, loads the best model.
Use this if you want to evaluate your model
on the test data. Else, set to False in which
case the most recent version of the checkpoint
is used.
"""
print("[*] Loading model from {}".format(self.model_dir))
filename = self.model_name + "_ckpt.pth.tar"
if best:
filename = self.model_name + "_model_best.pth.tar"
model_path = os.path.join(self.model_dir, filename)
model = torch.load(model_path)
# load variables from checkpoint
self.start_epoch = model["epoch"]
self.best_valid_acc = model["best_valid_acc"]
self.model.load_state_dict(model["model_state"])
self.optimizer.load_state_dict(model["optim_state"])
if best:
print(
"[*] Loaded {} checkpoint @ epoch {} "
"with best valid acc of {:.3f}".format(
filename, model["epoch"], model["best_valid_acc"]
)
)
else:
print("[*] Loaded {} checkpoint @ epoch {}".format(filename, model["epoch"]))
def save_checkpoint(self, state, is_best):
"""Saves a checkpoint of the model.
If this model has reached the best validation accuracy thus
far, a seperate file with the suffix `best` is created.
"""
filename = self.model_name + "_ckpt.pth.tar"
model_path = os.path.join(self.model_dir, filename)
torch.save(state, model_path)
if is_best:
filename = self.model_name + "_model_best.pth.tar"
shutil.copyfile(model_path, os.path.join(self.model_dir, filename))
def load_checkpoint(self, best=False):
"""Load the best copy of a model.
This is useful for 2 cases:
- Resuming training with the most recent model checkpoint.
- Loading the best validation model to evaluate on the test data.
Args:
best: if set to True, loads the best model.
Use this if you want to evaluate your model
on the test data. Else, set to False in which
case the most recent version of the checkpoint
is used.
"""
print("[*] Loading model from {}".format(self.model_dir))
filename = self.model_name + "_ckpt.pth.tar"
if best:
filename = self.model_name + "_model_best.pth.tar"
model_path = os.path.join(self.model_dir, filename)
model = torch.load(model_path)
# load variables from checkpoint
self.start_epoch = model["epoch"]
self.best_valid_acc = model["best_valid_acc"]
self.model.load_state_dict(model["model_state"])
self.optimizer.load_state_dict(model["optim_state"])
if best:
print(
"[*] Loaded {} checkpoint @ epoch {} "
"with best valid acc of {:.3f}".format(
filename, model["epoch"], model["best_valid_acc"]
)
)
else:
print("[*] Loaded {} checkpoint @ epoch {}".format(filename, model["epoch"]))
| 40.032457 | 154 | 0.572661 |
05a68fa246d27153d4fabeb9ddac94a69fd17785 | 392 | py | Python | src/apps/shop/serializers.py | brainfukk/fiuread | 7414ec9f580b8bdc78e3ce63bb6ebf1ac7cdc4f8 | [
"Apache-2.0"
] | null | null | null | src/apps/shop/serializers.py | brainfukk/fiuread | 7414ec9f580b8bdc78e3ce63bb6ebf1ac7cdc4f8 | [
"Apache-2.0"
] | null | null | null | src/apps/shop/serializers.py | brainfukk/fiuread | 7414ec9f580b8bdc78e3ce63bb6ebf1ac7cdc4f8 | [
"Apache-2.0"
] | null | null | null | from rest_framework import serializers
from .models import ShopItem
| 26.133333 | 70 | 0.706633 |
05a722d6a74837776cdd4f147e146b4674a0d013 | 2,205 | py | Python | app.py | limjierui/money-goose-telebot | bf048e27598b9ff6da580ee62309c4ca33eae0c5 | [
"MIT"
] | null | null | null | app.py | limjierui/money-goose-telebot | bf048e27598b9ff6da580ee62309c4ca33eae0c5 | [
"MIT"
] | null | null | null | app.py | limjierui/money-goose-telebot | bf048e27598b9ff6da580ee62309c4ca33eae0c5 | [
"MIT"
] | 3 | 2020-12-21T16:21:45.000Z | 2020-12-24T16:21:28.000Z | from flask import Flask, request
import telegram
from moneyGooseBot.master_mind import mainCommandHandler
from moneyGooseBot.credentials import URL, reset_key, bot_token, bot_user_name
from web_server import create_app
# https://api.telegram.org/bot1359229669:AAEm8MG26qbA9XjJyojVKvPI7jAdMVqAkc8/getMe
bot = telegram.Bot(token=bot_token)
app = create_app()
if __name__ == '__main__':
# note the threaded arg which allow
# your app to have more than one thread
app.run(threaded=True, debug=True) | 30.625 | 86 | 0.686168 |
05aa26976885770e54982447eb4735e665e02cf2 | 3,061 | py | Python | final/software_tutorial/tutorial/libopencm3/scripts/data/lpc43xx/yaml_odict.py | mmwvh/ce | 162064eeb6668896410c9d176fe75531cd3493fb | [
"MIT"
] | 28 | 2021-04-08T15:59:56.000Z | 2022-03-12T20:42:16.000Z | final/software_tutorial/tutorial/libopencm3/scripts/data/lpc43xx/yaml_odict.py | mmwvh/ce | 162064eeb6668896410c9d176fe75531cd3493fb | [
"MIT"
] | 7 | 2020-08-25T07:58:01.000Z | 2020-09-12T20:44:12.000Z | final/software_tutorial/tutorial/libopencm3/scripts/data/lpc43xx/yaml_odict.py | mmwvh/ce | 162064eeb6668896410c9d176fe75531cd3493fb | [
"MIT"
] | 13 | 2020-02-13T18:25:57.000Z | 2022-03-01T11:27:12.000Z | import yaml
from collections import OrderedDict
yaml.add_constructor(u'tag:yaml.org,2002:omap', construct_odict)
def repr_pairs(dump, tag, sequence, flow_style=None):
    """This is the same code as BaseRepresenter.represent_sequence(),
    but the value passed to dump.represent_data() in the loop is a
    dictionary instead of a tuple.

    Args:
        dump: the yaml Dumper/Representer instance.
        tag: YAML tag for the node (e.g. "tag:yaml.org,2002:omap").
        sequence: iterable of (key, value) pairs.
        flow_style: forced flow style, or None to choose automatically.
    Returns:
        A yaml.SequenceNode whose items are one-entry mapping nodes.
    """
    value = []
    node = yaml.SequenceNode(tag, value, flow_style=flow_style)
    # Register the node *before* recursing so aliases / recursive
    # references back to this object resolve to the node being built.
    if dump.alias_key is not None:
        dump.represented_objects[dump.alias_key] = node
    best_style = True
    for (key, val) in sequence:
        # Each pair becomes a single-entry mapping {key: val}.
        item = dump.represent_data({key: val})
        # Any non-plain-scalar item disables the flow-style heuristic.
        if not (isinstance(item, yaml.ScalarNode) and not item.style):
            best_style = False
        value.append(item)
    if flow_style is None:
        # Prefer the dumper's default; fall back to the heuristic above.
        if dump.default_flow_style is not None:
            node.flow_style = dump.default_flow_style
        else:
            node.flow_style = best_style
    return node
def repr_odict(dumper, data):
    """Represent an OrderedDict as a YAML !!omap, preserving key order.

    >>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
    >>> yaml.dump(data, default_flow_style=False)
    '!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n'
    >>> yaml.dump(data, default_flow_style=True)
    '!!omap [foo: bar, mumble: quux, baz: gorp]\\n'
    """
    # Bug fix: dict.iteritems() does not exist on Python 3 and raised
    # AttributeError; items() works on both Python 2 and 3 here, since
    # repr_pairs only iterates over the (key, value) pairs.
    return repr_pairs(dumper, u'tag:yaml.org,2002:omap', data.items())
yaml.add_representer(OrderedDict, repr_odict)
| 37.329268 | 90 | 0.613525 |
05ac654490e3084f2724bef66dfbbee9d64e72f4 | 10,609 | py | Python | app.py | isabella232/arrested-development | ac53eb71a4cacc3793d51ff2c2c3c51a7c384dea | [
"FSFAP"
] | 1 | 2015-03-16T21:22:58.000Z | 2015-03-16T21:22:58.000Z | app.py | nprapps/arrested-development | ac53eb71a4cacc3793d51ff2c2c3c51a7c384dea | [
"FSFAP"
] | 1 | 2021-02-24T06:08:41.000Z | 2021-02-24T06:08:41.000Z | app.py | isabella232/arrested-development | ac53eb71a4cacc3793d51ff2c2c3c51a7c384dea | [
"FSFAP"
] | 2 | 2015-02-22T23:39:11.000Z | 2021-02-23T10:45:05.000Z | #!/usr/bin/env python
import json
from mimetypes import guess_type
import urllib
import envoy
from flask import Flask, Markup, abort, render_template, redirect, Response
import app_config
from models import Joke, Episode, EpisodeJoke, JokeConnection
from render_utils import flatten_app_config, make_context
app = Flask(app_config.PROJECT_NAME)
# Render LESS files on-demand
# Render JST templates on-demand
# Render application configuration
# Server arbitrary static files on-demand
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000, debug=app_config.DEBUG)
| 33.153125 | 112 | 0.624658 |
05ae582a0fb6d75889c4d858419450e634ed3a1d | 12,129 | py | Python | json_modify.py | Enacero/yaml-patch | 7270d431447c82d665622cc316f0941214e7eee2 | [
"MIT"
] | 2 | 2020-04-21T08:49:39.000Z | 2020-12-21T07:28:43.000Z | json_modify.py | Enacero/json_modify | 7270d431447c82d665622cc316f0941214e7eee2 | [
"MIT"
] | null | null | null | json_modify.py | Enacero/json_modify | 7270d431447c82d665622cc316f0941214e7eee2 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2020 Oleksii Petrenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from copy import deepcopy
import json
import typing
import os
import yaml
__version__ = "1.0.1"
__license__ = "MIT"
__all__ = (
"apply_actions",
"apply_to_list",
"apply_to_dict",
"validate_action",
"validate_marker",
"apply_action",
"get_path",
"get_section",
"get_reader",
"find_section_in_list",
)
def get_reader(
    file_name: str,
) -> typing.Callable[[typing.Any], typing.Iterable[typing.Any]]:
    """
    Determine the deserializer for a file based on its extension.

    Bug fix: the original matched "yml" without the leading dot, so
    ``*.yml`` files were always rejected — ``os.path.splitext`` returns
    the extension *with* the dot (e.g. ".yml").

    :param file_name: name of the file with source data
    :return: function that reads data from an open file object
    :raises ValueError: if the extension is not .yaml/.yml/.json
    """
    ext = os.path.splitext(file_name)[-1]
    if ext in (".yaml", ".yml"):
        return yaml.safe_load
    elif ext == ".json":
        return json.load
    raise ValueError("Cant determine reader for {} extension".format(ext))
def find_section_in_list(
    section: typing.List[typing.Any], action: typing.Dict[str, typing.Any], key: str
) -> int:
    """
    Resolve a "$" path marker against a list and return the matching index.

    A marker of the form ``$<digits>`` is a literal index; any other
    ``$<name>`` marker names a list of key/value filters stored under
    ``action[<name>]`` which the matching element must satisfy.

    :param section: list to search in
    :param action: action dictionary holding the marker's filter definitions
    :param key: the raw marker (still prefixed with "$")
    :return: index of the matching element
    :raises KeyError: if the marker is not defined in the action
    :raises IndexError: if no element satisfies all filters
    """
    marker = key[1:]
    if marker.isdigit():
        return int(marker)
    if marker not in action:
        raise KeyError("Action {}: marker {} not found in action".format(action, marker))
    filters = action[marker]
    for idx, candidate in enumerate(section):
        if all(candidate[f["key"]] == f["value"] for f in filters):
            return idx
    raise IndexError(
        "Action {}: Value with {} filters not found".format(action, filters)
    )
def get_path(action: typing.Dict[str, typing.Any], path_delim: str) -> typing.List[str]:
    """
    Extract the list of path keys from an action.

    The "path" entry may be either a delimited string or an explicit
    list of string keys.

    :param action: action object
    :param path_delim: delimiter used to split a string path
        (ignored when the path is already a list)
    :return: list of keys
    :raises TypeError: if the path is neither str nor list of str
    """
    raw_path = action["path"]
    if isinstance(raw_path, str):
        return [str(part) for part in raw_path.split(path_delim)]
    if isinstance(raw_path, typing.List) and all(
        isinstance(part, str) for part in raw_path
    ):
        return raw_path
    raise TypeError(
        "Action {}: path should be str or list of strings".format(action)
    )
def get_section(
    source_data: typing.Iterable[typing.Any],
    action: typing.Dict[str, typing.Any],
    path_delim: str,
) -> typing.Iterable[typing.Any]:
    """
    Walk the action's path and return the section it points into.

    For every action except "add" the final path component is the key
    being acted upon, so traversal stops one level above it.

    :param source_data: data structure to traverse
    :param action: action object
    :param path_delim: delimiter used to split a string path
    :return: the sub-structure of ``source_data`` addressed by the path
    :raises TypeError: when a path component does not match the type of
        the structure it is applied to
    """
    keys = get_path(action, path_delim)
    if action["action"] != "add":
        # Stop above the last key; the action handler consumes it.
        keys = keys[:-1]
    current = source_data
    for raw_key in keys:
        stripped = raw_key.strip()
        if stripped.startswith("$"):
            # "$" markers index into lists only.
            if not isinstance(current, typing.List):
                raise TypeError(
                    "Action {}: section {} is not list".format(action, current)
                )
            current = current[find_section_in_list(current, action, stripped)]
        else:
            if not isinstance(current, typing.Dict):
                raise TypeError(
                    "Action {}: section {} is not dict".format(action, current)
                )
            current = current[stripped]
    return current
def apply_to_dict(
    section: typing.Dict[str, typing.Any],
    action: typing.Dict[str, typing.Any],
    path_delim: str,
) -> None:
    """
    Apply one action ("add"/"replace"/"delete"/"rename") to a dict in place.

    :param section: dictionary to modify
    :param action: action object describing the modification
    :param path_delim: delimiter used to split a string path
    :raises TypeError: when the value type does not fit the action
    :raises KeyError: when the target key is missing
    """
    kind = action["action"]
    value = action.get("value")
    if kind == "add":
        if not isinstance(value, typing.Dict):
            raise TypeError(
                "Action {}: value for add operation on dict should "
                "be of type dict".format(action)
            )
        section.update(value)
        return
    # For the remaining actions the last path component is the target key.
    target = get_path(action, path_delim)[-1].strip()
    if kind == "replace":
        section[target] = value
    elif kind == "delete":
        if target not in section:
            raise KeyError("Action {}: no such key {}".format(action, target))
        del section[target]
    elif kind == "rename":
        if target not in section:
            raise KeyError("Action {}: no such key {}".format(action, target))
        if not isinstance(value, str):
            raise TypeError(
                "Action {}: for rename action on dict value "
                "should be string".format(action)
            )
        section[value] = section[target]
        del section[target]
def apply_to_list(
    section: typing.List[typing.Any],
    action: typing.Dict[str, typing.Any],
    path_delim: str,
) -> None:
    """
    Apply one action ("add"/"replace"/"delete") to a list in place.

    :param section: list to modify
    :param action: action object describing the modification
    :param path_delim: delimiter used to split a string path
    :raises TypeError: when an "add" value is not a list
    """
    kind = action["action"]
    value = action.get("value")
    if kind == "add":
        if not isinstance(value, list):
            raise TypeError(
                "Action {}: value for add operation on list should "
                "be of type list".format(action)
            )
        section.extend(value)
        return
    # The last path component is a "$" marker selecting the element.
    marker = get_path(action, path_delim)[-1].strip()
    index = find_section_in_list(section, action, marker)
    if kind == "replace":
        section[index] = value
    elif kind == "delete":
        section.pop(index)
def apply_action(
    section: typing.Iterable[typing.Any],
    action: typing.Dict[str, typing.Any],
    path_delim: str,
) -> None:
    """
    Dispatch an action to the dict or list handler for *section*.

    :param section: section to be modified
    :param action: action object
    :param path_delim: path delimiter, default is '/'
    :raises TypeError: if the section is neither dict nor list
    """
    if isinstance(section, typing.Dict):
        apply_to_dict(section, action, path_delim)
        return
    if isinstance(section, typing.List):
        apply_to_list(section, action, path_delim)
        return
    raise TypeError(
        "Action {}: Section {} is not of type dict or list".format(action, section)
    )
def validate_marker(action: typing.Dict[str, typing.Any], key: str) -> None:
    """
    Validate one "$" marker referenced from an action's path.

    The marker must be defined in the action as a list of filter dicts,
    each providing a truthy "key" and "value".

    :param action: action object
    :param key: raw marker from the path (still prefixed with "$")
    :raises KeyError: if the marker or a filter's key/value is missing
    :raises TypeError: if the marker or one of its filters has the wrong type
    """
    name = key[1:]
    filters = action.get(name)
    if not filters:
        raise KeyError(
            "Action {}: marker {} should be defined in action".format(action, name)
        )
    if not isinstance(filters, typing.List):
        raise TypeError(
            "Action {}: marker {} should be of type list".format(action, name)
        )
    for search_filter in filters:
        if not isinstance(search_filter, typing.Dict):
            raise TypeError(
                "Action {}: marker {} filters should be of type dict".format(
                    action, name
                )
            )
        if not search_filter.get("key") or not search_filter.get("value"):
            raise KeyError(
                "Action {}: for marker {} key and value should be specified".format(
                    action, name
                )
            )
def validate_action(action: typing.Dict[str, typing.Any], path_delim: str) -> None:
    """
    Validate a single action object before it is applied.

    Checks that the mandatory "action" and "path" keys exist, that every
    "$" marker in the path is well formed, and that the "value" key is
    present and correctly typed for the given action kind.

    Bug fix: for an "add" action targeting a list (path ends with a "$"
    marker) the original ``elif`` fell through to the dict check whenever
    the value *was* a valid list, raising a spurious TypeError.  The list
    and dict checks are now properly nested.

    :param action: action object
    :param path_delim: path delimiter
    :raises KeyError: when a required key is missing
    :raises TypeError: when a value has the wrong type for the action
    """
    action_name = action.get("action")
    if not action_name:
        raise KeyError("Action {}: key action is required".format(action))
    path = action.get("path")
    if not path:
        raise KeyError("Action {}: key path is required".format(action))
    path = get_path(action, path_delim)
    for key in path:
        # Literal "$<digits>" indexes need no marker definition.
        if key.startswith("$") and not key[1:].isdigit():
            validate_marker(action, key)
    value = action.get("value")
    if action_name in ["add", "replace", "rename"] and not value:
        raise KeyError(
            "Action {}: for {} action key value is required".format(action, action_name)
        )
    if action_name == "add":
        key = path[-1]
        if key.startswith("$"):
            # Adding to a list: the value must itself be a list.
            if not isinstance(value, typing.List):
                raise TypeError(
                    "Action {}: for add action on list value should be list".format(action)
                )
        elif not isinstance(value, typing.Dict):
            raise TypeError(
                "Action {}: for add action on dict value should be dict".format(action)
            )
    elif action_name == "rename":
        if not isinstance(value, str):
            raise TypeError(
                "Action {}: for rename action on dict value should be string".format(
                    action
                )
            )
def apply_actions(
    source: typing.Union[typing.Dict[str, typing.Any], str],
    actions: typing.Union[typing.List[typing.Dict[str, typing.Any]], str],
    copy: bool = False,
    path_delim: str = "/",
) -> typing.Iterable[typing.Any]:
    """
    Apply a list of actions to source data.

    :param source: dictionary, or path to a json/yaml file, holding the data
        that should be modified
    :param actions: list of action dicts, or path to a json/yaml file holding
        the actions to apply
    :param copy: when True and source is a dictionary, work on a deep copy
        instead of mutating it in place (default False; ignored for files)
    :param path_delim: path delimiter used inside action paths (default "/")
    :return: the source data after all actions were applied
    :raises TypeError: if source/actions are neither the expected containers
        nor file names
    """
    if isinstance(source, str):
        load_source = get_reader(source)
        with open(source, "r") as source_file:
            source_data = load_source(source_file)
    elif isinstance(source, dict):
        source_data = deepcopy(source) if copy else source
    else:
        raise TypeError("source should be data dictionary or file_name with data")
    if isinstance(actions, str):
        load_actions = get_reader(actions)
        with open(actions, "r") as actions_file:
            actions_data = load_actions(actions_file)
    elif isinstance(actions, list):
        actions_data = actions
    else:
        raise TypeError(
            "actions should be data dictionary or file_name with actions list"
        )
    # Validate every action up front so that no partial modification happens
    # when one of the later actions is malformed.
    for action in actions_data:
        validate_action(action, path_delim)
    for action in actions_data:
        section = get_section(source_data, action, path_delim)
        apply_action(section, action, path_delim)
    return source_data
| 32.692722 | 88 | 0.612252 |
05aed2b7bdb2d62afb387bf3fa03ff50f51651b0 | 43,958 | py | Python | serial_scripts/vm_regression/test_vm_serial.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
] | 1 | 2017-06-13T04:42:34.000Z | 2017-06-13T04:42:34.000Z | serial_scripts/vm_regression/test_vm_serial.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
] | null | null | null | serial_scripts/vm_regression/test_vm_serial.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
] | null | null | null | import traffic_tests
from vn_test import *
from vm_test import *
from floating_ip import *
from policy_test import *
from compute_node_test import ComputeNodeFixture
from user_test import UserFixture
from multiple_vn_vm_test import *
from tcutils.wrappers import preposttest_wrapper
sys.path.append(os.path.realpath('tcutils/pkgs/Traffic'))
from traffic.core.stream import Stream
from traffic.core.profile import create, ContinuousProfile
from traffic.core.helpers import Host
from traffic.core.helpers import Sender, Receiver
from base import BaseVnVmTest
from common import isolated_creds
import inspect
from tcutils.util import skip_because
from tcutils.tcpdump_utils import start_tcpdump_for_intf,\
stop_tcpdump_for_intf, verify_tcpdump_count
import test
from tcutils.contrail_status_check import ContrailStatusChecker
# end TestBasicVMVN0
| 46.125918 | 140 | 0.626189 |
05afa4697f046e6af89220c07fb5a8db5f7b4cae | 2,466 | py | Python | odata/tests/test_context.py | suhrawardi/python-odata | 8a8f88329ca0f5b893e114bcf7ab02f3a8106ef0 | [
"MIT"
] | 74 | 2015-04-13T15:12:44.000Z | 2022-01-24T08:06:16.000Z | odata/tests/test_context.py | suhrawardi/python-odata | 8a8f88329ca0f5b893e114bcf7ab02f3a8106ef0 | [
"MIT"
] | 43 | 2015-04-11T15:08:08.000Z | 2021-04-14T16:08:43.000Z | odata/tests/test_context.py | suhrawardi/python-odata | 8a8f88329ca0f5b893e114bcf7ab02f3a8106ef0 | [
"MIT"
] | 63 | 2016-06-22T03:52:39.000Z | 2022-02-25T10:56:34.000Z | # -*- coding: utf-8 -*-
import json
import base64
import decimal
from unittest import TestCase
import requests
import responses
from odata.tests import Service, Product, DemoUnboundAction
| 34.732394 | 92 | 0.623277 |
05b079948e8c02888049d1f77a57cfcbe4bb8e4b | 1,432 | py | Python | readouts/basic_readout.py | qbxlvnf11/graph-neural-networks-for-graph-classification | 5d69ead58c786aa8e472ab0433156fe09fe6ca4b | [
"MIT"
] | 20 | 2020-09-02T07:07:35.000Z | 2022-03-16T15:22:14.000Z | readouts/basic_readout.py | yuexiarenjing/graph-neural-networks-for-graph-classification | 5d69ead58c786aa8e472ab0433156fe09fe6ca4b | [
"MIT"
] | 2 | 2021-11-01T08:32:10.000Z | 2022-03-25T04:29:35.000Z | readouts/basic_readout.py | yuexiarenjing/graph-neural-networks-for-graph-classification | 5d69ead58c786aa8e472ab0433156fe09fe6ca4b | [
"MIT"
] | 11 | 2020-09-02T07:13:46.000Z | 2022-03-23T10:38:07.000Z | import torch | 34.095238 | 77 | 0.552374 |
05b273137ad8f8c40be4550bda786ffd468b9e75 | 362 | py | Python | src/ef/external_field_uniform.py | tnakaicode/ChargedPaticle-LowEnergy | 47b751bcada2af7fc50cef587a48b1a3c12bcbba | [
"MIT"
] | 6 | 2019-04-14T06:19:40.000Z | 2021-09-14T13:46:26.000Z | src/ef/external_field_uniform.py | tnakaicode/ChargedPaticle-LowEnergy | 47b751bcada2af7fc50cef587a48b1a3c12bcbba | [
"MIT"
] | 31 | 2018-03-02T12:05:20.000Z | 2019-02-20T09:29:08.000Z | src/ef/external_field_uniform.py | tnakaicode/ChargedPaticle-LowEnergy | 47b751bcada2af7fc50cef587a48b1a3c12bcbba | [
"MIT"
] | 10 | 2017-12-21T15:16:55.000Z | 2020-10-31T23:59:50.000Z | from ef.external_field import ExternalField
| 30.166667 | 73 | 0.773481 |
05b2b6ec5edc971fee6f55c38fd27eec4af6014d | 11,493 | py | Python | plugins/helpers/EFO.py | opentargets/platform-input-support | 555c3ed091a7a3a767dc0c37054dbcd369f02252 | [
"Apache-2.0"
] | 4 | 2019-03-26T15:54:35.000Z | 2021-05-27T13:18:43.000Z | plugins/helpers/EFO.py | opentargets/platform-input-support | 555c3ed091a7a3a767dc0c37054dbcd369f02252 | [
"Apache-2.0"
] | 12 | 2019-04-23T14:45:04.000Z | 2022-03-17T09:40:04.000Z | plugins/helpers/EFO.py | opentargets/platform-input-support | 555c3ed091a7a3a767dc0c37054dbcd369f02252 | [
"Apache-2.0"
] | 2 | 2019-06-15T17:21:14.000Z | 2021-05-14T18:35:18.000Z | import logging
import re
import json
import jsonlines
from urllib import parse
logger = logging.getLogger(__name__)
# EFO
# The current implementation is based on the conversion from owl format to json lines format using Apache RIOT
# The structure disease_obsolete stores the obsolete terms and it is used to retrieve the relationship between valid
# term and obsolete terms.
# The locationIds are generated retriving the structure parent/child and recursevely retrieve the proper info
| 40.326316 | 116 | 0.59297 |
05b664d9f22c51662666d538e6f424b0f69a4ea2 | 948 | py | Python | interaction3/mfield/tests/test_transmit_receive_beamplot.py | bdshieh/interaction3 | b44c390045cf3b594125e90d2f2f4f617bc2433b | [
"MIT"
] | 2 | 2020-07-08T14:42:52.000Z | 2022-03-13T05:25:55.000Z | interaction3/mfield/tests/test_transmit_receive_beamplot.py | bdshieh/interaction3 | b44c390045cf3b594125e90d2f2f4f617bc2433b | [
"MIT"
] | null | null | null | interaction3/mfield/tests/test_transmit_receive_beamplot.py | bdshieh/interaction3 | b44c390045cf3b594125e90d2f2f4f617bc2433b | [
"MIT"
] | null | null | null |
import numpy as np
from interaction3 import abstract
from interaction3.arrays import matrix
from interaction3.mfield.solvers.transmit_receive_beamplot_2 import TransmitReceiveBeamplot2
array = matrix.create(nelem=[2, 2])
simulation = abstract.MfieldSimulation(sampling_frequency=100e6,
sound_speed=1540,
excitation_center_frequecy=5e6,
excitation_bandwidth=4e6,
field_positions=np.array([[0, 0, 0.05],
[0, 0, 0.06],
[0, 0, 0.07]])
)
kwargs, meta = TransmitReceiveBeamplot2.connector(simulation, array)
sim = TransmitReceiveBeamplot2(**kwargs)
sim.solve()
rf_data = sim.result['rf_data']
times = sim.result['times']
| 35.111111 | 92 | 0.517932 |
05b7efff7d41c4651007c0d46a051ea437cab70c | 16,172 | py | Python | scripts/make_plots.py | facebookresearch/mpcfp | cb29797aa4f2ce524dd584ecf47c863fd9f414a6 | [
"MIT"
] | 5 | 2020-11-18T23:55:17.000Z | 2022-01-14T07:15:35.000Z | scripts/make_plots.py | facebookresearch/mpcfp | cb29797aa4f2ce524dd584ecf47c863fd9f414a6 | [
"MIT"
] | null | null | null | scripts/make_plots.py | facebookresearch/mpcfp | cb29797aa4f2ce524dd584ecf47c863fd9f414a6 | [
"MIT"
] | 2 | 2021-11-06T14:06:13.000Z | 2022-01-14T07:16:29.000Z | #!/usr/bin/env python2
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import math
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import seaborn
# constants:
NAN = float('nan')
# From https://blog.graphiq.com/
# finding-the-right-color-palettes-for-data-visualizations-fcd4e707a283
BAR_COLORS_PURPLES = [
(0.9020, 0.6196, 0.6157),
(0.7765, 0.3412, 0.5294),
(0.4471, 0.1922, 0.5647),
(0.2549, 0.1098, 0.3804),
]
BAR_COLORS_GRAY_PURPLES = [
(.7, .7, .7),
(0.9020, 0.6196, 0.6157),
(0.7765, 0.3412, 0.5294),
(0.4471, 0.1922, 0.5647),
(0.2549, 0.1098, 0.3804),
]
BAR_COLORS_DETECTION = [
(.8, .8, .8),
(.4, .4, .4),
(0.9020, 0.6196, 0.6157),
(0.7765, 0.3412, 0.5294),
(0.4471, 0.1922, 0.5647),
(0.2549, 0.1098, 0.3804),
]
LINE_COLORS = seaborn.cubehelix_palette(
4, start=2, rot=0, dark=0.15, light=0.75, reverse=False, as_cmap=False)
BAR_COLORS = BAR_COLORS_GRAY_PURPLES
FS = 18
color_counter = [0]
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble'] = r"\usepackage{amsmath}"
# make generic line plot:
# make all the plots:
# run all the things:
if __name__ == '__main__':
main()
| 32.539235 | 79 | 0.537225 |
05b87ef1f9d957ce2aacbc7ba9bf31d3f24627e5 | 2,782 | py | Python | example_backtesting.py | brokenlab/finance4py | 839fb4c262c369973c1afaebb23291355f8b4668 | [
"MIT"
] | 6 | 2016-12-28T03:40:46.000Z | 2017-03-31T12:04:43.000Z | example_backtesting.py | brokenlab/finance4py | 839fb4c262c369973c1afaebb23291355f8b4668 | [
"MIT"
] | null | null | null | example_backtesting.py | brokenlab/finance4py | 839fb4c262c369973c1afaebb23291355f8b4668 | [
"MIT"
] | 3 | 2018-04-26T03:14:29.000Z | 2021-06-13T16:18:04.000Z | # -*- coding: utf-8 -*-
'''
* finance4py
* Based on Python Data Analysis Library.
* 2016/03/22 by Sheg-Huai Wang <m10215059@csie.ntust.edu.tw>
* Copyright (c) 2016, finance4py team
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to
endorse or promote products derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from finance4py import Stock
from finance4py.backtesting import BandTest
from pylab import *
if __name__ == '__main__':
#
s = Stock('2330', '2015-10-31', '2016-03-05')
bt = BandTest(s)
#
# K, D
s['K'], s['D'] = s.KD()
# => def (, , )
#
bt.addStrategy('KD', golden_cross)
#
s['MA5'] = s.MA()
s['MA20'] = s.MA(20)
bt.addStrategy('', average_cross)
#
s['DIF'], s['DEM'], s['OSC']= s.MACD()
bt.addStrategy('MACD', macd_cross)
# ()
bt.plot()
show() | 35.21519 | 104 | 0.727175 |
05b8e002f7910268a9002f66a3d07f197f31db7a | 1,778 | py | Python | utils/cloud/cloud_client/__init__.py | alexfdo/asr_eval | d1573cc3113ce9df1ae64c3b91b5f495e2cff9a3 | [
"MIT"
] | 3 | 2020-03-06T17:20:34.000Z | 2021-09-09T09:18:48.000Z | utils/cloud/cloud_client/__init__.py | alexfdo/asr_eval | d1573cc3113ce9df1ae64c3b91b5f495e2cff9a3 | [
"MIT"
] | 1 | 2020-02-03T18:25:08.000Z | 2020-02-03T18:25:08.000Z | utils/cloud/cloud_client/__init__.py | alexfdo/asr_eval | d1573cc3113ce9df1ae64c3b91b5f495e2cff9a3 | [
"MIT"
] | 1 | 2020-01-29T19:47:54.000Z | 2020-01-29T19:47:54.000Z | # coding: utf-8
# flake8: noqa
"""
ASR documentation
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.dev
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from cloud_client.api.packages_api import PackagesApi
from cloud_client.api.recognize_api import RecognizeApi
from cloud_client.api.session_api import SessionApi
# import ApiClient
from cloud_client.cloud_api_client import CloudApiClient
from cloud_client.configuration import Configuration
# import models into sdk package
from cloud_client.models.advanced_recognition_request_dto import AdvancedRecognitionRequestDto
from cloud_client.models.asr_advanced_result_dto import ASRAdvancedResultDto
from cloud_client.models.asr_result_dto import ASRResultDto
from cloud_client.models.audio_file_dto import AudioFileDto
from cloud_client.models.auth_request_dto import AuthRequestDto
from cloud_client.models.auth_response_dto import AuthResponseDto
from cloud_client.models.auth_status_dto import AuthStatusDto
from cloud_client.models.message_dto import MessageDto
from cloud_client.models.package_dto import PackageDto
from cloud_client.models.recognition_request_dto import RecognitionRequestDto
from cloud_client.models.sessionless_recognition_request_dto import SessionlessRecognitionRequestDto
from cloud_client.models.start_session_request import StartSessionRequest
from cloud_client.models.status_dto import StatusDto
from cloud_client.models.stream_request_dto import StreamRequestDto
from cloud_client.models.stream_response_dto import StreamResponseDto
from cloud_client.models.word_dto import WordDto
| 41.348837 | 119 | 0.865017 |
05b95038357172273cd6bf5b94205ef5e3a1bff8 | 2,818 | py | Python | main.py | af12066/cancel-sit | 29977bb86927e69ae7f94a160ef4d1fb810f0117 | [
"MIT"
] | null | null | null | main.py | af12066/cancel-sit | 29977bb86927e69ae7f94a160ef4d1fb810f0117 | [
"MIT"
] | null | null | null | main.py | af12066/cancel-sit | 29977bb86927e69ae7f94a160ef4d1fb810f0117 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) T. H.
import urllib.request
import re
import urllib.parse
import codecs
import filecmp
import os.path
import os
from bs4 import BeautifulSoup
from slacker import Slacker
from datetime import datetime
if __name__ == '__main__':
slack = Slack('...')
print(slack.get_channnel_list())
#
uri = 'http://attend.sic.shibaura-it.ac.jp/cancelCalendar/t04/calendar{0:d}{1:02d}-{2:02d}.html'.format(datetime.today().year, datetime.today().month, (lambda x: x if x != 12 else x - 11)(datetime.today().month + 1))
html = urllib.request.urlopen(uri)
soup = BeautifulSoup(html, 'lxml')
link = soup.find_all('a', href=re.compile("/cancel/")) #href'/cancel/'a
for a in link:
path = urllib.parse.urljoin(uri, a['href']) #href
print(path)
fileName = path.split('/')[-1]
fileName = fileName.replace("html", "txt")
html2 = urllib.request.urlopen(path) #URL
soup2 = BeautifulSoup(html2, 'lxml')
dat = soup2.find_all(text=True) #
settext = "\n".join([x for x in dat if x != '\n']) #
#
#
# '.tmp'
# txttmptxtSlack
if os.path.isfile(fileName):
tmpfileName = fileName + '.tmp'
writeFile(tmpfileName, settext)
if filecmp.cmp(fileName, tmpfileName):
print("no diff")
else:
writeFile(fileName, settext)
slack.post_message_to_channel("class", settext) #Slack (, )
os.remove(tmpfileName)
else:
#print('write a new file')
slack.post_message_to_channel("class", settext) #Slack (, )
writeFile(fileName, settext)
| 29.663158 | 220 | 0.625621 |
05ba89852c4740460e1cce9740e5ab37d0b77443 | 582 | py | Python | minitf/kernel/_numpy_math.py | guocuimi/minitf | f272a6b1546b82aaec41ec7d2c2d34fa40a40385 | [
"MIT"
] | 7 | 2020-02-10T08:16:30.000Z | 2021-01-31T14:08:02.000Z | minitf/kernel/_numpy_math.py | guocuimi/minitf | f272a6b1546b82aaec41ec7d2c2d34fa40a40385 | [
"MIT"
] | 1 | 2020-02-29T01:57:54.000Z | 2020-02-29T01:57:54.000Z | minitf/kernel/_numpy_math.py | guocuimi/minitf | f272a6b1546b82aaec41ec7d2c2d34fa40a40385 | [
"MIT"
] | null | null | null | import numpy as _np
from minitf.kernel.core import notrace_primitive
from minitf.kernel.core import primitive
# ----- Differentiable functions -----
add = primitive(_np.add)
subtract = primitive(_np.subtract)
multiply = primitive(_np.multiply)
divide = primitive(_np.divide)
dot = primitive(_np.dot)
square = primitive(_np.square)
reduce_mean = primitive(_np.average)
exp = primitive(_np.exp)
negative = primitive(_np.negative)
maximum = primitive(_np.maximum)
minimum = primitive(_np.minimum)
# temporarily put it here as nograd function
reduce_sum = notrace_primitive(_np.sum)
| 27.714286 | 48 | 0.780069 |
05bdd1c7fb73fc917e7e9bacb41962e3873e9769 | 5,802 | py | Python | map/migrations/0001_initial.py | matthewoconnor/mapplot-cdp | 19513e6617f878d717ab4e917ffc9d22270edcfe | [
"MIT"
] | null | null | null | map/migrations/0001_initial.py | matthewoconnor/mapplot-cdp | 19513e6617f878d717ab4e917ffc9d22270edcfe | [
"MIT"
] | null | null | null | map/migrations/0001_initial.py | matthewoconnor/mapplot-cdp | 19513e6617f878d717ab4e917ffc9d22270edcfe | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-01-10 20:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 62.387097 | 275 | 0.608239 |
05be03857ac9bab749c288e65ba7f0f36541df9b | 4,561 | py | Python | Scripts/simulation/gsi_handlers/object_lost_and_found_service_handlers.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/gsi_handlers/object_lost_and_found_service_handlers.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/gsi_handlers/object_lost_and_found_service_handlers.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\gsi_handlers\object_lost_and_found_service_handlers.py
# Compiled at: 2018-10-26 00:20:22
# Size of source mod 2**32: 4629 bytes
from sims4.gsi.dispatcher import GsiHandler
from sims4.gsi.schema import GsiGridSchema
import services
olaf_service_objects_schema = GsiGridSchema(label='Object Lost & Found')
olaf_service_objects_schema.add_field('object', label='Object')
olaf_service_objects_schema.add_field('zone', label='Zone')
olaf_service_objects_schema.add_field('street', label='Street')
olaf_service_objects_schema.add_field('sim', label='Sim')
olaf_service_objects_schema.add_field('household', label='Household')
olaf_service_deleted_clone_schema = GsiGridSchema(label='Object Lost & Found/To Be Deleted')
olaf_service_deleted_clone_schema.add_field('object', label='Object')
olaf_service_deleted_clone_schema.add_field('zone', label='Zone')
olaf_service_deleted_clone_schema.add_field('street', label='Street')
| 44.281553 | 110 | 0.70182 |
05bf284e1bf49c109f8df75324eddb8540d17a61 | 685 | py | Python | testing/test_pendulum.py | delock/pytorch-a3c-mujoco | 82e0c854417ac05e0f414eab1710794d41515591 | [
"MIT"
] | null | null | null | testing/test_pendulum.py | delock/pytorch-a3c-mujoco | 82e0c854417ac05e0f414eab1710794d41515591 | [
"MIT"
] | null | null | null | testing/test_pendulum.py | delock/pytorch-a3c-mujoco | 82e0c854417ac05e0f414eab1710794d41515591 | [
"MIT"
] | null | null | null | #Importing OpenAI gym package and MuJoCo engine
import gym
import numpy as np
import mujoco_py
import matplotlib.pyplot as plt
import env
#Setting MountainCar-v0 as the environment
env = gym.make('InvertedPendulum-down')
#Sets an initial state
env.reset()
print (env.action_space)
# Rendering our instance 300 times
i = 0
while True:
#renders the environment
env.render()
#Takes a random action from its action space
# aka the number of unique actions an agent can perform
action = env.action_space.sample()
ob, reward, done, _ = env.step([-5])
if i == 0:
print (action)
print ("ob = {}, reward = {}, done = {}".format(ob, reward, done))
i += 1
env.close()
| 25.37037 | 70 | 0.706569 |
05bf7c9f0303c517554bb2670af4a9a4baf2a54a | 5,317 | py | Python | plots/plot_drift_types.py | ChristophRaab/RRSLVQ | e265f62e023bd3ca23273b51e06035fd3c0b7c94 | [
"MIT"
] | 1 | 2021-06-22T20:54:03.000Z | 2021-06-22T20:54:03.000Z | plots/plot_drift_types.py | ChristophRaab/RRSLVQ | e265f62e023bd3ca23273b51e06035fd3c0b7c94 | [
"MIT"
] | 5 | 2020-04-20T09:31:02.000Z | 2021-07-10T01:23:36.000Z | plots/plot_drift_types.py | ChristophRaab/RRSLVQ | e265f62e023bd3ca23273b51e06035fd3c0b7c94 | [
"MIT"
] | 1 | 2020-07-03T04:00:47.000Z | 2020-07-03T04:00:47.000Z | import matplotlib.pyplot as plt
import numpy as np
from scipy.special import logit
import pandas as pd
from matplotlib.axes import Axes, Subplot
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
SMALL = 14
SIZE = 16
plt.rc('font', size=SIZE) # controls default text sizes
plt.rc('axes', titlesize=SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL) # legend fontsize
plt.rc('figure', titlesize=SIZE) # fontsize of the figure title
plt.rc('lines', lw=4)
#reoccuring_drift(width=600,filename="frequent_reoccuing_drift.eps") # Frequent Reoccurring
#reoccuring_drift(width=1000,rate=0.4) # Reoccurring
#incremental_drift(width=15000) # Incremental
#incremental_drift(width=2500,filename="abrupt_drift.eps") # Abrupt
gradual_drift(length=45000,width=1000,rate=0.3) #Gradual
| 33.024845 | 130 | 0.671995 |
05c1f456776569370085a917d41ee8b850f0a3b7 | 15,773 | py | Python | simulation/src/utils.py | VIDA-NYU/pedestrian-sensing-model | e8f0a6d3e47fc2a2577ac502f607568b3b7f2abf | [
"MIT"
] | 2 | 2020-01-14T12:44:11.000Z | 2021-09-29T16:09:37.000Z | simulation/src/utils.py | VIDA-NYU/pedestrian-sensing-model | e8f0a6d3e47fc2a2577ac502f607568b3b7f2abf | [
"MIT"
] | 1 | 2021-09-11T14:13:57.000Z | 2021-09-11T14:13:57.000Z | simulation/src/utils.py | VIDA-NYU/pedestrian-sensing-model | e8f0a6d3e47fc2a2577ac502f607568b3b7f2abf | [
"MIT"
] | 2 | 2020-07-13T17:08:25.000Z | 2021-03-31T15:10:58.000Z | #!/usr/bin/env python3
import numpy as np
import math
import random
import time
import scipy.misc
import scipy.signal
import multiprocessing
import json
import itertools
import os
import pprint
from collections import namedtuple
from fractions import gcd
from optimized import get_distance
OBSTACLE = -1
MAX = 2147483647 #MAXIMUM INT 32
Graph = namedtuple('Graph', 'adj nodes2d nodesflat indices cachedravel ' \
'mapshape nnodes maplen')
##########################################################
def get_distance_from_npy_idx(npypos1, npypos2, mapshape):
    """Manhattan distance between two positions given as flat (raveled)
    numpy indices into a map of shape @mapshape.

    Args:
        npypos1(int): first position as a flattened numpy index
        npypos2(int): second position as a flattened numpy index
        mapshape(tuple): (height, width) of the map

    Returns:
        float: manhattan distance (computed by optimized.get_distance)
    """
    coords1, coords2 = (np.array(np.unravel_index(p, mapshape))
                        for p in (npypos1, npypos2))
    return get_distance(coords1, coords2)
def parse_image(imagefile, thresh=128):
    """Parse the streets from an image and return a numpy ndarray with 0 for
    streets and OBSTACLE (-1) for non-streets. Assumes a BW/grayscale image
    where bright pixels (> thresh) represent streets.

    Args:
        imagefile(str): image path
        thresh(int): pixel intensity threshold separating street from obstacle

    Returns:
        numpy.ndarray: 2D map with 0 (street) and OBSTACLE (non-street)

    NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2, so this call
    only works with old SciPy (plus Pillow); consider imageio.imread instead.
    """
    img = scipy.misc.imread(imagefile)
    # RGB(A) input: keep only the first channel (image assumed grayscale)
    if img.ndim > 2: img = img[:, :, 0]
    # boolean threshold -> {0, 1}, shifted to {-1 (OBSTACLE), 0 (street)}
    return (img > thresh).astype(int) - 1
def find_crossings_crossshape(npmap):
    """Locate crossing nodes by convolving the map with a cross-shaped
    kernel. The input uses 0 for streets and OBSTACLE for non-streets
    (black pixels in the originating BW image are streets).

    Args:
        npmap(numpy.ndarray): 2D array of -1 (obstacles) and 0 (paths)

    Returns:
        set: (row, col) tuples of the crossing nodes
    """
    cross_kernel = np.array([[0, 1, 0],
                             [1, 1, 1],
                             [0, 1, 0]])
    convolved = scipy.signal.convolve2d(npmap, cross_kernel, mode='same',
                                        boundary='fill', fillvalue=OBSTACLE)
    rows, cols = np.where(convolved >= OBSTACLE)
    return {(r, c) for r, c in zip(rows, cols)}
def find_crossings_squareshape(npmap, supressredundant=True):
    """Locate crossing nodes by convolving the map with a 2x2 all-ones
    kernel. The input uses 0 for streets and -1 for non-streets (black
    pixels in the originating BW image are streets).

    Args:
        npmap(numpy.ndarray): 2D array of -1 (obstacles) and 0 (paths)
        supressredundant(bool): when True, drop crossings closer than the
            default minimum distance of filter_by_distance

    Returns:
        numpy.ndarray: array of [row, col] crossing coordinates
    """
    square_kernel = np.ones((2, 2), dtype=int)
    convolved = scipy.signal.convolve2d(npmap, square_kernel, mode='same',
                                        boundary='fill', fillvalue=OBSTACLE)
    rows, cols = np.where(convolved >= 0)
    crossings = np.array([np.array(rc) for rc in zip(rows, cols)])
    if not supressredundant:
        return crossings
    return filter_by_distance(crossings)
def filter_by_distance(points, mindist=4):
    """Suppress points that lie too close to an earlier kept point.

    Walks @points in order; every later point whose distance to an already
    kept point is below @mindist is discarded (O(n^2) pairwise scan).

    Args:
        points(numpy.ndarray): array of positions
        mindist(int): minimum allowed distance between two kept points

    Returns:
        numpy.ndarray: the subset of points at least mindist apart
    """
    aslist = list(points)
    n = len(aslist)
    keep = np.full(n, np.True_)
    for i in range(n):
        if not keep[i]:
            continue
        for j in range(i + 1, n):
            if keep[j] and get_distance(aslist[i], aslist[j]) < mindist:
                keep[j] = np.False_
    return points[keep]
##########################################################
def compute_heuristics(nodes, goal):
    """Compute the Manhattan-distance heuristic of every node towards @goal.
    Nodes present in the collection are assumed traversable (not obstacles).
    IMPORTANT: the map is assumed to be a single connected component.

    Args:
        nodes(iterable of tuple): traversable positions
        goal(tuple): goal position

    Returns:
        dict: node -> Manhattan distance to goal
    """
    goal_arr = np.asarray(goal)
    heuristics = {}
    for node in nodes:
        heuristics[node] = np.sum(np.absolute(np.subtract(node, goal_arr)))
    return heuristics
##########################################################
##########################################################
def get_adjmatrix_from_npy(_map):
    """Build a simple adjacency structure from a 2D map of 0 (street) and
    OBSTACLE cells: returns (flat node indices, per-node neighbour rows).

    NOTE(review): this function is broken as written and cannot run:
      1. `neigh[0] = -1` below executes BEFORE `neigh` is assigned, raising
         NameError/UnboundLocalError on the first traversable cell — the
         `neigh = np.full(...)` line should precede it.
      2. `np.reshape(neigh, (1, 10))` reshapes an 8-element array
         (connectivity == 8) to 10 slots, which raises ValueError.
      3. The diagonal neighbours `ns2` are fetched but never used.
    The working variant is get_full_adjmatrix_from_npy; prefer that one.
    """
    connectivity = 8
    h, w = _map.shape
    nodes = np.empty((1, 0), dtype=int)
    adj = np.empty((0, 10), dtype=int)
    for j in range(0, h):
        for i in range(0, w):
            if _map[j, i] == OBSTACLE: continue
            nodes = np.append(nodes, np.ravel_multi_index((j, i), _map.shape))
            ns1, ns2 = get_neighbours_coords((j, i), _map.shape)
            # BUG: `neigh` used before assignment (see docstring, item 1)
            neigh[0] = -1
            acc = 1
            neigh = np.full(connectivity, -1)
            for jj, ii in ns1:
                if _map[jj, ii] != OBSTACLE:
                    neigh[acc] = np.ravel_multi_index((jj, ii), _map.shape)
                    acc += 1
            neigh[acc] = -1.4142135623730951 #sqrt(2) separator before diagonals
            acc += 1
            # BUG: 8-element neigh reshaped to (1, 10) (see docstring, item 2)
            adj = np.append(adj, np.reshape(neigh, (1, 10)), axis=0)
    return nodes, adj
##########################################################
def get_full_adjmatrix_from_npy(_mapmatrix):
    """Create a graph structure from a 2d matrix with two possible values:
    OBSTACLE or 0. Returns the structure in several formats to suit every need.

    Each adj row has the fixed layout
        [-1, <4-connected neighbour flat indices...>, -2, <diagonal
         neighbour flat indices...>, -MAX padding]
    where -1 is a header, -2 separates orthogonal from diagonal neighbours
    and unused slots keep the -MAX fill value.

    Returns:
        Graph namedtuple with attributes
        adj(maplen, 10) - stores the neighbours of each npy coordinate
        nodes2d(nnodes, 2) - sparse list of nodes in 2d
        nodesflat(nnodes) - sparse list of nodes in npy
        indices(maplen) - dense list of points in sparse indexing
        cachedravel(mapshape) - cached ravel of points to be used
        mapshape(2) - height and width
        nnodes(1) - number of traversable nodes

    NOTE(review): np.product is deprecated (removed in NumPy 2.0); np.prod
    should be used instead — confirm the project's NumPy version.
    """
    h, w = _mapmatrix.shape
    maplen = np.product(_mapmatrix.shape)
    adj = np.full((np.product(_mapmatrix.shape), 10), -1, dtype=int)
    nodes2d = np.full((maplen, 2), -1, dtype=int)
    nodesflat = np.empty((0, 1), dtype=int)
    indices = np.full(maplen, -1, dtype=int)
    cachedravel = np.full(_mapmatrix.shape, -1)
    nodesidx = 0  # running count of traversable nodes (dense index)
    #TODO: convert everything to numpy indexing
    for j in range(h):
        for i in range(w):
            if _mapmatrix[j, i] == OBSTACLE: continue
            npyidx = np.ravel_multi_index((j, i), _mapmatrix.shape)
            indices[npyidx] = nodesidx
            nodes2d[nodesidx] = np.array([j, i])
            ns1, ns2 = get_neighbours_coords((j, i), _mapmatrix.shape)
            # row layout: header, orthogonal, -2 separator, diagonal, padding
            neigh = np.full(10, -MAX)
            neigh[0] = -1
            acc = 1
            cachedravel[j, i] = npyidx
            for jj, ii in ns1:
                if _mapmatrix[jj, ii] != OBSTACLE:
                    neigh[acc] = np.ravel_multi_index((jj, ii), _mapmatrix.shape)
                    acc += 1
            neigh[acc] = -2 # separator: diagonal neighbours (cost sqrt(2)) follow
            acc += 1
            for jj, ii in ns2:
                if _mapmatrix[jj, ii] != OBSTACLE:
                    neigh[acc] = np.ravel_multi_index((jj, ii), _mapmatrix.shape)
                    acc += 1
            adj[npyidx] = np.reshape(neigh, (1, 10))
            nodesidx += 1
    # trim the over-allocated node list down to the traversable count
    nodes2d = nodes2d[:nodesidx]
    nodesflat = np.array([ np.ravel_multi_index((xx, yy),_mapmatrix.shape) for xx, yy in nodes2d])
    return Graph(adj=adj, nodes2d=nodes2d, nodesflat=nodesflat,
                 indices=indices, cachedravel=cachedravel,
                 mapshape=_mapmatrix.shape, nnodes=len(nodesflat),
                 maplen=np.product(_mapmatrix.shape))
##########################################################
def get_neighbours_coords(pos, mapshape):
    """Return the orthogonal and diagonal neighbours of @pos, with any
    coordinate outside the map removed.

    Neighbour ordering (9 is pos itself, never returned):
        5 1 6
        4 9 2
        8 3 7

    Args:
        pos(tuple): (row, col) position
        mapshape(tuple): (height, width) of the map

    Returns:
        tuple: (orthogonal neighbours, diagonal neighbours), each a list
    """
    row, col = pos
    orthogonal = [(row - 1, col), (row, col + 1),
                  (row + 1, col), (row, col - 1)]
    diagonal = [(row - 1, col - 1), (row - 1, col + 1),
                (row + 1, col + 1), (row + 1, col - 1)]
    return (eliminate_nonvalid_coords(orthogonal, mapshape),
            eliminate_nonvalid_coords(diagonal, mapshape))
#########################################################
def get_neighbours_coords_npy_indices(idx, mapshape, connectedness=8,
                                      yourself=False):
    """Return the valid neighbours of a flat (raveled) index of a 2D map.

    The previous implementation only bounds-checked against 0 and maplen, so
    neighbours "wrapped around" the left/right borders (e.g. the right
    neighbour of a row's last cell became the next row's first cell, and the
    diagonal pairs were appended with a single shared check). This version
    derives the row/column of idx and checks each border explicitly.

    Args:
        idx(int): npy (row-major flattened) index into the map
        mapshape(tuple): (nrows, ncols) of the map
        connectedness(int): 8 (with diagonals) or 4 (orthogonal only)
        yourself(bool): also include idx itself in the result

    Returns:
        list: flat indices of the valid neighbours — orthogonal first
        (c1..c4), then diagonals (c5..c8) when connectedness == 8

    The neighbourhood layout is:
        c5 c1 c6
        c4 c9 c2
        c8 c3 c7
    """
    nrows, ncols = mapshape
    row, col = divmod(idx, ncols)
    up, down = row > 0, row < nrows - 1
    left, right = col > 0, col < ncols - 1
    neighbours = []
    if up: neighbours.append(idx - ncols)                    # c1
    if right: neighbours.append(idx + 1)                     # c2
    if down: neighbours.append(idx + ncols)                  # c3
    if left: neighbours.append(idx - 1)                      # c4
    if connectedness == 8:
        if up and left: neighbours.append(idx - ncols - 1)   # c5
        if up and right: neighbours.append(idx - ncols + 1)  # c6
        if down and right: neighbours.append(idx + ncols + 1)# c7
        if down and left: neighbours.append(idx + ncols - 1) # c8
    if yourself: neighbours.append(idx)
    return neighbours
##########################################################
def eliminate_nonvalid_coords(coords, mapshape):
    """Filter out coordinates that fall outside the map.

    Args:
        coords(iterable of tuple): (row, col) positions
        mapshape(tuple): (height, width) of the map

    Returns:
        list: coordinates inside the map, original order preserved
    """
    h, w = mapshape
    return [(j, i) for j, i in coords if 0 <= j < h and 0 <= i < w]
##########################################################
def get_adjmatrix_from_image(image):
    """Build the full graph structure (see get_full_adjmatrix_from_npy)
    directly from an image file of the map.

    Args:
        image(str): image path

    Returns:
        Graph: graph structure of the traversable map
    """
    return get_full_adjmatrix_from_npy(parse_image(image))
##########################################################
def get_crossings_from_image(imagefile):
    """Get the crossing positions of the map stored in an image file.

    Args:
        imagefile: image file (path or data) accepted by parse_image

    Returns:
        result of find_crossings_squareshape on the parsed search map
        (per the original docstring, a set of crossing positions)
    """
    searchmap = parse_image(imagefile)
    return find_crossings_squareshape(searchmap)
##########################################################
def get_obstacles_from_image(imagefile):
    """Get the obstacle positions of the map stored in an image file.

    Args:
        imagefile: image file (path or data) accepted by parse_image

    Returns:
        set of tuples: (row, col) positions whose parsed value is OBSTACLE
    """
    searchmap = parse_image(imagefile)
    # np.where on a boolean condition returns one index array per axis;
    # zipping them pairwise yields the (row, col) obstacle positions.
    obstacle_indices = np.where(searchmap == OBSTACLE)
    return set(zip(*obstacle_indices))
##########################################################
def get_mapshape_from_searchmap(hashtable):
    """Infer the map shape from the keys of a search map.

    Keys are assumed to have the form (row, col); the key
    (max(row), max(col)) itself need not exist in the table.

    Args:
        hashtable(dict): key-value pairs keyed by (row, col) tuples

    Returns:
        int, int: height and width (largest index of each axis + 1)
    """
    max_row = max(pos[0] for pos in hashtable)
    max_col = max(pos[1] for pos in hashtable)
    return max_row + 1, max_col + 1
##########################################################
##########################################################
##########################################################
##########################################################
##########################################################
##########################################################
| 30.216475 | 140 | 0.573131 |
05c354eab5a376b1dcdf00dc912ca4e24bdc43ea | 2,438 | py | Python | luxor/controllers/types.py | sam007961/luxor | 31838c937b61bfbc400103d58ec5b5070471767e | [
"MIT"
] | null | null | null | luxor/controllers/types.py | sam007961/luxor | 31838c937b61bfbc400103d58ec5b5070471767e | [
"MIT"
] | 5 | 2020-09-06T15:44:13.000Z | 2020-11-02T11:30:22.000Z | luxor/controllers/types.py | sam007961/luxor | 31838c937b61bfbc400103d58ec5b5070471767e | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Union
from luxor.core.events import Event
from luxor.controllers.expressions import Var
Number = Union[int, float, Int]
| 30.098765 | 73 | 0.511895 |
05c47851eed298a1ca3b5574ee61fdfb8228a592 | 412 | py | Python | Les 1/1_1.py | tloader11/TICT-V1PROG-15 | dac7e991dcb11a397082bdceaf60a07b9bbc1a4a | [
"Unlicense"
] | null | null | null | Les 1/1_1.py | tloader11/TICT-V1PROG-15 | dac7e991dcb11a397082bdceaf60a07b9bbc1a4a | [
"Unlicense"
] | null | null | null | Les 1/1_1.py | tloader11/TICT-V1PROG-15 | dac7e991dcb11a397082bdceaf60a07b9bbc1a4a | [
"Unlicense"
] | null | null | null | 5 5 integer
5.0 5.0 float
5 % 2 1 int
5 > 1 True boolean
'5' '5' String
5 * 2 10 int
'5' * 2 '55' String
'5' + '2' '52' String
5 / 2 2.5 float
5 // 2 2 int
[5, 2, 1] [5,2,1] list?
5 in [1, 4, 6] False boolean
| 29.428571 | 35 | 0.279126 |
05c54a12ada174aedbee75dcfaa2218242c10ec6 | 1,270 | py | Python | edgecast/command_line.py | ganguera/edgecast | 43ab240698a50c1382eb11bdb79acc5683bc10ea | [
"MIT"
] | null | null | null | edgecast/command_line.py | ganguera/edgecast | 43ab240698a50c1382eb11bdb79acc5683bc10ea | [
"MIT"
] | null | null | null | edgecast/command_line.py | ganguera/edgecast | 43ab240698a50c1382eb11bdb79acc5683bc10ea | [
"MIT"
] | null | null | null | import argparse
import arrow
import json
import config
from . import EdgecastReportReader
from media_type import PLATFORM
| 27.608696 | 108 | 0.684252 |
05c66e3dcdf2a391e7cb2ae90afaebe8a08c59e9 | 3,483 | py | Python | skeletons/browser/browser.py | gbkim000/wxPython | b1604d71cf04801f9efa8b26b935561a88ef1daa | [
"BSD-2-Clause"
] | 80 | 2018-05-25T00:37:25.000Z | 2022-03-13T12:31:02.000Z | skeletons/browser/browser.py | gbkim000/wxPython | b1604d71cf04801f9efa8b26b935561a88ef1daa | [
"BSD-2-Clause"
] | 1 | 2021-01-08T20:22:52.000Z | 2021-01-08T20:22:52.000Z | skeletons/browser/browser.py | gbkim000/wxPython | b1604d71cf04801f9efa8b26b935561a88ef1daa | [
"BSD-2-Clause"
] | 32 | 2018-05-24T05:40:55.000Z | 2022-03-24T00:32:11.000Z | #!/usr/bin/python
"""
ZetCode wxPython tutorial
This program creates a browser UI.
author: Jan Bodnar
website: zetcode.com
last edited: May 2018
"""
import wx
from wx.lib.buttons import GenBitmapTextButton
if __name__ == '__main__':
main()
| 27.642857 | 83 | 0.584266 |
05c7ce421e8fdf3698aad581723528f431eaafbe | 1,673 | py | Python | model/tds_block.py | SABER-labs/SABERv2 | 028d403beadec3adebd51582fd8ef896a2fe3696 | [
"MIT"
] | 1 | 2022-03-02T02:52:24.000Z | 2022-03-02T02:52:24.000Z | model/tds_block.py | SABER-labs/SABERv2 | 028d403beadec3adebd51582fd8ef896a2fe3696 | [
"MIT"
] | null | null | null | model/tds_block.py | SABER-labs/SABERv2 | 028d403beadec3adebd51582fd8ef896a2fe3696 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
if __name__ == "__main__":
    # Ad-hoc benchmark: time one forward pass of TDSBlock on random data.
    model = TDSBlock(15, 10, 80, 0.1, 1)
    x = torch.rand(8, 15, 80, 400)  # random 4-D input batch
    import time
    start = time.perf_counter()
    model(x)  # forward pass only; the output is discarded
    end = time.perf_counter()
    print(f"Time taken: {(end-start)*1000:.3f}ms")
| 28.355932 | 77 | 0.545129 |
05c8724a622688c0f5c093058bd7213a2efddffc | 1,968 | py | Python | blackcompany/serve/vcs.py | clckwrkbdgr/blackcompany | 9164a0db3e9f11878ce12da6ebdf82a300e1c6f4 | [
"WTFPL"
] | null | null | null | blackcompany/serve/vcs.py | clckwrkbdgr/blackcompany | 9164a0db3e9f11878ce12da6ebdf82a300e1c6f4 | [
"WTFPL"
] | null | null | null | blackcompany/serve/vcs.py | clckwrkbdgr/blackcompany | 9164a0db3e9f11878ce12da6ebdf82a300e1c6f4 | [
"WTFPL"
] | null | null | null | from ._base import Endpoint
from ..util._six import Path
import bottle
from ..util import gitHttpBackend
def git_repo(route, repo_root, **serve_params):
    """ Defines Git repo endpoint on given route with given root.

    Creates and serves two Endpoint() objects, one for GET and one for
    POST; the POST endpoint is registered with read_data=False
    (presumably so the raw request body is left for the backend to read).
    Rest of parameters will be passed through to both Endpoint() objects.
    """
    backend = GitHTTPBackend(route, repo_root)
    Endpoint(
        route, None, method='GET',
        custom_handler=MethodHandler(backend.get, 'path:path'),
        **serve_params,
    ).serve()
    Endpoint(
        route, None, method='POST',
        custom_handler=MethodHandler(backend.post, 'path:path'),
        read_data=False,
        **serve_params,
    ).serve()
05cc0547376efd7b3d0398149b11f68433ccaf60 | 2,999 | py | Python | imaginaire/discriminators/cagan.py | zebincai/imaginaire | f5a707f449d93c33fbfe19bcd975a476f2c1dd7a | [
"RSA-MD"
] | null | null | null | imaginaire/discriminators/cagan.py | zebincai/imaginaire | f5a707f449d93c33fbfe19bcd975a476f2c1dd7a | [
"RSA-MD"
] | null | null | null | imaginaire/discriminators/cagan.py | zebincai/imaginaire | f5a707f449d93c33fbfe19bcd975a476f2c1dd7a | [
"RSA-MD"
] | null | null | null | # Copyright (C) 2020 NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
import torch
import torch.nn as nn
from imaginaire.layers import Conv2dBlock
from imaginaire.layers.misc import ApplyNoise
if __name__ == "__main__":
from imaginaire.config import Config
cfg = Config("D:/workspace/develop/imaginaire/configs/projects/cagan/LipMPV/base.yaml")
dis = Discriminator(cfg.dis, cfg.data)
batch = torch.randn((8, 6, 256, 192))
y = dis(batch)
print(y.shape)
| 40.527027 | 102 | 0.617206 |
05cc10143e791bcc38db23bf914cc748df6a3237 | 2,959 | py | Python | Chapter10/Ch10/server/database.py | henrryyanez/Tkinter-GUI-Programming-by-Example | c8a326d6034b5e54f77605a8ec840cb8fac89412 | [
"MIT"
] | 127 | 2018-08-27T16:34:43.000Z | 2022-03-22T19:20:53.000Z | Chapter10/Ch10/server/database.py | PiotrAdaszewski/Tkinter-GUI-Programming-by-Example | c8a326d6034b5e54f77605a8ec840cb8fac89412 | [
"MIT"
] | 8 | 2019-04-11T06:47:36.000Z | 2022-03-11T23:23:42.000Z | Chapter10/Ch10/server/database.py | PiotrAdaszewski/Tkinter-GUI-Programming-by-Example | c8a326d6034b5e54f77605a8ec840cb8fac89412 | [
"MIT"
] | 85 | 2018-04-30T19:42:21.000Z | 2022-03-30T01:22:54.000Z | import sqlite3
| 30.822917 | 117 | 0.630618 |
05cea8e33b54e9775229454c04e0071781d3127e | 938 | py | Python | ad_hoc_scripts/update_by_condition.py | IgorZyktin/MediaStorageSystem | df8d260581cb806eb54f320d63aa674c6175c17e | [
"MIT"
] | 2 | 2021-03-06T16:07:30.000Z | 2021-03-17T10:27:25.000Z | ad_hoc_scripts/update_by_condition.py | IgorZyktin/MediaStorageSystem | df8d260581cb806eb54f320d63aa674c6175c17e | [
"MIT"
] | null | null | null | ad_hoc_scripts/update_by_condition.py | IgorZyktin/MediaStorageSystem | df8d260581cb806eb54f320d63aa674c6175c17e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Non user friendly script.
"""
from mss.core.class_filesystem import Filesystem
def update_by_condition(root_path: str, theme: str):
    """Change records by condition.

    Walks every *.json metainfo file of the given theme and, for each
    record whose group_name is 'grand mal 1 rus', sets its sub_series to
    that same value, rewriting the file only when something changed.
    """
    fs = Filesystem()
    metainfo_dir = fs.join(root_path, theme, 'metainfo')
    for folder, filename, _name, ext in fs.iter_ext(metainfo_dir):
        if ext != '.json':
            continue
        full_path = fs.join(folder, filename)
        content = fs.read_json(full_path)
        changed = False
        for record in content.values():
            if record['group_name'] == 'grand mal 1 rus':
                record['sub_series'] = 'grand mal 1 rus'
                changed = True
        if changed:
            fs.write_json(full_path, content)
            print(f'Modified: {full_path}')
if __name__ == '__main__':
    # Ad-hoc run: archive root and theme are hard-coded on purpose
    # (see module docstring: "Non user friendly script").
    update_by_condition(
        root_path='D:\\BGC_ARCHIVE_TARGET\\',
        theme='bubblegum_crisis',
    )
| 26.055556 | 57 | 0.590618 |
05cf590b42b6da085a51776ee9e5aa949a057c25 | 2,555 | py | Python | 2.ReinforcementLearning/RL_Book/1-gridworld/environment_value_iteration.py | link-kut/deeplink_public | 688c379bfeb63156e865d78d0428f97d7d203cc1 | [
"MIT"
] | null | null | null | 2.ReinforcementLearning/RL_Book/1-gridworld/environment_value_iteration.py | link-kut/deeplink_public | 688c379bfeb63156e865d78d0428f97d7d203cc1 | [
"MIT"
] | 11 | 2020-01-28T22:33:49.000Z | 2022-03-11T23:41:08.000Z | 2.ReinforcementLearning/RL_Book/1-gridworld/environment_value_iteration.py | link-kut/deeplink_public | 688c379bfeb63156e865d78d0428f97d7d203cc1 | [
"MIT"
] | 2 | 2019-06-01T04:14:52.000Z | 2020-05-31T08:13:23.000Z | from environment import *
import random | 39.307692 | 94 | 0.600391 |
05cff405e8dd7ef93166ffc63471b8011294be84 | 8,289 | py | Python | csimpy/test.py | dewancse/csimpy | 58c32e40e5d991b4ca98df05e6f61020def475a9 | [
"Apache-2.0"
] | null | null | null | csimpy/test.py | dewancse/csimpy | 58c32e40e5d991b4ca98df05e6f61020def475a9 | [
"Apache-2.0"
] | null | null | null | csimpy/test.py | dewancse/csimpy | 58c32e40e5d991b4ca98df05e6f61020def475a9 | [
"Apache-2.0"
] | null | null | null | from enum import Enum
from math import *
from scipy import integrate
import matplotlib.pyplot as plt
from libcellml import *
import lxml.etree as ET
__version__ = "0.1.0"
LIBCELLML_VERSION = "0.2.0"
STATE_COUNT = 1
VARIABLE_COUNT = 29
VOI_INFO = {"name": "time", "units": "second", "component": "environment"}
STATE_INFO = [
{"name": "pH_ext", "units": "dimensionless", "component": "Concentrations"}
]
VARIABLE_INFO = [
{"name": "C_ext_NH4", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "C_ext_Na", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "C_int_H", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "C_int_NH4", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "C_int_Na", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "K_NHE3_H", "units": "mM", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "K_NHE3_NH4", "units": "mM", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "K_NHE3_Na", "units": "mM", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "XTxP0_NHE3_H", "units": "nmol_per_s_per_cm2", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "XTxP0_NHE3_NH4", "units": "nmol_per_s_per_cm2", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "XTxP0_NHE3_Na", "units": "nmol_per_s_per_cm2", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "C_ext_H", "units": "mM", "component": "Concentrations", "type": VariableType.ALGEBRAIC},
{"name": "alpha_ext_Na", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "beta_ext_H", "units": "dimensionless", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "gamma_ext_NH4", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "alpha_int_Na", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "beta_int_H", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "gamma_int_NH4", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "XTxP_NHE_Na", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "XTxP_NHE_H", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "XTxP_NHE_NH4", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "sum_NHE3", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "J_NHE3_Na", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "J_NHE3_H", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "J_NHE3_NH4", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "J_NHE3_Na_Max", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "plot_a", "units": "dimensionless", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "plot_b", "units": "dimensionless", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "K_H", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT}
]
# LSODA
start = 0.0
end = 1
numpoints = 1000
stepsize = (end - start) / numpoints
print(start, end, numpoints, stepsize)
states = create_states_array()
variables = create_variables_array()
initialize_states_and_constants(states, variables)
compute_computed_constants(variables) # added this line
temp = []
print("start: ", start)
print("end: ", end)
print("states: ", states)
solution = integrate.solve_ivp(func,[start, end], states, method='LSODA', max_step=stepsize, atol=1e-4, rtol=1e-6)
print(solution.t)
print(solution.y)
# graph
fig, ax = plt.subplots()
ax.plot(solution.y[0], temp, label='Line 1')
ax.set_xlabel('t')
ax.set_ylabel('y')
ax.set_title('Some Title')
ax.legend()
fig.savefig('test.png')
# # test
# def exponential_decay(t, y):
# return -0.5 * y
#
# sol = integrate.solve_ivp(exponential_decay, [0, 10], [2, 4, 8])
#
# print(sol.t)
# print(sol.y)
#
# fig2, ax2 = plt.subplots()
# ax2.plot(sol.t, sol.y[0], label='Line 1')
# ax2.plot(sol.t, sol.y[1], label='Line 2')
# ax2.plot(sol.t, sol.y[2], label='Line 3')
# ax2.set_xlabel('x label')
# ax2.set_ylabel('y label')
# ax2.set_title('Simple Plot')
# ax2.legend()
# fig2.savefig('test.png')
# convert cellml1.0 or 1.1 to 2.0
# with open('../tests/fixtures/chang_fujita_1999.xml') as f:
# read_data = f.read()
# f.close()
#
# p = Parser()
# importedModel = p.parseModel(read_data)
#
# # parsing cellml 1.0 or 1.1 to 2.0
# dom = ET.fromstring(read_data.encode("utf-8"))
# xslt = ET.parse("../tests/fixtures/cellml1to2.xsl")
# transform = ET.XSLT(xslt)
# newdom = transform(dom)
#
# mstr = ET.tostring(newdom, pretty_print=True)
# mstr = mstr.decode("utf-8")
#
# # parse the string representation of the model to access by libcellml
# importedModel = p.parseModel(mstr)
#
# f = open('../tests/fixtures/chang_fujita_1999.xml', 'w')
# f.write(mstr) | 42.507692 | 268 | 0.68054 |
05d337eef8af353471796ace517f3b818298177f | 2,342 | py | Python | camera_calib/image.py | justinblaber/camera_calib_python | 9427ff31d55af7619e7aee74136446a31d10def0 | [
"Apache-2.0"
] | 3 | 2020-10-14T10:24:09.000Z | 2021-09-19T20:48:40.000Z | camera_calib/image.py | justinblaber/camera_calib_python | 9427ff31d55af7619e7aee74136446a31d10def0 | [
"Apache-2.0"
] | 1 | 2021-09-28T02:06:42.000Z | 2021-09-28T02:06:42.000Z | camera_calib/image.py | justinblaber/camera_calib_python | 9427ff31d55af7619e7aee74136446a31d10def0 | [
"Apache-2.0"
] | 2 | 2021-01-07T20:13:31.000Z | 2021-01-08T18:16:53.000Z | # AUTOGENERATED! DO NOT EDIT! File to edit: image.ipynb (unless otherwise specified).
__all__ = ['Img', 'FileImg', 'File16bitImg', 'ArrayImg']
# Cell
import warnings
import numpy as np
import torch
from PIL import Image
from .utils import *
# Cell
# Cell
# Cell
# Cell | 32.527778 | 91 | 0.61614 |
05d462566b4d5254250d288dd86dc436b3f67818 | 2,144 | py | Python | einshape/src/jax/jax_ops.py | LaudateCorpus1/einshape | b1a0e696c20c025074f09071790b97b42754260d | [
"Apache-2.0"
] | 38 | 2021-07-23T12:00:08.000Z | 2022-03-18T08:40:33.000Z | einshape/src/jax/jax_ops.py | LaudateCorpus1/einshape | b1a0e696c20c025074f09071790b97b42754260d | [
"Apache-2.0"
] | 1 | 2021-10-05T16:20:23.000Z | 2021-10-05T16:20:23.000Z | einshape/src/jax/jax_ops.py | LaudateCorpus1/einshape | b1a0e696c20c025074f09071790b97b42754260d | [
"Apache-2.0"
] | 3 | 2021-08-04T16:18:29.000Z | 2021-11-13T14:33:20.000Z | # coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Einshape implementation for JAX."""
from typing import Any, Union
from einshape.src import abstract_ops
from einshape.src import backend
from jax import lax
import jax.numpy as jnp
def einshape(
    equation: str,
    value: Union[jnp.ndarray, Any],
    **index_sizes: int
) -> jnp.ndarray:
  """Reshapes `value` according to the given Shape Equation.

  Args:
    equation: The Shape Equation specifying the index regrouping and
      reordering.
    value: Input tensor, or tensor-like object (converted via jnp.array).
    **index_sizes: Sizes of indices, where they cannot be inferred
      from the input shape.

  Returns:
    Tensor derived from `value` by reshaping as specified by `equation`.
  """
  # Normalise tensor-likes to a jnp.ndarray so .shape is available below.
  tensor = value if isinstance(value, jnp.ndarray) else jnp.array(value)
  return _JaxBackend().exec(equation, tensor, tensor.shape, **index_sizes)
| 33.5 | 80 | 0.726213 |
05d4760733051270e73120a1ac9a61ea86e6cde5 | 1,800 | py | Python | DOOM.py | ariel139/DOOM-port-scanner | 328678b9f79855de472967f1a3e4b3e9181a3706 | [
"MIT"
] | 6 | 2020-11-24T06:51:02.000Z | 2022-02-26T23:19:46.000Z | DOOM.py | ariel139/DOOM-port-scanner | 328678b9f79855de472967f1a3e4b3e9181a3706 | [
"MIT"
] | null | null | null | DOOM.py | ariel139/DOOM-port-scanner | 328678b9f79855de472967f1a3e4b3e9181a3706 | [
"MIT"
] | null | null | null | import socket
from IPy import IP
print("""
You are using the DOOM Port scanner.
This tool is for educational purpose ONLY!!!!
1. You can change the range of the ports you want to scan.
2. You can change the speedof the scan
3. you can scan a list of targets by using ', ' after each target
4. You can scan both URL links and both IP's
""")
# ip adresess
targets = input("enter targets or URL's ")
# min range of ports
min_port = int(input("enter min number of ports "))
# max range of ports
max_port = int(input("enter max number of ports "))
try:
speed = int(input("Enter the speed you want to scan in (try using a Irrational number, deffult is 0.1) "))
except:
speed = 0.1
# check if the ip is URL link or ip
# scan port function
# converted ip adress to link and int ip
if ', ' in targets:
for ip_add in targets.split(','):
multi_targets(ip_add.strip(' '))
else:
multi_targets(targets)
| 24.657534 | 111 | 0.597778 |
05d4a6a91e58732f8757086328fccaf5f8b61a70 | 9,380 | py | Python | finding_models/testing_classifiers.py | NtMalDetect/NtMalDetect | 5bf8f35491bf8081d0b721fa1bf90582b410ed74 | [
"MIT"
] | 10 | 2018-01-04T07:59:59.000Z | 2022-01-17T08:56:33.000Z | finding_models/testing_classifiers.py | NtMalDetect/NtMalDetect | 5bf8f35491bf8081d0b721fa1bf90582b410ed74 | [
"MIT"
] | 2 | 2020-01-12T19:32:05.000Z | 2020-04-11T09:38:07.000Z | finding_models/testing_classifiers.py | NtMalDetect/NtMalDetect | 5bf8f35491bf8081d0b721fa1bf90582b410ed74 | [
"MIT"
] | 1 | 2018-08-31T04:13:43.000Z | 2018-08-31T04:13:43.000Z | from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
from sklearn.utils import shuffle
useTFIDF = True
showSampleVector = False
showMostInformativeFeatures = True
howManyInformativeFeatures = 10
nGRAM1 = 10
nGRAM2 = 10
weight = 10
ask = input("Do you want to specify parameters or use default values? Input 'T' or 'F'. ")
if ask == "T":
useTFIDFStr = input("Do you want to use tfidfVectorizer or CountVectorizer? Type T for tfidfVectorizer and F for CountVectorizer ")
if useTFIDFStr == "T":
useTFIDF = True
else:
useTFIDF = False
showSampleVectorStr = input("Do you want to print an example vectorized corpus? (T/F) ")
if showSampleVectorStr == "T":
showSampleVector = True
else:
showSampleVector = False
showMostInformativeFeaturesStr = input("Do you want to print the most informative feature in some of the classifiers? (T/F) ")
if showMostInformativeFeaturesStr == "T":
showMostInformativeFeatures = True
howManyInformativeFeatures = int(input("How many of these informative features do you want to print for each binary case? Input a number "))
else:
showMostInformativeFeatures = False
nGRAM1 = int(input("N-Gram lower bound (Read README.md for more information)? Input a number "))
nGRAM2 = int(input("N-Gram Upper bound? Input a number "))
weight = int(input("What weight do you want to use to separate train & testing? Input a number "))
main_corpus = []
main_corpus_target = []

my_categories = ['benign', 'malware']

# feeding corpus the testing data
print("Loading system call database for categories:")
print(my_categories if my_categories else "all")

import glob
import os

malCOUNT = 0
benCOUNT = 0

# Each *.txt file is one trace: its lines (individual system calls) are
# joined into a single space-separated document. Malware traces from
# ./sysMAL are labelled 1; benign traces from ./sysBEN are labelled 0.
for filename in glob.glob(os.path.join('./sysMAL', '*.txt')):
    # Context manager closes the handle (the original leaked open files).
    with open(filename, "r") as fMAL:
        aggregate = ""
        for line in fMAL:
            linea = line[:(len(line)-1)]  # drop the trailing newline char
            aggregate += " " + linea
    main_corpus.append(aggregate)
    main_corpus_target.append(1)
    malCOUNT += 1

for filename in glob.glob(os.path.join('./sysBEN', '*.txt')):
    with open(filename, "r") as fBEN:
        aggregate = ""
        for line in fBEN:
            linea = line[:(len(line) - 1)]
            aggregate += " " + linea
    main_corpus.append(aggregate)
    main_corpus_target.append(0)
    benCOUNT += 1
# shuffling the dataset
main_corpus_target, main_corpus = shuffle(main_corpus_target, main_corpus, random_state=0)
# weight as determined in the top of the code
train_corpus = main_corpus[:(weight*len(main_corpus)//(weight+1))]
train_corpus_target = main_corpus_target[:(weight*len(main_corpus)//(weight+1))]
test_corpus = main_corpus[(len(main_corpus)-(len(main_corpus)//(weight+1))):]
test_corpus_target = main_corpus_target[(len(main_corpus)-len(main_corpus)//(weight+1)):]
print("%d documents - %0.3fMB (training set)" % (
len(train_corpus_target), train_corpus_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(test_corpus_target), test_corpus_size_mb))
print("%d categories" % len(my_categories))
print()
print("Benign Traces: "+str(benCOUNT)+" traces")
print("Malicious Traces: "+str(malCOUNT)+" traces")
print()
print("Extracting features from the training data using a sparse vectorizer...")
t0 = time()
if useTFIDF:
vectorizer = TfidfVectorizer(ngram_range=(nGRAM1, nGRAM2), min_df=1, use_idf=True, smooth_idf=True) ##############
else:
vectorizer = CountVectorizer(ngram_range=(nGRAM1, nGRAM2))
analyze = vectorizer.build_analyzer()
if showSampleVector:
print(analyze(test_corpus[1]))
X_train = vectorizer.fit_transform(train_corpus)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, train_corpus_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer...")
t0 = time()
X_test = vectorizer.transform(test_corpus)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, test_corpus_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# show which are the definitive features
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(penalty=penalty, dual=False,
tol=1e-3), showMostInformativeFeatures))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty), showMostInformativeFeatures))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False,
tol=1e-3))),
('classification', LinearSVC(penalty="l2"))])))
# plotting results
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| 31.059603 | 150 | 0.698294 |
05d5479edfdc67ed72d1fed7ba706e163051f970 | 5,953 | py | Python | neutron/tests/fullstack/test_firewall.py | knodir/neutron | ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8 | [
"Apache-2.0"
] | 1 | 2018-10-19T01:48:37.000Z | 2018-10-19T01:48:37.000Z | neutron/tests/fullstack/test_firewall.py | knodir/neutron | ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | neutron/tests/fullstack/test_firewall.py | knodir/neutron | ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from neutron_lib import constants
from oslo_log import log as logging
from oslo_utils import uuidutils
from neutron.agent.common import ovs_lib
from neutron.agent.linux import iptables_firewall
from neutron.agent.linux import iptables_manager
from neutron.agent.linux.openvswitch_firewall import iptables as ovs_iptables
from neutron.common import utils
from neutron.tests.common import machine_fixtures
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment
from neutron.tests.fullstack.resources import machine
LOG = logging.getLogger(__name__)
| 38.908497 | 79 | 0.666891 |
05d679b96fcc27f56541b2f87e6ba4b22f90adbe | 709 | py | Python | Analysis/pdf_to_txt.py | ashishnitinpatil/resanalysersite | 0604d2fed4760be741c4d90b6d230d0f2cd8bf9e | [
"CC-BY-4.0"
] | null | null | null | Analysis/pdf_to_txt.py | ashishnitinpatil/resanalysersite | 0604d2fed4760be741c4d90b6d230d0f2cd8bf9e | [
"CC-BY-4.0"
] | null | null | null | Analysis/pdf_to_txt.py | ashishnitinpatil/resanalysersite | 0604d2fed4760be741c4d90b6d230d0f2cd8bf9e | [
"CC-BY-4.0"
] | null | null | null | from pdfminer.pdfinterp import PDFResourceManager, process_pdf
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from cStringIO import StringIO
with open('C:\\Users\\ashis\\Desktop\\CIVIL ENGINEERING.txt', 'w') as to_write:
to_write.write(convert_pdf('C:\\Users\\ashis\\Desktop\\CIVIL ENGINEERING.pdf'))
| 27.269231 | 83 | 0.712271 |
05d6c824429b4f5fccdfe1433815eb6c96e18c8f | 480 | py | Python | local/handler/TravisHandler.py | fasterit/supybot-github | 37b80046c0f0d5a66b2107a63e380002adbb66f5 | [
"MIT"
] | 7 | 2016-07-16T22:16:37.000Z | 2021-06-14T20:45:37.000Z | local/handler/TravisHandler.py | fasterit/supybot-github | 37b80046c0f0d5a66b2107a63e380002adbb66f5 | [
"MIT"
] | 30 | 2015-06-03T22:40:28.000Z | 2022-02-11T08:49:44.000Z | local/handler/TravisHandler.py | fasterit/supybot-github | 37b80046c0f0d5a66b2107a63e380002adbb66f5 | [
"MIT"
] | 5 | 2018-01-12T21:28:50.000Z | 2020-10-01T13:44:09.000Z | from ..utility import *
| 34.285714 | 82 | 0.554167 |
05d8328fda38c6d6fda5c13e5f09ac74925e7f3b | 10,417 | py | Python | pyart/io/tests/test_mdv_radar.py | josephhardinee/pyart | 909cd4a36bb4cae34349294d2013bc7ad71d0969 | [
"OLDAP-2.6",
"Python-2.0"
] | null | null | null | pyart/io/tests/test_mdv_radar.py | josephhardinee/pyart | 909cd4a36bb4cae34349294d2013bc7ad71d0969 | [
"OLDAP-2.6",
"Python-2.0"
] | null | null | null | pyart/io/tests/test_mdv_radar.py | josephhardinee/pyart | 909cd4a36bb4cae34349294d2013bc7ad71d0969 | [
"OLDAP-2.6",
"Python-2.0"
] | null | null | null | """ Unit Tests for Py-ART's io/mdv_radar.py module. """
import numpy as np
from numpy.testing import assert_almost_equal
from numpy.ma.core import MaskedArray
import pyart
############################################
# read_mdv tests (verify radar attributes) #
############################################
# read in the sample file and create a a Radar object
radar = pyart.io.read_mdv(pyart.testing.MDV_PPI_FILE)
# time attribute
# range attribute
# fields attribute is tested later
# metadata attribute
# scan_type attribute
# latitude attribute
# longitude attribute
# altitude attribute
# altitude_agl attribute
# sweep_number attribute
# sweep_mode attribute
# fixed_angle attribute
# sweep_start_ray_index attribute
# sweep_end_ray_index attribute
# target_scan_rate attribute
# azimuth attribute
# elevation attribute
# scan_rate attribute
# antenna_transition attribute
# instrument_parameters attribute
# radar_parameters attribute
# radar_calibration attribute
# ngates attribute
# nrays attribute
# nsweeps attribute
####################
# fields attribute #
####################
def check_field_dic(field):
    """ Assert that the named field of the module-level ``radar`` carries
    all required metadata keys. """
    field_dic = radar.fields[field]
    for required_key in ('standard_name', 'units', '_FillValue', 'coordinates'):
        assert required_key in field_dic
#############
# RHI tests #
#############
RADAR_RHI = pyart.io.read_mdv(pyart.testing.MDV_RHI_FILE,
delay_field_loading=True)
# nsweeps attribute
# sweep_number attribute
# sweep_mode attribute
# fixed_angle attribute
# sweep_start_ray_index attribute
# sweep_end_ray_index attribute
# azimuth attribute
# elevation attribute
# field data
| 29.179272 | 75 | 0.707977 |
05d878ca2e433fc4c0d9802abde19f10dbc8863e | 2,430 | py | Python | model/UserAccess.py | EmbeddedSoftwareCaiShuPeng/vehicleDispatcher | aacebb1656fe095485041de0bcbb67627e384abc | [
"MIT"
] | 1 | 2016-04-27T14:23:53.000Z | 2016-04-27T14:23:53.000Z | model/UserAccess.py | EmbeddedSoftwareCaiShuPeng/vehicleDispatcher | aacebb1656fe095485041de0bcbb67627e384abc | [
"MIT"
] | null | null | null | model/UserAccess.py | EmbeddedSoftwareCaiShuPeng/vehicleDispatcher | aacebb1656fe095485041de0bcbb67627e384abc | [
"MIT"
] | null | null | null | import uuid, json, os, pymongo
from models import User
| 24.545455 | 59 | 0.475309 |
05ddcfc4ce86d56934f5e0733a719cb7c2450e6f | 969 | py | Python | sdk/python/pulumi_google_native/genomics/v1alpha2/_enums.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 44 | 2021-04-18T23:00:48.000Z | 2022-02-14T17:43:15.000Z | sdk/python/pulumi_google_native/genomics/v1alpha2/_enums.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 354 | 2021-04-16T16:48:39.000Z | 2022-03-31T17:16:39.000Z | sdk/python/pulumi_google_native/genomics/v1alpha2/_enums.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 8 | 2021-04-24T17:46:51.000Z | 2022-01-05T10:40:21.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'DiskType',
]
| 30.28125 | 136 | 0.672859 |
05df1e31c5373f19f615a0dfa51f726a3fbefbbb | 634 | py | Python | plugins/startHelp.py | REX-BOTZ/MegaUploaderbot-1 | 025fd97344da388fe607f5db73ad9f4435f51baa | [
"Apache-2.0"
] | 2 | 2021-11-12T13:15:03.000Z | 2021-11-13T12:17:33.000Z | plugins/startHelp.py | REX-BOTZ/MegaUploaderbot-1 | 025fd97344da388fe607f5db73ad9f4435f51baa | [
"Apache-2.0"
] | null | null | null | plugins/startHelp.py | REX-BOTZ/MegaUploaderbot-1 | 025fd97344da388fe607f5db73ad9f4435f51baa | [
"Apache-2.0"
] | 1 | 2022-01-07T09:55:53.000Z | 2022-01-07T09:55:53.000Z | #!/usr/bin/env python3
"""Importing"""
# Importing Common Files
from botModule.importCommon import *
"""Start Handler"""
"""Help Handler"""
| 26.416667 | 71 | 0.728707 |
05e108ee92867afb8794b956bcf9b413dc00ac01 | 206 | py | Python | webSys/dbweb/util/__init__.py | Qiumy/FIF | 8c9c58504ecab510dc0a96944f0031a3fd513d74 | [
"Apache-2.0"
] | 2 | 2018-12-21T02:01:03.000Z | 2019-10-17T08:07:04.000Z | webSys/dbweb/util/__init__.py | Qiumy/FIF | 8c9c58504ecab510dc0a96944f0031a3fd513d74 | [
"Apache-2.0"
] | null | null | null | webSys/dbweb/util/__init__.py | Qiumy/FIF | 8c9c58504ecab510dc0a96944f0031a3fd513d74 | [
"Apache-2.0"
] | 1 | 2018-06-01T07:56:09.000Z | 2018-06-01T07:56:09.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Blueprint
filter_blueprint = Blueprint('filters', __name__)
# Register all the filter.
from . import time_process, text_process, user_manage | 29.428571 | 53 | 0.747573 |
05e10cbd60c9a8c4e9d6e849c57e56e13a3dc1f5 | 3,596 | py | Python | Code/network_model_HiCoDe.py | AbinavRavi/Network_Analysis_Eur_Parl | dea84d3375eea07676e0193d575e3deef76312bc | [
"MIT"
] | 1 | 2020-12-15T16:35:20.000Z | 2020-12-15T16:35:20.000Z | Code/network_model_HiCoDe.py | AbinavRavi/Network_Analysis_Eur_Parl | dea84d3375eea07676e0193d575e3deef76312bc | [
"MIT"
] | null | null | null | Code/network_model_HiCoDe.py | AbinavRavi/Network_Analysis_Eur_Parl | dea84d3375eea07676e0193d575e3deef76312bc | [
"MIT"
] | null | null | null | import numpy as np
import scipy as sp
import pandas as pd
import ast
import itertools
from itertools import product
from collections import Counter
import networkx as nx
import network_utils as nu
import hicode as hc
import matplotlib.pyplot as plt
import matplotlib.cm as cm

plt.style.use('classic')

# -----------------------------------------------------------------------------
# Load the per-sitting topic data and slide a 3-month window over the data
# (58 windows, the first starting 1999-07-01).
# -----------------------------------------------------------------------------
topicDF = pd.read_csv('../Topics/topicsData350.csv')
topicDF['date'] = pd.to_datetime(topicDF['date'])

sit = 0              # number of non-empty windows actually analysed
count = Counter([])  # accumulates candidate (node, node) pairs across windows

for i in range(58):
    # Window start: i months after 1999-07.  Window end: three months later.
    year = 1999 + (i + 6) // 12
    month = (i + 6) % 12 + 1
    date = '{:4d}-{:02d}-01'.format(year, month)
    year = 1999 + (i + 9) // 12
    month = (i + 9) % 12 + 1
    date2 = '{:4d}-{:02d}-01'.format(year, month)

    topicDF_part = topicDF[(topicDF.date < date2) & (topicDF.date >= date)]
    if topicDF_part.shape[0] == 0:
        continue
    sit += 1

    # Log the processed window.  The context manager fixes the original
    # resource leak: the file was re-opened on every iteration and never
    # closed.
    with open('../data/outliers.txt', 'a') as f:
        f.write('{:s}\n'.format(date))
    print(date)

    # -------------------------------------------------------------------------
    # Build the topic network and fold out the bipartite bottom nodes.
    # -------------------------------------------------------------------------
    network = nu.build_network(topicDF_part, 350, exclude=[])
    bottom_nodes = [n for n in network.nodes() if n not in range(350)]
    network = nu.fold_network(network, bottom_nodes, mode='single')
    network = nu.normalize_edgeweight(network)

    # -------------------------------------------------------------------------
    # HiCoDe community detection: keep node pairs that are split by the first
    # (dominant) partition but co-assigned in every subsequent layer.
    # -------------------------------------------------------------------------
    networks, partitions = hc.hicode(network, True)
    candidates = [(u, v) for u, v in product(network.nodes(), network.nodes())
                  if u != v and partitions[0][u] != partitions[0][v]]
    # Renamed loop variable from 'i' so it no longer shadows the window index.
    for layer in range(1, len(partitions)):
        candidates = [(u, v) for u, v in candidates
                      if partitions[layer][u] == partitions[layer][v]]
    count += Counter(candidates)

# -----------------------------------------------------------------------------
# Rank candidate pairs by how often they recurred across all windows and
# write the report.
# -----------------------------------------------------------------------------
count = dict(count)
count = sorted(count.items(), key=lambda kv: kv[1], reverse=True)
with open('../Results_Hicode/first_session_redweight.txt', 'w') as f:
    f.write('Total sittings: {:d}\n\n'.format(int(sit)))
    for k, v in count:
        f.write('{:s}: {:d}, {:f}\n'.format(str(k), int(v), v / sit))
05e2589d4291356b8e585fa87a27f0d7fe177954 | 209 | py | Python | py_battlescribe/shared/rules.py | akabbeke/py_battlescribe | 7f96d44295d46810268e666394e3e3238a6f2c61 | [
"MIT"
] | 1 | 2021-11-17T22:00:21.000Z | 2021-11-17T22:00:21.000Z | py_battlescribe/shared/rules.py | akabbeke/py_battlescribe | 7f96d44295d46810268e666394e3e3238a6f2c61 | [
"MIT"
] | null | null | null | py_battlescribe/shared/rules.py | akabbeke/py_battlescribe | 7f96d44295d46810268e666394e3e3238a6f2c61 | [
"MIT"
] | null | null | null | from ..bs_node.iterable import BSNodeIterable
from ..bs_reference.iter import BSReferenceIter
| 26.125 | 47 | 0.794258 |
05e43c552c5879146cf3f036c106616fa493ebaa | 5,487 | py | Python | priorgen/pca_utils.py | joshjchayes/PriorGen | 228be0b06dca29ad2ad33ae216f494eaead6161f | [
"MIT"
] | 1 | 2021-12-09T10:29:20.000Z | 2021-12-09T10:29:20.000Z | priorgen/pca_utils.py | joshjchayes/PriorGen | 228be0b06dca29ad2ad33ae216f494eaead6161f | [
"MIT"
] | null | null | null | priorgen/pca_utils.py | joshjchayes/PriorGen | 228be0b06dca29ad2ad33ae216f494eaead6161f | [
"MIT"
] | null | null | null | '''
pca_utils.py
Module containing functions to run PCAs, and generate diagnostic plots
'''
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
def run_PCA(parameters, observables, n_components):
    '''
    Reduce the dimensionality of the observables with a principal component
    analysis.

    Parameters
    ----------
    parameters : array_like, shape (N, M)
        Physical parameter values for each training point.  Not used by the
        PCA itself, but kept so the analysis entry points share a signature.
    observables : array_like, shape (N, X)
        One 1D observable array per parameter set; entries are assumed to be
        directly comparable across rows.
    n_components : int
        Number of principal components to retain.

    Returns
    -------
    pca : sklearn.decomposition.PCA
        The fitted scikit-learn PCA object.
    reduced_d_observables : array_like, shape (N, n_components)
        The observables projected onto the retained components.
    '''
    pca = PCA(n_components=n_components)
    # PCA.fit returns the estimator itself, so the fitted object can be
    # used directly for the projection.
    pca.fit(observables)
    reduced_d_observables = pca.transform(observables)
    return pca, reduced_d_observables
def pca_plot(parameters, observables, n_components, save=True,
             save_path='PCA_plot.pdf'):
    '''
    Plot the explained variance of the leading principal components (top
    panel, log scale) together with the cumulative explained variance
    (bottom panel).

    Parameters
    ----------
    parameters : array_like, shape (N, M)
        Physical parameter values for each training point; forwarded to
        ``run_PCA``.
    observables : array_like, shape (N, X)
        One 1D observable array per parameter set.
    n_components : int
        Number of principal components to keep and plot.
    save : bool, optional
        If True, save the figure to ``save_path``.  Default is True.
    save_path : str, optional
        Destination for the saved figure.  Default is 'PCA_plot.pdf'.

    Returns
    -------
    fig : matplotlib.Figure
        The two-panel PCA diagnostic plot.
    '''
    pca, _ = run_PCA(parameters, observables, n_components)
    explained = pca.explained_variance_ratio_
    cumulative = np.cumsum(explained).round(4)

    component_index = np.arange(n_components)
    fig, (ax_var, ax_cum) = plt.subplots(2, 1, sharex=True)

    # Per-component variance on a log scale, since trailing components
    # typically carry orders of magnitude less variance.
    ax_var.bar(component_index, explained, label='Associated variance')
    ax_var.set_ylabel('Fractional variance')
    ax_var.set_yscale('log')

    ax_cum.plot(component_index, cumulative, 'r', label='Cumulative variance')
    ax_cum.set_xlabel('Principal component')
    ax_cum.set_ylabel('Cumulative variance')
    ax_cum.margins(x=0.01)

    fig.tight_layout()
    fig.subplots_adjust(hspace=0)

    if save:
        fig.savefig(save_path)

    return fig
def find_required_components(parameters, observables, variance):
    '''
    Calculate the number of principal components required for the reduced
    dimensionality observables to contain a given fraction of explained
    variance.

    Parameters
    ----------
    parameters : array_like, shape (N, M)
        Physical parameter values for each training point; forwarded to
        ``run_PCA``.
    observables : array_like, shape (N, X)
        One 1D observable array per parameter set.
    variance : float
        The fraction of explained variance the retained principal
        components must contain.  Must satisfy ``0 <= variance < 1``.

    Returns
    -------
    n_components : int
        The smallest number of principal components required to contain
        the specified fraction of explained variance.

    Raises
    ------
    ValueError
        If ``variance`` lies outside the half-open interval [0, 1).
    '''
    if not 0 <= variance < 1:
        # The original message said 'between 0 and 1', which did not match
        # the half-open interval actually enforced by the check above.
        raise ValueError(
            'variance must be in the interval [0, 1), got {}'.format(variance))

    # Run PCA keeping every component so the full cumulative curve is known.
    pca, _ = run_PCA(parameters, observables, None)
    cumulative_variance = np.cumsum(pca.explained_variance_ratio_)

    # Indices where the cumulative explained variance first reaches the
    # requested threshold; +1 converts the 0-based index into a count.
    sufficient = np.where(cumulative_variance >= variance)[0]
    if sufficient.size == 0:
        # Floating point round-off can leave the final cumulative value
        # marginally below the threshold; fall back to all components
        # instead of raising a bare IndexError.
        return len(cumulative_variance)
    n_PCs = sufficient[0] + 1

    if n_PCs > 30:
        print('WARNING: {} principal components are required - this may lead to slow run times.'.format(n_PCs))

    return n_PCs
| 35.862745 | 111 | 0.686896 |
05e5ab63cfbf61b1260c3430dac86bcf4cae1b06 | 17,452 | py | Python | prompt_tuning/data/super_glue.py | techthiyanes/prompt-tuning | 9f4d7082aa6dbd955e38488d6d3fa5a7c039f6c7 | [
"Apache-2.0"
] | 108 | 2021-11-05T21:44:27.000Z | 2022-03-31T14:19:30.000Z | prompt_tuning/data/super_glue.py | techthiyanes/prompt-tuning | 9f4d7082aa6dbd955e38488d6d3fa5a7c039f6c7 | [
"Apache-2.0"
] | 172 | 2022-02-01T00:08:39.000Z | 2022-03-31T12:44:07.000Z | prompt_tuning/data/super_glue.py | dumpmemory/prompt-tuning | bac77e4f5107b4a89f89c49b14d8fe652b1c5734 | [
"Apache-2.0"
] | 9 | 2022-01-16T11:55:18.000Z | 2022-03-06T23:26:36.000Z | # Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Special version of the SuperGlue Tasks.
The main task formats here are:
* super_glue_{name}_v102_examples
* mt5_super_glue_{name}_v102_examples
* taskless_super_glue_{name}_v102
* taskless_super_glue_{name}_v102_examples
* mt5_taskless_super_glue_{name}_v102
* mt5_taskless_super_glue_{name}_v102_examples
Any task that starts with `mT5` uses the `mT5` vocab. Any task that ends with
`examples` is setup to log intermediate examples to tensorboard. Any task with
`taskless` does not have the task name as the initial text token (like t5 tasks
do). Any task with `task_index` in the name has a special task index as the
initial post-integerization token.
"""
import functools
from prompt_tuning.data import features
from prompt_tuning.data import metrics as pt_metrics
from prompt_tuning.data import postprocessors as pt_postprocessors
from prompt_tuning.data import preprocessors as pt_preprocessors
from prompt_tuning.data import utils
import seqio
from t5.data import postprocessors
from t5.data import preprocessors
from t5.data.glue_utils import get_glue_postprocess_fn
from t5.data.glue_utils import get_glue_text_preprocessor
from t5.data.glue_utils import get_super_glue_metric
from t5.evaluation import metrics
import tensorflow_datasets as tfds
# Map every SuperGlue builder-config name to a task index.  The alias dict
# folds tasks that share a format onto one index: "wsc.fixed" reuses "wsc",
# and the diagnostic sets "axb"/"axg" reuse "rte" -- presumably because they
# follow the RTE format (axb is rekeyed to RTE's sentence1/sentence2 fields
# elsewhere in this module); confirm against utils.task_mapping.
super_glue_task_indexer = utils.task_mapping(
    tuple(b.name
          for b in tfds.text.super_glue.SuperGlue.builder_configs.values()), {
              "wsc.fixed": "wsc",
              "axb": "rte",
              "axg": "rte"
          })
for model_prefix, feats in features.MODEL_TO_FEATURES.items():
for log_examples in (True, False):
# ========== SuperGlue ==========
# This section adds the core SuperGlue tasks. We do not include WSC in this
# loop WSC has different setting for training and validation because t5
# casts it as a short text generation task instead of as classification (via
# generation of class labels). We will add that as a mixture later.
for b in tfds.text.super_glue.SuperGlue.builder_configs.values():
if "wsc" in b.name:
continue
if log_examples:
postprocess_fn = functools.partial(
pt_postprocessors.postprocess_with_examples,
get_glue_postprocess_fn(b))
metric_fns = [
functools.partial(pt_metrics.metric_with_examples, func)
for func in get_super_glue_metric(b.name)
] + [functools.partial(pt_metrics.text_examples, task_name=b.name)]
examples_suffix = "_examples"
else:
postprocess_fn = get_glue_postprocess_fn(b)
metric_fns = get_super_glue_metric(b.name)
examples_suffix = ""
# The axb task needs to be rekeyed before we apply the glue text
# preprocessor, instead of detecting this and registering axb different
# (which would need to be repeated for each variant of the dataset we
# have) we have a list of preprocessors, for most tasks this is empty and
# for axb it has the rekey function. Then when we register a task we add
# the text processor to this list and it all works out. We can't
# predefined the full list upfront (like they do in t5) because the actual
# text preprocessor can be different for tasks like the taskless version.
pre_preprocessors = []
if b.name == "axb":
pre_preprocessors = [
functools.partial(
preprocessors.rekey,
key_map={
"premise": "sentence1",
"hypothesis": "sentence2",
"label": "label",
"idx": "idx"
})
]
# The default tasks have already be register elsewhere so only add the
# example logging version
if log_examples:
seqio.TaskRegistry.add(
f"{model_prefix}super_glue_{b.name}_v102{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name=f"super_glue/{b.name}:1.0.2",
splits=["test"] if b.name in ["axb", "axg"] else None),
preprocessors=pre_preprocessors + [
get_glue_text_preprocessor(b), seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats,
)
# This version of the task removes the initial text token of the dataset
# name
seqio.TaskRegistry.add(
f"{model_prefix}taskless_super_glue_{b.name}_v102{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name=f"super_glue/{b.name}:1.0.2",
splits=["test"] if b.name in ["axb", "axg"] else None),
preprocessors=pre_preprocessors + [
get_glue_text_preprocessor(b),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats,
)
# This version of the task adds a task index to the first token.
seqio.TaskRegistry.add(
f"{model_prefix}task_index_super_glue_{b.name}_v102{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name=f"super_glue/{b.name}:1.0.2",
splits=["test"] if b.name in ["axb", "axg"] else None),
preprocessors=pre_preprocessors + [
get_glue_text_preprocessor(b),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
functools.partial(
pt_preprocessors.add_sentinel_to_beginning,
field="inputs",
offset=super_glue_task_indexer[b.name]),
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats,
)
# ========= Definite Pronoun Resolution =========
# Similar to the Winograd Schema Challenge but doesn't require semantic
# knowledge to disambiguate between two different options. Training on this
# has been shown to be effective for increasing performance on WSC.
# [Kocijan, et. al., 2019](https://arxiv.org/abs/1905.06290)
if log_examples:
dpr_postprocess_fn = functools.partial(
pt_postprocessors.postprocess_with_examples, utils.identity),
dpr_metric_fns = [
functools.partial(pt_metrics.metric_with_examples, metrics.accuracy)
] + [functools.partial(pt_metrics.text_examples, task_name="dpr")]
else:
dpr_postprocess_fn = utils.identity
dpr_metric_fns = [metrics.accuracy]
# DPR without the initial dataset text token.
seqio.TaskRegistry.add(
f"{model_prefix}taskless_dpr_v001_simple{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name="definite_pronoun_resolution:1.1.0"),
preprocessors=[
preprocessors.definite_pronoun_resolution_simple,
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=dpr_postprocess_fn,
metric_fns=dpr_metric_fns,
output_features=feats,
)
seqio.TaskRegistry.add(
f"{model_prefix}task_index_dpr_v001_simple{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name="definite_pronoun_resolution:1.1.0"),
preprocessors=[
preprocessors.definite_pronoun_resolution_simple,
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
functools.partial(
pt_preprocessors.add_sentinel_to_beginning,
field="inputs",
offset=super_glue_task_indexer["wsc"]),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=dpr_postprocess_fn,
metric_fns=metric_fns,
output_features=feats,
)
# ========== WSC ==========
# This adds a "simplified" version of WSC like they do in t5. Instead of
# predicting if the supplied referent matches the highlighted pronoun in the
# text, the model generate a referent. If the referent matches the supplied
# one then the model predictions True, otherwise it will predict false. This
# means that we can only train on examples where the referent is correct.
# T5 does WSC in two different tasks. The first is a training task that only
# uses examples where the referent is true. We never do any evaluation on
# this dataset so the training data doesn't need anything like post
# processors or metric_fns. The second task is the evaluation task. This
# considers all examples and does use the output functions. These tasks are
# then combined into a mixture.
# Looking at positive and negative examples of WSC can be hard. If the label
# is 1 then the target referent should match the models predicted referent.
# If they match this examples was correct, if they don't the model was
# wrong. If the label is 0, then the target referent is not correct and we
# hope the model output something different.
if log_examples:
postprocess_fn = functools.partial(
pt_postprocessors.postprocess_with_examples,
postprocessors.wsc_simple)
metric_fns = [
functools.partial(pt_metrics.metric_with_examples, metrics.accuracy),
functools.partial(pt_metrics.text_examples, task_name="wsc")
]
else:
postprocess_fn = postprocessors.wsc_simple
metric_fns = [metrics.accuracy]
if log_examples:
# This version outputs examples to tensorboard.
seqio.TaskRegistry.add(
f"{model_prefix}super_glue_wsc_v102_simple_eval{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2",
splits=("validation", "test")),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=False),
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats)
# This mixture is WSC where predictions are output to tensorboard.
seqio.MixtureRegistry.add(
f"{model_prefix}super_glue_wsc_and_dev_v102_simple{examples_suffix}",
[
# We don't need a special version of the training data because it
# is never processed for output anyway.
f"{model_prefix}super_glue_wsc_v102_simple_train",
f"{model_prefix}super_glue_wsc_v102_simple_eval{examples_suffix}"
],
default_rate=1.0)
# This version remove the initial dataset text token.
seqio.TaskRegistry.add(
(f"{model_prefix}taskless_super_glue_wsc_v102_simple_train"
f"{examples_suffix}"),
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2", splits=("train",)),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=True),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
metric_fns=[],
output_features=feats)
seqio.TaskRegistry.add(
(f"{model_prefix}taskless_super_glue_wsc_v102_simple_eval"
f"{examples_suffix}"),
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2",
splits=["validation", "test"]),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=False),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats)
seqio.MixtureRegistry.add(
(f"{model_prefix}taskless_super_glue_wsc_and_dev_v102_simple"
f"{examples_suffix}"),
[
# We don't need a special version of the training data because it is
# never processed for output anyway.
(f"{model_prefix}taskless_super_glue_wsc_v102_simple_train"
f"{examples_suffix}"),
(f"{model_prefix}taskless_super_glue_wsc_v102_simple_eval"
f"{examples_suffix}")
],
default_rate=1.0)
# This version adds a task index as the first token.
seqio.TaskRegistry.add(
(f"{model_prefix}task_index_super_glue_wsc_v102_simple_train"
f"{examples_suffix}"),
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2", splits=("train",)),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=True),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
functools.partial(
pt_preprocessors.add_sentinel_to_beginning,
field="inputs",
offset=super_glue_task_indexer["wsc"]),
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
metric_fns=[],
output_features=feats)
seqio.TaskRegistry.add(
(f"{model_prefix}task_index_super_glue_wsc_v102_simple_eval"
f"{examples_suffix}"),
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2",
splits=["validation", "test"]),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=False),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
functools.partial(
pt_preprocessors.add_sentinel_to_beginning,
field="inputs",
offset=super_glue_task_indexer["wsc"]),
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats)
seqio.MixtureRegistry.add(
(f"{model_prefix}task_index_super_glue_wsc_and_dev_v102_simple"
f"{examples_suffix}"),
[(f"{model_prefix}task_index_super_glue_wsc_v102_simple_train"
f"{examples_suffix}"),
(f"{model_prefix}task_index_super_glue_wsc_v102_simple_eval"
f"{examples_suffix}")],
default_rate=1.0)
# =========== Mixtures ==========
# Mixtures of the task-index tasks so a single model can train on all
# SuperGlue tasks at once.
# This is a copy of the SuperGlue mixing rates from t5, adapted to the
# task-index versions of the datasets.  The rates appear to be
# examples-proportional (note the "_examples_proportional" mixture names);
# TODO confirm against the t5 source.  The WSC eval task has rate 0. so it
# is registered for evaluation but never sampled during training.
WEIGHT_MAPPING = {
    "task_index_super_glue_wsc_v102_simple_train": 259.,
    "task_index_super_glue_wsc_v102_simple_eval_examples": 0.,
    "task_index_super_glue_boolq_v102_examples": 9_427.,
    "task_index_super_glue_cb_v102_examples": 250.,
    "task_index_super_glue_copa_v102_examples": 400.,
    "task_index_super_glue_multirc_v102_examples": 27_243.,
    "task_index_super_glue_record_v102_examples": 138_854.,
    "task_index_super_glue_rte_v102_examples": 2_490.,
    "task_index_super_glue_wic_v102_examples": 5_428.,
}
# Same rates plus DPR (Definite Pronoun Resolution), which this module uses
# as auxiliary training data for WSC.
WEIGHT_MAPPING_WITH_DPR = {
    "task_index_dpr_v001_simple_examples": 1_322.,
    "task_index_super_glue_wsc_v102_simple_train": 259.,
    "task_index_super_glue_wsc_v102_simple_eval_examples": 0.,
    "task_index_super_glue_boolq_v102_examples": 9_427.,
    "task_index_super_glue_cb_v102_examples": 250.,
    "task_index_super_glue_copa_v102_examples": 400.,
    "task_index_super_glue_multirc_v102_examples": 27_243.,
    "task_index_super_glue_record_v102_examples": 138_854.,
    "task_index_super_glue_rte_v102_examples": 2_490.,
    "task_index_super_glue_wic_v102_examples": 5_428.,
}
# Register the two mixtures with the rates above.
seqio.MixtureRegistry.add("task_index_super_glue_v102_examples_proportional",
                          list(WEIGHT_MAPPING.items()))
seqio.MixtureRegistry.add(
    "task_index_super_glue_with_dpr_v102_examples_proportional",
    list(WEIGHT_MAPPING_WITH_DPR.items()))
| 42.77451 | 80 | 0.67276 |
05e5bab9ff77cdee550c0152d15077d78e190eff | 952 | py | Python | src/runtime/tasks.py | HitLuca/predict-python | 14f2f55cb29f817a5871d4c0b11a3758285301ca | [
"MIT"
] | null | null | null | src/runtime/tasks.py | HitLuca/predict-python | 14f2f55cb29f817a5871d4c0b11a3758285301ca | [
"MIT"
] | null | null | null | src/runtime/tasks.py | HitLuca/predict-python | 14f2f55cb29f817a5871d4c0b11a3758285301ca | [
"MIT"
] | null | null | null | from django_rq.decorators import job
from src.core.core import runtime_calculate
from src.jobs.models import JobStatuses
from src.jobs.ws_publisher import publish
from src.logs.models import Log
from src.utils.file_service import get_log
| 30.709677 | 65 | 0.657563 |
05e6f09ddfc0212cb3f08469b5c83b81051137ad | 99 | py | Python | django_models_from_csv/__init__.py | themarshallproject/django-collaborative | 1474b9737eaea35eb11b39380b35c2a801831d9c | [
"MIT"
] | 88 | 2019-05-17T19:52:44.000Z | 2022-03-28T19:43:07.000Z | django_models_from_csv/__init__.py | themarshallproject/django-collaborative | 1474b9737eaea35eb11b39380b35c2a801831d9c | [
"MIT"
] | 65 | 2019-05-17T20:06:18.000Z | 2021-01-13T03:51:07.000Z | django_models_from_csv/__init__.py | themarshallproject/django-collaborative | 1474b9737eaea35eb11b39380b35c2a801831d9c | [
"MIT"
] | 15 | 2019-07-09T20:48:14.000Z | 2021-07-24T20:45:55.000Z | default_app_config = 'django_models_from_csv.apps.DjangoDynamicModelsConfig'
__version__ = "1.1.0"
| 33 | 76 | 0.838384 |
05e70bf4fcafed340bac69f51837c437a43b38d8 | 93 | py | Python | utensor_cgen/backend/utensor/code_generator/__init__.py | uTensor/utensor_cgen | eccd6859028d0b6a350dced25ea72ff02faaf9ad | [
"Apache-2.0"
] | 49 | 2018-01-06T12:57:56.000Z | 2021-09-03T09:48:32.000Z | utensor_cgen/backend/utensor/code_generator/__init__.py | uTensor/utensor_cgen | eccd6859028d0b6a350dced25ea72ff02faaf9ad | [
"Apache-2.0"
] | 101 | 2018-01-16T19:24:21.000Z | 2021-11-10T19:39:33.000Z | utensor_cgen/backend/utensor/code_generator/__init__.py | uTensor/utensor_cgen | eccd6859028d0b6a350dced25ea72ff02faaf9ad | [
"Apache-2.0"
] | 32 | 2018-02-15T19:39:50.000Z | 2020-11-26T22:32:05.000Z | from .legacy import uTensorLegacyCodeGenerator
from .rearch import uTensorRearchCodeGenerator | 46.5 | 46 | 0.903226 |
05ec45e9e0486f8c0920e8e4a6acabaf4897caee | 417 | py | Python | ch3/ricolisp/token.py | unoti/rico-lisp | 367f625dcd086e207515bdeb5561763754a3531c | [
"MIT"
] | null | null | null | ch3/ricolisp/token.py | unoti/rico-lisp | 367f625dcd086e207515bdeb5561763754a3531c | [
"MIT"
] | null | null | null | ch3/ricolisp/token.py | unoti/rico-lisp | 367f625dcd086e207515bdeb5561763754a3531c | [
"MIT"
] | null | null | null | from collections import UserString
from typing import List
| 37.909091 | 93 | 0.717026 |
05ed3bd6a82da190685915c3b42fde3a3b5e118a | 2,655 | py | Python | utils.py | ali-ramadhan/wxConch | 1106ce17d25f96a038ca784029261faafd7cfaf9 | [
"MIT"
] | 1 | 2019-03-09T01:10:59.000Z | 2019-03-09T01:10:59.000Z | utils.py | ali-ramadhan/weather-prediction-model-consensus | 1106ce17d25f96a038ca784029261faafd7cfaf9 | [
"MIT"
] | 1 | 2019-08-19T12:26:06.000Z | 2019-08-19T12:26:06.000Z | utils.py | ali-ramadhan/weather-prediction-model-consensus | 1106ce17d25f96a038ca784029261faafd7cfaf9 | [
"MIT"
] | null | null | null | import os
import time
import math
import logging.config
from datetime import datetime
from subprocess import run
from urllib.request import urlopen, urlretrieve
from urllib.parse import urlparse, urljoin
import smtplib, ssl
from os.path import basename
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from bs4 import BeautifulSoup
logging.config.fileConfig("logging.ini", disable_existing_loggers=False)
logger = logging.getLogger(__name__)
HEADERS = {
"User-Agent": "wxConch (Python3.7) https://github.com/ali-ramadhan/wxConch",
"From": "alir@mit.edu"
}
| 28.858696 | 98 | 0.680979 |
05ed9c8e8fd31a9e77da54a3f25437648359aef1 | 1,987 | py | Python | aiida_fleur/cmdline/__init__.py | sphuber/aiida-fleur | df33e9a7b993a52c15a747a4ff23be3e19832b8d | [
"MIT"
] | null | null | null | aiida_fleur/cmdline/__init__.py | sphuber/aiida-fleur | df33e9a7b993a52c15a747a4ff23be3e19832b8d | [
"MIT"
] | null | null | null | aiida_fleur/cmdline/__init__.py | sphuber/aiida-fleur | df33e9a7b993a52c15a747a4ff23be3e19832b8d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
###############################################################################
# Copyright (c), Forschungszentrum Jlich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the AiiDA-FLEUR package. #
# #
# The code is hosted on GitHub at https://github.com/JuDFTteam/aiida-fleur #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.flapw.de or #
# http://aiida-fleur.readthedocs.io/en/develop/ #
###############################################################################
'''
Module for the command line interface of AiiDA-FLEUR
'''
import click
import click_completion
from aiida.cmdline.params import options, types
from .launch import cmd_launch
from .data import cmd_data
from .workflows import cmd_workflow
from .visualization import cmd_plot
# Activate the completion of parameter types provided by the click_completion package
# for bash: eval "$(_AIIDA_FLEUR_COMPLETE=source aiida-fleur)"
click_completion.init()
# Instead of using entrypoints and directly injecting verdi commands into aiida-core
# we created our own separete CLI because verdi will prob change and become
# less material science specific
# To avoid circular imports all commands are not yet connected to the root
# but they have to be here because of bash completion
cmd_root.add_command(cmd_launch)
cmd_root.add_command(cmd_data)
cmd_root.add_command(cmd_workflow)
cmd_root.add_command(cmd_plot)
| 43.195652 | 85 | 0.622043 |
05efd08ce434309fea6a153caaf4f36da65f692b | 243 | py | Python | textract/parsers/doc_parser.py | Pandaaaa906/textract | cee75460d3d43f0aa6f4967c6ccf069ee79fc560 | [
"MIT"
] | 1,950 | 2015-01-01T18:30:11.000Z | 2022-03-30T21:06:41.000Z | textract/parsers/doc_parser.py | nike199000/textract | 9d739f807351fd9e430a193eca853f5f2171a82a | [
"MIT"
] | 322 | 2015-01-05T09:54:45.000Z | 2022-03-28T17:47:15.000Z | textract/parsers/doc_parser.py | nike199000/textract | 9d739f807351fd9e430a193eca853f5f2171a82a | [
"MIT"
] | 470 | 2015-01-14T11:51:42.000Z | 2022-03-23T07:05:46.000Z | from .utils import ShellParser
| 22.090909 | 57 | 0.654321 |
05f2bf19df0a5655faf30da01ad995b33a5ff920 | 4,674 | py | Python | create_multi_langs/command_line.py | mychiux413/ConstConv | 6c2190d1bb37ae5cfef8464f88371db97719b032 | [
"MIT"
] | null | null | null | create_multi_langs/command_line.py | mychiux413/ConstConv | 6c2190d1bb37ae5cfef8464f88371db97719b032 | [
"MIT"
] | null | null | null | create_multi_langs/command_line.py | mychiux413/ConstConv | 6c2190d1bb37ae5cfef8464f88371db97719b032 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import absolute_import
from create_multi_langs.creater.go import CreaterGo
from create_multi_langs.creater.python import CreaterPython
from create_multi_langs.creater.python_typing import CreaterPythonTyping
from create_multi_langs.creater.typescript_backend import CreaterTypeScriptBackEnd # noqa: E501
from create_multi_langs.creater.typescript_frontend import CreaterTypeScriptFrontEnd # noqa: E501
from create_multi_langs.creater.javascript_backend import CreaterJavaScriptBackEnd # noqa: E501
from create_multi_langs.creater.javascript_frontend import CreaterJavaScriptFrontEnd # noqa: E501
import argparse
import time
import os
import sys
from functools import partial
VALID_EXTS = ['.py', '.go', '.ts', '.js', '.mjs']
if __name__ == "__main__":
main()
| 37.095238 | 99 | 0.627942 |
05f359b7dd7f8c17e74d1e4576ab789a5ca9047c | 297 | py | Python | test_resources/run_tests.py | tud-python-courses/lesson-builder | 11b1cc958723e9f75de27cde68daa0fdc18b929f | [
"MIT"
] | null | null | null | test_resources/run_tests.py | tud-python-courses/lesson-builder | 11b1cc958723e9f75de27cde68daa0fdc18b929f | [
"MIT"
] | null | null | null | test_resources/run_tests.py | tud-python-courses/lesson-builder | 11b1cc958723e9f75de27cde68daa0fdc18b929f | [
"MIT"
] | null | null | null | __author__ = 'Justus Adam'
__version__ = '0.1'
if __name__ == '__main__':
main()
else:
del main | 13.5 | 50 | 0.606061 |
05f89c6e9f8cabc37acf4ef72901aa6289131ace | 15,798 | py | Python | parse_to_latex.py | bkolosk1/bkolosk1-CrossLingualKeywords | 27cdc5075d1e30b02bb38891933a8fbb51957899 | [
"MIT"
] | 2 | 2021-04-19T23:57:58.000Z | 2021-11-02T08:40:16.000Z | parse_to_latex.py | bkolosk1/bkolosk1-CrossLingualKeywords | 27cdc5075d1e30b02bb38891933a8fbb51957899 | [
"MIT"
] | 1 | 2021-11-22T09:05:10.000Z | 2021-11-22T09:05:10.000Z | bert/parse_to_latex.py | bkolosk1/Extending-Neural-Keyword-Extraction-with-TF-IDF-tagset-matching | d52b9b9e1fb9130239479b1830b0930161672325 | [
"MIT"
] | null | null | null | import re
#parse_to_latex()
#get_averages()
#revert()
get_averages_reverted()
| 50.152381 | 223 | 0.44993 |
05fd8b2f68e0ad751b568376c91ded4488f3dd84 | 55,975 | py | Python | cc_bm_parallel_pyr_dev.py | xdenisx/ice_drift_pc_ncc | f2992329e8509dafcd37596271e80cbf652d14cb | [
"MIT"
] | 3 | 2021-11-10T04:03:10.000Z | 2022-02-27T10:36:02.000Z | cc_bm_parallel_pyr_dev.py | xdenisx/ice_drift_pc_ncc | f2992329e8509dafcd37596271e80cbf652d14cb | [
"MIT"
] | 1 | 2021-10-12T17:29:53.000Z | 2021-10-12T17:29:53.000Z | cc_bm_parallel_pyr_dev.py | xdenisx/ice_drift_pc_ncc | f2992329e8509dafcd37596271e80cbf652d14cb | [
"MIT"
] | null | null | null | import matplotlib
matplotlib.use('Agg')
# coding: utf-8
#
# Ice drift retrieval algorithm based on [1] from a pair of SAR images
# [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light and Magic.
#
##################################################
# Last modification: 22 July, 2019
# TODO:
# 1) Pyramidal strategy (do we need this?)
# 2) add ocean cm maps ('Balance' for divergence)
##################################################
import cv2
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
import time
import multiprocessing
from skimage.feature import match_template
from skimage.transform import rescale, resize, downscale_local_mean
from skimage import io, img_as_ubyte
from skimage.morphology import disk
from skimage.filters.rank import median
from skimage.filters import laplace
from skimage import exposure
from skimage.filters.rank import gradient
from skimage import filters
from sklearn.neighbors import KDTree
import sys
import sklearn.neighbors
import re
import geojson
import shapefile as sf
import pyproj
from osgeo import gdal, osr
from datetime import datetime
from netCDF4 import Dataset
from osgeo import gdal, osr, gdal_array, ogr
import warnings
warnings.filterwarnings('ignore')
import matplotlib as mpl
import time
def unit_vector(vector):
    """Scale *vector* to unit length (Euclidean norm 1)."""
    norm = np.linalg.norm(vector)
    return vector / norm
def angle_between(v1, v2):
    """Return the angle in degrees between vectors 'v1' and 'v2'::

        angle_between((1, 0, 0), (0, 1, 0))  ->  90.0
        angle_between((1, 0, 0), (1, 0, 0))  ->   0.0
        angle_between((1, 0, 0), (-1, 0, 0)) -> 180.0

    (Docstring fix: the previous version claimed radians, but the value
    has always been returned in degrees via np.degrees.)
    """
    v1_u = unit_vector(v1)
    v2_u = unit_vector(v2)
    # Bug fix: floating-point noise can push the dot product of two unit
    # vectors marginally outside [-1, 1], making arccos return NaN for
    # perfectly valid (anti-)parallel inputs. Clip into arccos' domain.
    angle = np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
    if np.isnan(angle):
        # Degenerate input (e.g. a zero-length vector normalises to NaN):
        # keep the original fallback of 0 for identical unit vectors,
        # 180 degrees otherwise.
        if (v1_u == v2_u).all():
            return np.degrees(0.0)
        else:
            return np.degrees(np.pi)
    return np.degrees(angle)
# Global matplotlib font configuration applied to every figure this module
# saves (small bold labels suit the dense drift/deformation plots).
font = {'family' : 'normal',
        'weight' : 'bold',
        'size'   : 6}
matplotlib.rc('font', **font)
# TODO: check
def check_borders(im):
    ''' n pixels along line means image has a black border

    Scans each image line left to right; once the run of non-zero pixels
    ends, counts consecutive zero pixels.  A run of >= 15 zeros inside a
    line is treated as a black stripe/border.

    Returns 1 as soon as such a stripe is found, otherwise 0.
    '''
    flag = 0
    ch = 0   # consecutive-zero-pixel counter within the current line
    j = 0    # column cursor, reset per line
    for i in range(im.shape[0] - 1):
        # Skip the leading run of valid (non-zero) pixels in this line.
        while j < im.shape[1] - 1 and im[i,j] > 0:
            j += 1
        else:
            # while/else: this branch always runs (the loop has no break).
            if j < im.shape[1] - 1 and (im[i,j] == 0 or im[i,j] == 255):
                # Count the length of the zero run that follows.
                while im[i,j] == 0 and j < im.shape[1] - 1:
                    j += 1
                    ch += 1
                    if ch >= 15:
                        flag = 1
                        #print('Black stripe detected!')
                        return flag
            # Reset cursors for the next line.
            j = 0
            ch = 0
    return flag
# Matching
def matching(templ, im):
    """Normalized cross-correlation of template ``templ`` against ``im``.

    Returns ``(u_peak, v_peak, surface)``: the (x, y) position of the
    correlation maximum and the full correlation surface.
    """
    # Direct matching (pad_input=True, 'edge' padding).
    surface = match_template(im, templ, True, 'edge',)
    # Suppress a border margin (10% of the image height) so the peak can
    # never be picked at the very edge of the search area
    # (Dirichlet-style border treatment).
    margin = int(im.shape[0]/10.)
    surface[:margin, :] = 0.
    surface[-margin:, :] = 0.
    surface[:, :margin] = 0.
    surface[:, -margin:] = 0.
    # argmax yields (row, col); reverse to (col, row) == (x, y).
    peak_rc = np.unravel_index(np.argmax(surface), surface.shape)
    u_peak, v_peak = peak_rc[::-1]
    return u_peak, v_peak, surface
def filter_local_homogenity(arr_cc_max, y, x, u, v, filter_all=False):
    '''
    Local homogenity filtering (refine CC peak)
    y - axe (top -> bottom)
    x - axe (left -> right)
    u - along Y (top -> bottom)
    v - along X (left -> right)
    mask - indicate that a vector has been reprocessed

    NOTE: u, v and arr_cc_max are modified IN PLACE; the (possibly
    mutated) arrays are also returned together with the mask.
    A vector is replaced by the median of its 8-neighbourhood when it is
    NaN or deviates from the local median by more than the median itself;
    a vector with more than 5 NaN neighbours is dropped entirely.
    '''
    # Mask array with refined tie points
    mask = np.zeros_like(arr_cc_max)
    # TODO: processing of border vectors
    for i in range(1, x.shape[0] - 1):
        for j in range(1, x.shape[1] - 1):
            # Calculate median of u and v for 8 neighbors
            # Matrix with negbors: nn[0] holds u-values, nn[1] v-values,
            # laid out as the 3x3 neighbourhood around (i, j).
            nn = np.zeros(shape=(2, 3, 3))
            nn[:] = np.nan
            # U and V
            #if not np.isnan(u[i - 1, j - 1]):
            nn[0, 0, 0] = u[i - 1, j - 1]
            nn[0, 0, 1] = u[i - 1, j]
            nn[0, 0, 2] = u[i - 1, j + 1]
            nn[1, 0, 0] = v[i - 1, j - 1]
            nn[1, 0, 1] = v[i - 1, j]
            nn[1, 0, 2] = v[i - 1, j + 1]
            # Centre element nn[:, 1, 1] stays NaN on purpose: the vector
            # itself must not influence its own neighbourhood median.
            nn[0, 1, 0] = u[i, j-1]
            nn[0, 1, 2] = u[i, j+1]
            nn[1, 1, 0] = v[i, j - 1]
            nn[1, 1, 2] = v[i, j + 1]
            nn[0, 2, 0] = u[i + 1, j - 1]
            nn[0, 2, 1] = u[i + 1, j]
            nn[0, 2, 2] = u[i + 1, j + 1]
            nn[1, 2, 0] = v[i + 1, j - 1]
            nn[1, 2, 1] = v[i + 1, j]
            nn[1, 2, 2] = v[i + 1, j + 1]
            # Check number of nans and find median for U and V
            uu = nn[0, :, :]
            # If number of neighbors <= 3 (centre NaN plus >5 NaN
            # neighbours): too few valid neighbours -- reject the vector.
            if len(uu[np.isnan(uu)]) > 5:
                u[i, j] = np.nan
                v[i, j] = np.nan
                arr_cc_max[i, j] = 0
                #print 'NANs > 3!'
            else:
                u_median = np.nanmedian(nn[0, :, :])
                v_median = np.nanmedian(nn[1, :, :])
                if not filter_all:
                    # Only replace clear outliers (or NaN vectors).
                    if np.isnan(u[i, j]) or abs(u[i, j] - u_median) > abs(u_median) or \
                            abs(v[i, j] - v_median) > abs(v_median):
                        u[i, j] = u_median
                        v[i, j] = v_median
                        mask[i, j] = 1
                        arr_cc_max[i, j] = 1
                        #print '%s %s %s %s' % (u[i, j], v[i, j], u_median, v_median)
                else:
                    # filter_all: unconditionally smooth every vector.
                    u[i, j] = u_median
                    v[i, j] = v_median
                    mask[i, j] = 1
                    arr_cc_max[i, j] = 1
    return mask, y, x, u, v, arr_cc_max
def filter_Rmin(arr_cc_max):
    """Flag low-correlation vectors.

    An element of the returned mask is 1 where the correlation
    coefficient falls below R_min = mean(R) - std(R), both statistics
    computed ignoring NaNs; 0 elsewhere.
    """
    threshold = np.nanmean(arr_cc_max) - np.nanstd(arr_cc_max)
    mask = np.zeros_like(arr_cc_max)
    mask[arr_cc_max < threshold] = 1
    return mask
def plot_scatter(fname, img, x, y, msize=0.1):
    ''' Plot scatter of initial points

    Parameters
    ----------
    fname : str
        Output figure path (passed to plt.savefig).
    img : 2-D array
        Background image to draw the points on.
    x, y : array-like
        Point coordinates in image (pixel) space.
    msize : float, optional
        Marker size for the scatter points.
    '''
    plt.clf()
    # Bug fix: draw on the image that was passed in -- previously the
    # *img* argument was silently ignored in favour of the global
    # Conf.img1.
    plt.imshow(img, cmap='gray')
    plt.scatter(x, y, s=msize, color='red')
    plt.savefig(fname, bbox_inches='tight', dpi=600)
def plot_arrows(fname, img, x, y, u, v, cc, arrwidth=0.005, headwidth=3.5, flag_color=True):
    ''' Plot arrows on top of image

    Saves the quiver figure to *fname* and a companion figure of the
    valid (non-NaN) start points next to it, prefixed with ``pts_``.

    Parameters
    ----------
    fname : str
        Output path for the quiver figure.
    img : 2-D array
        Background image.
    x, y, u, v : array-like
        Arrow origins and displacement components (pixels).
    cc : array-like
        Correlation coefficient per arrow (used for colouring).
    arrwidth, headwidth : float, optional
        Quiver geometry parameters.
    flag_color : bool, optional
        If True, colour arrows by *cc* and add a colorbar; otherwise
        draw them in plain yellow.
    '''
    plt.clf()
    fig, ax = plt.subplots(figsize=(16, 9))
    plt.imshow(img, cmap='gray')
    if flag_color:
        # Bug fix: the coloured quiver used to be issued twice with
        # identical arguments; draw it exactly once.
        plt.quiver(x, y, u, v, cc, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
                   scale=1, cmap='jet')
        cbar = plt.colorbar()
        cbar.set_label('Correlation coeff.')
    else:
        plt.quiver(x, y, u, v, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
                   scale=1, color='yellow')
    plt.savefig(fname, bbox_inches='tight', dpi=600)
    # Companion figure: start points of the valid (non-NaN) vectors.
    plt.clf()
    fig, ax = plt.subplots(figsize=(16, 9))
    plt.imshow(img, cmap='gray')
    plt.scatter(x[~np.isnan(u)], y[~np.isnan(u)], s=Conf.grid_step/2., facecolors='yellow', edgecolors='black')
    plt.savefig('%s/pts_%s' % (os.path.dirname(fname), os.path.basename(fname)), bbox_inches='tight', dpi=600)
# TODO!: remove
def plot_arrows_one_color(fname, img, x, y, u, v, cc, arrwidth=0.005, headwidth=3.5, flag_color=False):
    """Draw a single quiver layer over *img* and save it to *fname*.

    With ``flag_color=True`` the arrows are coloured by *cc* and a
    colorbar is added; otherwise they are drawn in plain yellow
    (and *cc* is unused).
    """
    plt.clf()
    plt.imshow(img, cmap='gray')
    # Geometry settings shared by both branches.
    common = dict(angles='xy', scale_units='xy', width=arrwidth,
                  headwidth=headwidth, scale=1)
    if flag_color:
        plt.quiver(x, y, u, v, cc, cmap='jet', **common)
        cbar = plt.colorbar()
        cbar.set_label('Correlation coeff.')
    else:
        plt.quiver(x, y, u, v, color='yellow', **common)
    plt.savefig(fname, bbox_inches='tight', dpi=1200)
def crop_images(img1, img2, y0, x0):
    '''
    Crop a template patch from img1 and a search-area patch from img2.

    :param img1: image1 (2-D array)
    :param img2: image2 (2-D array, same geometry as img1)
    :param y0: center of patch
    :param x0: center of patch
    :return: (im1, im2) -- template patch of half-size Conf.grid_step
        and search-area patch of half-size Conf.block_size *
        Conf.search_area, both clipped to the image extent and
        pre-filtered according to the Conf.img_*_filtering switches.
    '''
    # TODO: x2, y2 for img2
    height, width = img1.shape

    iidx_line = int(x0)
    iidx_row = int(y0)

    # Template window on img1, clipped to the image borders.
    LLt0 = np.max([0, iidx_line - Conf.grid_step])
    LLt1 = np.max([0, iidx_row - Conf.grid_step])
    RRt0 = np.min([iidx_line + Conf.grid_step, height])
    RRt1 = np.min([iidx_row + Conf.grid_step, width])
    # Bug fix: crop from the *img1* argument -- previously the function
    # ignored its parameters and read the global Conf.img1/Conf.img2.
    im1 = img1[LLt0:RRt0, LLt1:RRt1]

    # Search window on img2 (larger, scaled by the search-area factor).
    LLi0 = np.max([0, iidx_line - Conf.block_size * Conf.search_area])
    LLi1 = np.max([0, iidx_row - Conf.block_size * Conf.search_area])
    RRi0 = np.min([iidx_line + Conf.block_size * Conf.search_area, height])
    RRi1 = np.min([iidx_row + Conf.block_size * Conf.search_area, width])
    im2 = img2[LLi0:RRi0, LLi1:RRi1]

    # (Removed: mangled offset assignments that raised NameError, and
    # check_borders() calls whose results were never used.)

    #####################
    # Filtering
    #####################
    if Conf.img_median_filtering:
        im2 = median(im2, disk(Conf.median_kernel))
        im1 = median(im1, disk(Conf.median_kernel))
    if Conf.img_laplace_filtering:
        im2 = laplace(im2)
        im1 = laplace(im1)
    if Conf.img_gradient_filtering:
        im2 = gradient(im2, disk(3))
        im1 = gradient(im1, disk(3))
    if Conf.img_scharr_filtering:
        im2 = filters.scharr(im2)
        im1 = filters.scharr(im1)

    return im1, im2
# TODO: EXPERIMENTAL
def filter_BM(th = 10):
    ''' Back matching test

    Rejects drift vectors whose backward (img2 -> img1) match disagrees
    with the forward match by more than *th* pixels in either component.
    Returns a mask array: 1 = rejected, 0 = consistent.

    NOTE(review): relies on module-level arrays (arr_rows_1/2,
    arr_lines_1/2, arr_cc_max and their *_bm counterparts) being defined
    by the driver script before this is called.
    '''
    Conf.bm_th = th # pixels
    # Component displacements from the backward and forward matches.
    u_back = arr_rows_2_bm - arr_rows_1_bm
    u_direct = arr_rows_2 - arr_rows_1
    v_back = arr_lines_2_bm - arr_lines_1_bm
    v_direct = arr_lines_2 - arr_lines_1
    # A consistent vector satisfies back ~= -direct, so direct + back
    # measures the back-matching disagreement per component.
    u_dif = u_direct - u_back * (-1)
    v_dif = v_direct - v_back * (-1)
    #arr_rows_1, arr_lines_1, arr_rows_2, arr_lines_2, arr_cc_max
    #arr_rows_1_bm, arr_lines_1_bm, arr_rows_2_bm, arr_lines_2_bm, arr_cc_max_bm
    # Start with everything rejected, then clear the consistent vectors.
    mask = np.zeros_like(arr_cc_max)
    mask[:,:] = 1
    mask[((abs(u_dif) < Conf.bm_th) & (abs(v_dif) < Conf.bm_th))] = 0
    #mask[((abs(arr_lines_1 - arr_lines_2_bm) > Conf.bm_th) | (abs(arr_rows_1 - arr_rows_2_bm) > Conf.bm_th))] = 1
    return mask
def plot_arrows_from_list(pref, fname, img, ll_data, arrwidth=0.005, headwidth=3.5, flag_color=True):
    """Plot drift arrows given as a list of (y, x, u, v, cc) records.

    ``None`` entries in *ll_data* are skipped.  ``pref`` is accepted for
    interface compatibility but is not used.
    """
    plt.clf()
    plt.imshow(img, cmap='gray')
    # Drop empty records, then split the tuples into parallel lists.
    valid = [rec for rec in ll_data if rec is not None]
    yyy = [rec[0] for rec in valid]
    xxx = [rec[1] for rec in valid]
    uuu = [rec[2] for rec in valid]
    vvv = [rec[3] for rec in valid]
    ccc = [rec[4] for rec in valid]
    if flag_color:
        plt.quiver(xxx, yyy, uuu, vvv, ccc, angles='xy', scale_units='xy',
                   width=arrwidth, headwidth=headwidth, scale=1, cmap='jet')
        cbar = plt.colorbar()
        cbar.set_label('Correlation coeff.')
        # Annotate each arrow with its start and end pixel coordinates.
        for xi, yi, ui, vi in zip(xxx, yyy, uuu, vvv):
            plt.text(xi, yi, r'(%s,%s)' % (yi, xi), fontsize=0.07, color='yellow')
            plt.text(xi + ui, yi + vi, r'(%s,%s)' % (yi + vi, xi + ui),
                     fontsize=0.07, color='yellow')
    else:
        plt.quiver(xxx, yyy, uuu, vvv, ccc, angles='xy', scale_units='xy',
                   width=arrwidth, headwidth=headwidth, scale=1, color='yellow')
    plt.savefig(fname, bbox_inches='tight', dpi=600)
    # NOTE(review): this trailing clf/imshow pair prepares a fresh canvas
    # but never saves it -- kept as-is for behavioural parity.
    plt.clf()
    plt.imshow(img, cmap='gray')
def median_filtering(x1, y1, uu, vv, cc, radius=512, total_neighbours=7):
    '''
    Median filtering of resultant ice vectors as a step before deformation calculation

    For every vector start point, queries all neighbours within *radius*
    (pixels) with a KD-tree and replaces the components of non-fast-ice
    vectors (displacement > 5 px, or NaN) with neighbourhood medians.
    Returns filtered copies (x1_f, y1_f, uu_f, vv_f, cc_f); cc is zeroed
    for every vector that was touched.

    NOTE(review): *total_neighbours* is currently unused (the check is
    commented out below).
    '''
    fast_ice_th = 5.
    # Get values of vector components
    #uu = x2 - x1
    #vv = y2 - y1
    idx_mask = []
    # Make 2D data of components
    #data = np.vstack((uu, vv)).T
    x1, y1, uu, vv, cc = np.array(x1), np.array(y1), np.array(uu), np.array(vv), np.array(cc)
    # Radius based filtering: KD-tree over vector start points.
    vector_start_data = np.vstack((x1, y1)).T
    vector_start_tree = sklearn.neighbors.KDTree(vector_start_data)
    for i in range(0, len(x1), 1):
        # If index of element in mask list form 'outliers_filtering' then replace with median
        #if i in mask_proc:
        #    print('Replace with median!')
        req_data = np.array([x1[i], y1[i]]).reshape(1, -1)
        # Getting number of neighbours
        num_nn = vector_start_tree.query_radius(req_data, r=radius, count_only=True)
        # Check number of neighboors
        '''
        if num_nn[0] < total_neighbours:
            idx_mask.append(i)
            cc[i] = 0.
        else:
        '''
        # Apply median filtering over the neighbourhood components.
        nn = vector_start_tree.query_radius(req_data, r=radius)
        data = np.vstack((uu[nn[0]], vv[nn[0]])).T
        ####################################################################
        # Loop through all found ice drift vectors to filter not homo
        ####################################################################
        for ii in range(num_nn[0]):
            # Calculate median
            #data[:, 0][ii], data[:, 1][ii]
            # Replace raw with median
            # If not fast ice (> 5 pixels)
            if (np.hypot(uu[i], vv[i]) > fast_ice_th or np.isnan(uu[i]) or np.isnan(vv[i])):
                u_median = np.nanmedian(data[:, 0][ii])
                v_median = np.nanmedian(data[:, 1][ii])
                #u_median = np.nanmean(data[:, 0][ii])
                #v_median = np.nanmean(data[:, 1][ii])
                uu[i], vv[i] = u_median, v_median
                cc[i] = 0
    #tt = list(set(idx_mask))
    #iidx_mask = np.array(tt)
    x1_f = np.array(x1)
    y1_f = np.array(y1)
    uu_f = np.array(uu)
    vv_f = np.array(vv)
    cc_f = np.array(cc)
    return x1_f, y1_f, uu_f, vv_f, cc_f
def calc_deformations(dx, dy, normalization=False, normalization_time=None, cell_size=1.,
                      invert_meridional=True, out_png_name='test.png'):
    '''
    Calculate deformation invariants from X and Y ice drift components
    dx, dy - x and y component of motion (pixels)
    normalization - normalize to time (boolean)
    normalization_time - normalization time (in seconds)
    cell_size - ground meters in a pixel
    invert_meridional - invert y component (boolean)

    Returns (mag_speed, divergence, curl, shear, total_deform);
    mag_speed is in cm/s when normalization is enabled, otherwise in the
    same units as dx/dy.  Also saves a divergence + quiver preview figure
    to out_png_name as a side effect.
    '''
    # Cell size factor (in cm)
    cell_size_cm = cell_size * 100.
    cell_size_factor = 1 / cell_size_cm
    # Output grids, NaN outside the computed interior stencil.
    m_div = np.empty((dx.shape[0], dx.shape[1],))
    m_div[:] = np.NAN
    m_curl = np.empty((dx.shape[0], dx.shape[1],))
    m_curl[:] = np.NAN
    m_shear = np.empty((dx.shape[0], dx.shape[1],))
    m_shear[:] = np.NAN
    m_tdef = np.empty((dx.shape[0], dx.shape[1],))
    m_tdef[:] = np.NAN
    # Invert meridional component
    if invert_meridional:
        dy = dy * (-1)
    # Normilize u and v to 1 hour
    if not normalization:
        pass
    else:
        # Convert to ground distance (pixels*cell size(m) * 100.)
        dx = dx * cell_size_cm # cm
        dy = dy * cell_size_cm # cm
        # Get U/V components of speed (cm/s)
        dx = dx / normalization_time
        dy = dy / normalization_time
    # Calculate magnitude (speed module) (cm/s)
    mag_speed = np.hypot(dx, dy)
    # Print mean speed in cm/s
    print('Mean speed: %s [cm/s]' % (np.nanmean(mag_speed)))
    #cell_size_factor = 1 / cell_size
    # Test
    #plt.clf()
    #plt.imshow(m_div)
    # Central-difference stencils over interior cells only; a quantity is
    # computed only when every neighbour it reads is non-NaN.
    for i in range(1, dx.shape[0] - 1):
        for j in range(1, dx.shape[1] - 1):
            # div
            if (np.isnan(dx[i, j + 1]) == False and np.isnan(dx[i, j - 1]) == False
                    and np.isnan(dy[i - 1, j]) == False and np.isnan(dy[i + 1, j]) == False
                    and (np.isnan(dx[i, j]) == False or np.isnan(dy[i, j]) == False)):
                # m_div[i,j] = 0.5*((u_int[i,j + 1] - u_int[i,j - 1]) + (v_int[i + 1,j] - v_int[i - 1,j]))/m_cell_size
                # !Exclude cell size factor!
                m_div[i, j] = cell_size_factor * 0.5 * ((dx[i, j + 1] - dx[i, j - 1])
                                                        + (dy[i - 1, j] - dy[i + 1, j]))
                # print m_div[i,j]
            # Curl
            if (np.isnan(dy[i, j + 1]) == False and np.isnan(dy[i, j - 1]) == False and
                    np.isnan(dx[i - 1, j]) == False and np.isnan(dx[i + 1, j]) == False
                    and (np.isnan(dx[i, j]) == False or np.isnan(dy[i, j]) == False)):
                # !Exclude cell size factor!
                m_curl[i, j] = cell_size_factor * 0.5 * (dy[i, j + 1] - dy[i, j - 1]
                                                         - dx[i - 1, j] + dx[i + 1, j]) / cell_size
            # Shear
            if (np.isnan(dy[i + 1, j]) == False and np.isnan(dy[i - 1, j]) == False and
                    np.isnan(dx[i, j - 1]) == False and np.isnan(dx[i, j + 1]) == False and
                    np.isnan(dy[i, j - 1]) == False and np.isnan(dy[i, j + 1]) == False and
                    np.isnan(dx[i + 1, j]) == False and np.isnan(dx[i - 1, j]) == False and
                    (np.isnan(dx[i, j]) == False or np.isnan(dy[i, j]) == False)):
                dc_dc = cell_size_factor * 0.5 * (dy[i + 1, j] - dy[i - 1, j])
                dr_dr = cell_size_factor * 0.5 * (dx[i, j - 1] - dx[i, j + 1])
                dc_dr = cell_size_factor * 0.5 * (dy[i, j - 1] - dy[i, j + 1])
                dr_dc = cell_size_factor * 0.5 * (dx[i + 1, j] - dx[i - 1, j])
                # !Exclude cell size factor!
                m_shear[i, j] = np.sqrt(
                    (dc_dc - dr_dr) * (dc_dc - dr_dr) + (dc_dr - dr_dc) * (dc_dr - dr_dc)) / cell_size
                '''
                # Den
                dc_dc = 0.5*(v_int[i + 1,j] - v_int[i - 1,j])
                dr_dr = 0.5*(u_int[i,j + 1] - u_int[i,j - 1])
                dc_dr = 0.5*(v_int[i,j + 1] - v_int[i,j - 1])
                dr_dc = 0.5*(u_int[i + 1,j] - u_int[i - 1,j])
                m_shear[i,j] = np.sqrt((dc_dc -dr_dr) * (dc_dc -dr_dr) + (dc_dr - dr_dc) * (dc_dr - dr_dc))/m_cell_size
                '''
            # Total deformation
            if (np.isnan(m_shear[i, j]) == False and np.isnan(m_div[i, j]) == False):
                m_tdef[i, j] = np.hypot(m_shear[i, j], m_div[i, j])
    # Invert dy back
    if invert_meridional:
        dy = dy * (-1)
    # data = np.vstack((np.ravel(xx_int), np.ravel(yy_int), np.ravel(m_div), np.ravel(u_int), np.ravel(v_int))).T
    divergence = m_div
    # TODO: Plot Test Div
    plt.clf()
    plt.gca().invert_yaxis()
    plt.imshow(divergence, cmap='RdBu', vmin=-0.00008, vmax=0.00008,
               interpolation='nearest', zorder=2)  # vmin=-0.06, vmax=0.06,
    # Plot u and v values inside cells (for testing porposes)
    '''
    font_size = .0000003
    for ii in range(dx.shape[1]):
        for jj in range(dx.shape[0]):
            try:
                if not np.isnan(divergence[ii,jj]):
                    if divergence[ii,jj] > 0:
                        plt.text(jj, ii,
                                 'u:%.2f\nv:%.2f\n%s ij:(%s,%s)\n%.6f' %
                                 (dx[ii,jj], dy[ii,jj], '+', ii, jj, divergence[ii,jj]),
                                 horizontalalignment='center',
                                 verticalalignment='center', fontsize=font_size, color='k')
                    if divergence[ii,jj] < 0:
                        plt.text(jj, ii,
                                 'u:%.2f\nv:%.2f\n%s ij:(%s,%s)\n%.6f' %
                                 (dx[ii,jj], dy[ii,jj], '-', ii, jj, divergence[ii,jj]),
                                 horizontalalignment='center',
                                 verticalalignment='center', fontsize=font_size, color='k')
                    if divergence[ii,jj] == 0:
                        plt.text(jj, ii,
                                 'u:%.2f\nv:%.2f\n%s ij:(%s,%s)\n%.6f' %
                                 (dx[ii,jj], dy[ii,jj], '0', ii, jj, divergence[ii,jj]),
                                 horizontalalignment='center',
                                 verticalalignment='center', fontsize=font_size, color='k')
                if np.isnan(divergence[ii,jj]):
                    plt.text(jj, ii,
                             'u:%.2f\nv:%.2f\n%s ij:(%s,%s)' %
                             (dx[ii,jj], dy[ii,jj], '-', ii, jj),
                             horizontalalignment='center',
                             verticalalignment='center', fontsize=font_size, color='k')
                # Plot arrows on top of the deformation
                xxx = range(dx.shape[1])
                yyy = range(dx.shape[0])
            except:
                pass
    '''
    # Plot drift arrows on the top
    #import matplotlib.cm as cm
    #from matplotlib.colors import Normalize
    # Invert meridional component for plotting
    ddy = dy * (-1)
    #norm = Normalize()
    colors = np.hypot(dx, ddy)
    #print(colors)
    #norm.autoscale(colors)
    # we need to normalize our colors array to match it colormap domain
    # which is [0, 1]
    #colormap = cm.inferno
    # Plot arrows on top of the deformation
    xxx = range(dx.shape[1])
    yyy = range(dx.shape[0])
    plt.quiver(xxx, yyy, dx, ddy, colors, cmap='Greys', zorder=3) #'YlOrBr')
    # Invert Y axis
    plt.savefig(out_png_name, bbox_inches='tight', dpi=800)
    curl = m_curl
    shear = m_shear
    total_deform = m_tdef
    # return mag in cm/s
    return mag_speed, divergence, curl, shear, total_deform
# !TODO:
def make_nc(nc_fname, lons, lats, data):
    """
    Make netcdf4 file for deformation (divergence, shear, total deformation), scaled 10^(-4)

    Parameters
    ----------
    nc_fname : str
        Output NetCDF file path.
    lons, lats : 2-D arrays
        Longitude/latitude grids; their shape defines the y/x dimensions.
    data : dict
        Maps variable name -> {'data': 2-D array, 'units': str,
        'scale_factor': number}.
    """
    print('\nStart making nc for defo...')
    ds = Dataset(nc_fname, 'w', format='NETCDF4_CLASSIC')
    print(ds.file_format)
    # Dimensions
    y_dim = ds.createDimension('y', lons.shape[0])
    x_dim = ds.createDimension('x', lons.shape[1])
    time_dim = ds.createDimension('time', None)
    #data_dim = ds.createDimension('data', len([k for k in data.keys()]))
    # Variables
    times = ds.createVariable('time', np.float64, ('time',))
    latitudes = ds.createVariable('lat', np.float32, ('y', 'x',))
    longitudes = ds.createVariable('lon', np.float32, ('y', 'x',))
    # HACK(review): variables are held via globals() so they can be
    # created from the dict keys; a plain local dict would be safer.
    for var_name in data.keys():
        globals()[var_name] = ds.createVariable(var_name, np.float32, ('y', 'x',))
        globals()[var_name][:, :] = data[var_name]['data']
        globals()[var_name].units = data[var_name]['units']
        globals()[var_name].scale_factor = data[var_name]['scale_factor']
    # Global Attributes
    ds.description = 'Sea ice deformation product'
    ds.history = 'Created ' + time.ctime(time.time())
    ds.source = 'NIERSC/NERSC'
    # Variable Attributes
    latitudes.units = 'degree_north'
    longitudes.units = 'degree_east'
    times.units = 'hours since 0001-01-01 00:00:00'
    times.calendar = 'gregorian'
    # Put variables.  NOTE(review): the 'time' variable is created but
    # never filled with values.
    latitudes[:, :] = lats
    longitudes[:, :] = lons
    ds.close()
def create_geotiff(suffix, data, NDV, GeoT, Projection):
    ''' Create geotiff file (1 band)

    Parameters
    ----------
    suffix : str
        Output file-name prefix; '_test.tif' is appended.
    data : 2-D ndarray
        Raster values; NaNs are replaced IN PLACE by NDV.
    NDV : number
        No-data value stored in the band metadata.
    GeoT : tuple
        GDAL geotransform (originX, pxW, 0, originY, 0, pxH).
    Projection : str
        Projection as WKT.

    Returns the written file name.
    '''
    # Get GDAL data type matching the numpy dtype
    dataType = gdal_array.NumericTypeCodeToGDALTypeCode(data.dtype)
    # NaNs to the no data value (mutates the caller's array, as before)
    data[np.isnan(data)] = NDV
    # Bug fix: `np.int` was removed in NumPy 1.24 -- test against the
    # builtin int instead.  When the lookup yields a type *name*,
    # resolve it to the gdal constant via getattr rather than eval().
    if not isinstance(dataType, int):
        if not dataType.startswith('gdal.GDT_'):
            dataType = getattr(gdal, 'GDT_' + dataType)
    newFileName = suffix + '_test.tif'
    cols = data.shape[1]
    rows = data.shape[0]
    driver = gdal.GetDriverByName('GTiff')
    outRaster = driver.Create(newFileName, cols, rows, 1, dataType)
    #outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
    outRaster.SetGeoTransform(GeoT)
    outband = outRaster.GetRasterBand(1)
    outband.WriteArray(data)
    outRaster.SetProjection(Projection)
    outband.SetNoDataValue(NDV)
    outband.FlushCache()
    return newFileName
def apply_anisd(img, gamma=0.25, step=(1., 1.), ploton=False):
    """
    Anisotropic diffusion (Perona-Malik speckle filter).

    Usage:
        imgout = apply_anisd(img, gamma, step, ploton)

    Arguments:
        img    - input image
        gamma  - max value of .25 for stability
        step   - tuple, the distance between adjacent pixels in (y,x)
        ploton - if True, the image will be plotted on every iteration

    The remaining parameters are read from the global Conf object
    (Conf.speckle_filter_parameters[Conf.speckle_filter_name]):
        kappa  - conduction coefficient 20-100 ?
        N      - number of iterations (niter)
        equation - 1 Perona Malik diffusion equation No 1
                   2 Perona Malik diffusion equation No 2

    Returns:
        imgout - diffused image, converted back to uint8 via
                 cv2.convertScaleAbs.

    kappa controls conduction as a function of gradient. If kappa is low
    small intensity gradients are able to block conduction and hence diffusion
    across step edges. A large value reduces the influence of intensity
    gradients on conduction.

    gamma controls speed of diffusion (you usually want it at a maximum of
    0.25)

    step is used to scale the gradients in case the spacing between adjacent
    pixels differs in the x and y axes

    Diffusion equation 1 favours high contrast edges over low contrast ones.
    Diffusion equation 2 favours wide regions over smaller ones.

    Reference:
    P. Perona and J. Malik.
    Scale-space and edge detection using ansotropic diffusion.
    IEEE Transactions on Pattern Analysis and Machine Intelligence,
    12(7):629-639, July 1990.

    Original MATLAB code by Peter Kovesi
    School of Computer Science & Software Engineering
    The University of Western Australia
    pk @ csse uwa edu au
    <http://www.csse.uwa.edu.au>

    Translated to Python and optimised by Alistair Muldal
    Sep 2017 modified by Denis Demchev
    """
    # init args from the global speckle-filter configuration
    kappa = Conf.speckle_filter_parameters[Conf.speckle_filter_name]['kappa']
    niter = Conf.speckle_filter_parameters[Conf.speckle_filter_name]['N']
    option = Conf.speckle_filter_parameters[Conf.speckle_filter_name]['equation']
    # ...you could always diffuse each color channel independently if you
    # really want
    if img.ndim == 3:
        warnings.warn("Only grayscale images allowed, converting to 2D matrix")
        img = img.mean(2)
    # initialize output array
    img = img.astype('float32')
    imgout = img.copy()
    # niter
    # initialize some internal variables
    deltaS = np.zeros_like(imgout)
    deltaE = deltaS.copy()
    NS = deltaS.copy()
    EW = deltaS.copy()
    gS = np.ones_like(imgout)
    gE = gS.copy()
    # create the plot figure, if requested
    if ploton:
        import pylab as pl
        fig = pl.figure(figsize=(20, 5.5), num="Anisotropic diffusion")
        ax1, ax2 = fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2)
        ax1.imshow(img, interpolation='nearest')
        ih = ax2.imshow(imgout, interpolation='nearest', animated=True)
        ax1.set_title("Original image")
        ax2.set_title("Iteration 0")
        fig.canvas.draw()
    for ii in range(niter):
        # calculate the diffs (southward and eastward gradients)
        deltaS[:-1, :] = np.diff(imgout, axis=0)
        deltaE[:, :-1] = np.diff(imgout, axis=1)
        # conduction gradients (only need to compute one per dim!)
        if option == 1:
            gS = np.exp(-(deltaS / kappa) ** 2.) / step[0]
            gE = np.exp(-(deltaE / kappa) ** 2.) / step[1]
        elif option == 2:
            gS = 1. / (1. + (deltaS / kappa) ** 2.) / step[0]
            gE = 1. / (1. + (deltaE / kappa) ** 2.) / step[1]
        # update matrices
        E = gE * deltaE
        S = gS * deltaS
        # subtract a copy that has been shifted 'North/West' by one
        # pixel. don't as questions. just do it. trust me.
        NS[:] = S
        EW[:] = E
        NS[1:, :] -= S[:-1, :]
        EW[:, 1:] -= E[:, :-1]
        # update the image
        imgout += gamma * (NS + EW)
        if ploton:
            iterstring = "Iteration %i" % (ii + 1)
            ih.set_data(imgout)
            ax2.set_title(iterstring)
            fig.canvas.draw()
            # sleep(0.01)
    return cv2.convertScaleAbs(imgout)
#################################################################################
#################################################################################
#################################################################################
# MAIN PROGRAM
#################################################################################
#################################################################################
#################################################################################
# run cc_bm_parallel_dev.py ./data/test_kara_01.tif ./data/test_kara_02.tif 64 4 100
import cc_config
import cc_calc_drift
import cc_calc_drift_filter
import cc_calc_defo
#VAS
if __name__ == '__main__':
    # Sea-ice drift / deformation pipeline: cross-correlate two SAR images,
    # filter the resulting drift vectors, derive deformation fields and
    # export them to GeoJSON / GeoTIFF / netCDF.
    multiprocessing.freeze_support()
    # check command line args
    assert (len(sys.argv) == 6), "Expecting 5 arguments: filename1 filename2 block_size search_area grid_step"
    # init config class
    Conf = cc_config.Config()
    Conf.init(f1_name=sys.argv[1], f2_name=sys.argv[2],
              block_size=int(sys.argv[3]), search_area=int(sys.argv[4]), grid_step=int(sys.argv[5]))
    Conf.self_prepare()
    global_start_time = time.time()
    # Downscale both input images before matching, if configured
    if Conf.rescale_apply:
        print('Rescaling...')
        Conf.img1 = rescale(Conf.img1, 1.0 / Conf.rescale_factor)
        Conf.img2 = rescale(Conf.img2, 1.0 / Conf.rescale_factor)
        print('Done!')
    # Image intensity normalization
    if Conf.image_intensity_byte_normalization:
        print('\nImage intensity rescaling (0, 255)...')
        #Conf.img1 = exposure.adjust_log(Conf.img1)
        #Conf.img2 = exposure.adjust_log(Conf.img2)
        # Rescale intensity only
        Conf.img1 = exposure.rescale_intensity(Conf.img1, out_range=(0, 255))
        Conf.img2 = exposure.rescale_intensity(Conf.img2, out_range=(0, 255))
        # Contrast-stretch between the 2nd and 98th percentile, then to 8-bit
        p2, p98 = np.percentile(Conf.img1, (2, 98))
        Conf.img1 = img_as_ubyte(exposure.rescale_intensity(Conf.img1, in_range=(p2, p98)))
        p2, p98 = np.percentile(Conf.img2, (2, 98))
        Conf.img2 = img_as_ubyte(exposure.rescale_intensity(Conf.img2, in_range=(p2, p98)))
        print('Done!')
    # Normalization
    #print('\n### Laplacian! ###\n')
    #Conf.img1 = cv2.Laplacian(Conf.img1, cv2.CV_64F, ksize=19)
    #Conf.img2 = cv2.Laplacian(Conf.img2, cv2.CV_64F, ksize=19)
    # Speckle filtering
    if Conf.speckle_filtering:
        # NOTE(review): `Conf.speckle_filter_name in Conf.speckle_filter_name`
        # tests the name against itself (a string always contains itself), so
        # this assert can never fail -- presumably a collection of supported
        # filter names was intended here. Confirm and fix upstream.
        assert (Conf.speckle_filtering and (Conf.speckle_filter_name in Conf.speckle_filter_name)), \
            '%s error: appropriate processor is not found' % Conf.speckle_filter_name
        print('\nSpeckle filtering with %s\n' % Conf.speckle_filter_name)
        if Conf.speckle_filter_name == 'Anisd':
            Conf.img1 = apply_anisd(Conf.img1, gamma=0.25, step=(1., 1.), ploton=False)
            Conf.img2 = apply_anisd(Conf.img2, gamma=0.25, step=(1., 1.), ploton=False)
    #####################
    ### Calculate Drift ###
    #####################
    print('\nStart multiprocessing...')
    nb_cpus = 10
    height, width = Conf.img1.shape
    print('Image size Height: %s px Width: %s px' % (height, width))
    # init drift calculator class
    Calc = cc_calc_drift.CalcDrift(Conf, Conf.img1, Conf.img2)
    Calc.create_arguments(height, width)
    # arg generator: one task index per grid block
    argGen = ((i) for i in range(Calc.Count))
    pool = multiprocessing.Pool(processes=nb_cpus)
    # calculate drift for every grid block in parallel
    results = pool.map(Calc.calculate_drift, argGen)
    pool.close()
    pool.join()
    print('Done!')
    exec_t = (time.time() - global_start_time) / 60.
    print('Calculated in--- %.1f minutes ---' % exec_t)
    pref = 'dm'
    '''
    print('\nPlotting...')
    try:
    plot_arrows_from_list(pref, '%s/%s_%s_01.png' % (Conf.res_dir, pref, Conf.out_fname),
    Conf.img1, results, arrwidth=0.0021, headwidth=2.5, flag_color=True)
    plot_arrows_from_list(pref, '%s/%s_%s_02.png' % (Conf.res_dir, pref, Conf.out_fname),
    Conf.img2, results, arrwidth=0.0021, headwidth=2.5, flag_color=True)
    print('Plot end!')
    except:
    print('Plot FAULT!')
    '''
    #####################
    #### Filter vectors ####
    #####################
    print('\nStart outliers filtering...')
    # init result filtering class
    Filter = cc_calc_drift_filter.CalcDriftFilter(Conf)
    # filter statistical outliers (spikes) from the raw drift vectors
    Cnt = Filter.filter_outliers(results)
    # Filter land vectors
    print('\nLand mask filtering...')
    land_filtered_vectors = Filter.filter_land()
    print('Done\n')
    print('Done!')
    print('\nNumber of vectors: \n Unfiltered: %d Filtered: %d\n' %
          (Cnt[0], Cnt[1]))
    print('\nPlotting...')
    plot_arrows('%s/01_spikes_%s_%s.png' % (Conf.res_dir, pref, Conf.out_fname), Conf.img1, Filter.xxx_f, Filter.yyy_f, Filter.uuu_f, Filter.vvv_f, Filter.ccc_f,
                arrwidth=0.002, headwidth=5.5, flag_color=True)
    plot_arrows('%s/02_spikes_%s_%s.png' % (Conf.res_dir, pref, Conf.out_fname), Conf.img2, Filter.xxx_f, Filter.yyy_f, Filter.uuu_f, Filter.vvv_f, Filter.ccc_f,
                arrwidth=0.002, headwidth=5.5, flag_color=True)
    #####################
    #### Defo calculate ####
    #####################
    print('\n### Start deformation calculation...')
    # init defo calculator class
    Defo = cc_calc_defo.CalcDefo(Conf, Calc, Filter)
    # calculate deformation from the 2D arrays
    mag_speed, divergence, curl, shear, total_deform = Defo.calculate_defo()
    print('\n### Success!\n')
    #########################
    # EXPORT TO GEO-FORMATS
    #########################
    files_pref = '%spx' % Conf.grid_step
    # Create output directories; bare excepts swallow "already exists" errors
    try:
        os.makedirs('%s/vec' % Conf.res_dir)
    except:
        pass
    try:
        os.makedirs('%s/defo/nc' % Conf.res_dir)
    except:
        pass
    # Vector
    export_to_vector(Conf.f1_name, Filter.xxx_f, Filter.yyy_f, Filter.uuu_f, Filter.vvv_f,
                     '%s/vec/%s_ICEDRIFT_%s.json' % (Conf.res_dir, files_pref, Conf.out_fname),
                     gridded=False, data_format='geojson')
    ################
    # Geotiff
    ################
    print('\nStart making geotiff..')
    try:
        os.makedirs('%s/defo/gtiff' % Conf.res_dir)
    except:
        pass
    scale_factor = 1
    divergence_gtiff = divergence * scale_factor
    # GDAL geotransform for the derived grid: origin shifted by half a cell,
    # pixel sizes scaled by the grid step
    GeoT = (Calc.geotransform[0] - Conf.grid_step/2.*Calc.pixelHeight, Conf.grid_step*Calc.pixelWidth, 0.,
            Calc.geotransform[3] + Conf.grid_step/2.*Calc.pixelHeight, 0., Conf.grid_step*Calc.pixelHeight)
    NDV = np.nan
    # Get projection WKT
    gd_raster = gdal.Open(Conf.f1_name)
    Projection = gd_raster.GetProjection()
    #create_geotiff('%s/defo/gtiff/%s_ICEDIV_%s' % (Conf.res_dir, files_pref, Conf.out_fname),
    #               divergence_gtiff, NDV, u_2d.shape[0], u_2d.shape[1], GeoT, Projection, divergence_gtiff)
    create_geotiff('%s/defo/gtiff/%s_ICEDIV_%s' % (Conf.res_dir, files_pref, Conf.out_fname), divergence_gtiff, NDV, GeoT, Projection)
    #####################
    # Shear
    #####################
    shear_gtiff = shear * scale_factor
    GeoT = (Calc.geotransform[0] - Conf.grid_step / 2. * Calc.pixelHeight, Conf.grid_step * Calc.pixelWidth, 0.,
            Calc.geotransform[3] + Conf.grid_step / 2. * Calc.pixelHeight, 0., Conf.grid_step * Calc.pixelHeight)
    NDV = np.nan
    # Get projection WKT
    gd_raster = gdal.Open(Conf.f1_name)
    Projection = gd_raster.GetProjection()
    create_geotiff('%s/defo/gtiff/%s_ICESHEAR_%s' % (Conf.res_dir, files_pref, Conf.out_fname), shear_gtiff, NDV,
                   GeoT, Projection)
    ################
    # END Geotiff
    ################
    ############
    # Netcdf
    ############
    dict_deformation = {'ice_speed': {'data': mag_speed, 'scale_factor': 1., 'units': 'cm/s'},
                        'ice_divergence': {'data': divergence, 'scale_factor': scale_factor, 'units': '1/h'},
                        'ice_curl': {'data': curl, 'scale_factor': scale_factor, 'units': '1/h'},
                        'ice_shear': {'data': shear, 'scale_factor': scale_factor, 'units': '1/h'},
                        'total_deformation': {'data': total_deform, 'scale_factor': scale_factor, 'units': '1/h'}}
    print('\nStart making netCDF for ice deformation...\n')
    make_nc('%s/defo/nc/%s_ICEDEF_%s.nc' % (Conf.res_dir, files_pref, Conf.out_fname),
            Calc.lon_2d, Calc.lat_2d, dict_deformation)
    print('Success!\n')
    ############
    # END Netcdf
    ############
    ############################
    # END EXPORT TO GEO-FORMATS
    ############################
    # Calc_img_entropy
    calc_img_entropy = False
    #ent_spikes_dm_S1A_EW_GRDM_1SDH_20150114T133134_20150114T133234_004168_0050E3_8C66_HV_S1A_EW_GRDM_1SDH_20150115T025040_20150115T025140_004176_005114_5C27_HV
    # Extract acquisition timestamps (YYYYMMDD?HHMMSS pattern) from file names
    d1 = re.findall(r'\d\d\d\d\d\d\d\d\w\d\d\d\d\d\d', Conf.f1_name)[0]
    d2 = re.findall(r'\d\d\d\d\d\d\d\d\w\d\d\d\d\d\d', Conf.f2_name)[0]
    # Calculate entropy
    if calc_img_entropy:
        # NOTE(review): this branch is disabled (calc_img_entropy is False)
        # and looks broken as written: `entr_Conf` is never created before its
        # `img1` attribute is assigned, and `xxx_f`/`yyy_f` are undefined here
        # (elsewhere they are accessed as Filter.xxx_f / Filter.yyy_f).
        print('Calculate entropy')
        plt.clf()
        from skimage.util import img_as_ubyte
        from skimage.filters.rank import entropy
        entr_Conf.img1 = entropy(Conf.img1, disk(16))
        # xxx_f, yyy_f
        ff = open('%s/entropy/ent_NCC_%s_%s.txt' % (Conf.res_dir, d1, d2), 'w')
        for i in range(len(xxx_f)):
            ff.write('%7d %7.2f\n' % (i+1, np.mean(entr_Conf.img1[yyy_f[i]-Conf.grid_step:yyy_f[i]+Conf.grid_step,
                                                                  xxx_f[i]-Conf.grid_step:xxx_f[i]+Conf.grid_step])))
        ff.close()
        # TODO:
        plt.imshow(entr_Conf.img1, cmap=plt.cm.get_cmap('hot', 10))
        plt.colorbar()
        plt.clim(0, 10);
        plt.savefig('%s/entropy/img/ent_NCC_%s_%s.png' % (Conf.res_dir, d1, d2), bbox_inches='tight', dpi=300)
# END | 35.517132 | 716 | 0.553318 |
05fe79efe59900fb39e193105ec376940b5bbe44 | 426 | py | Python | tests/test_version.py | hsh-nids/python-betterproto | f5d3b48b1aa49fd64513907ed70124b32758ad3e | [
"MIT"
] | 708 | 2019-10-11T06:23:40.000Z | 2022-03-31T09:39:08.000Z | tests/test_version.py | hsh-nids/python-betterproto | f5d3b48b1aa49fd64513907ed70124b32758ad3e | [
"MIT"
] | 302 | 2019-11-11T22:09:21.000Z | 2022-03-29T11:21:04.000Z | tests/test_version.py | hsh-nids/python-betterproto | f5d3b48b1aa49fd64513907ed70124b32758ad3e | [
"MIT"
] | 122 | 2019-12-04T16:22:53.000Z | 2022-03-20T09:31:10.000Z | from betterproto import __version__
from pathlib import Path
import tomlkit
# Path to the repository's pyproject.toml, reached by stepping up two levels
# from this test file's location and then resolving to an absolute path.
PROJECT_TOML = Path(__file__).joinpath("..", "..", "pyproject.toml").resolve()
| 30.428571 | 78 | 0.706573 |
af01a3ec2accdacee77c90151e5eed151050b732 | 383 | py | Python | PythonMundoDois/ex048.py | HendrylNogueira/CursoPython3 | c3d9d4e2a27312b83d744aaf0f8d01b26e6faf4f | [
"MIT"
] | null | null | null | PythonMundoDois/ex048.py | HendrylNogueira/CursoPython3 | c3d9d4e2a27312b83d744aaf0f8d01b26e6faf4f | [
"MIT"
] | null | null | null | PythonMundoDois/ex048.py | HendrylNogueira/CursoPython3 | c3d9d4e2a27312b83d744aaf0f8d01b26e6faf4f | [
"MIT"
] | null | null | null | '''Faa um programa que calcule a soma entre todos os nmeros impares que so mltiplos de trs e que se encontram
no intervalo de 1 at 500. '''
# Collect the odd multiples of three between 1 and 500, then count and sum
# them (range(1, 501, 2) yields only the odd numbers of the interval).
multiplos_impares = [numero for numero in range(1, 501, 2) if numero % 3 == 0]
cont = len(multiplos_impares)
total = sum(multiplos_impares)
print(f'Foram encontrados {cont} valores coma as caractersticas especificadas.')
print(f'E a soma deles igual a {total}')
| 31.916667 | 114 | 0.67624 |
af029a134b4e84a7dca43a17a1ce48c9d78abdd2 | 9,722 | py | Python | Models.py | BradHend/machine_learning_from_scratch | 6c83f17d1c48da9ad3df902b3090a8cb2c544f15 | [
"MIT"
] | null | null | null | Models.py | BradHend/machine_learning_from_scratch | 6c83f17d1c48da9ad3df902b3090a8cb2c544f15 | [
"MIT"
] | null | null | null | Models.py | BradHend/machine_learning_from_scratch | 6c83f17d1c48da9ad3df902b3090a8cb2c544f15 | [
"MIT"
] | null | null | null | """classes and methods for different model architectures
"""
#python packages
import numpy as np
# Machine Learning from Scratch packages
from Layers import FullyConnected
from utils.optimizers import *
| 40.508333 | 123 | 0.54783 |
af03e1bca2e6bcaf4e2f161d2b4078d32b20e402 | 421 | py | Python | tests/parser/aggregates.count.assignment.17.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | tests/parser/aggregates.count.assignment.17.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | tests/parser/aggregates.count.assignment.17.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | input = """
a(S,T,Z) :- #count{X: r(T,X)} = Z, #count{W: q(W,S)} = T, #count{K: p(K,Y)} = S.
q(1,1).
q(2,2).
r(1,1).
r(1,2).
r(1,3).
r(2,2).
r(3,3).
p(1,1).
p(2,2).
%out{ a(2,1,3) }
%repository error
"""
# Expected result for this parser test case: byte-identical to the `input`
# program above (the `%out{...}` / `%repository error` lines included), i.e.
# the text is expected to round-trip through the parser unchanged.
output = """
a(S,T,Z) :- #count{X: r(T,X)} = Z, #count{W: q(W,S)} = T, #count{K: p(K,Y)} = S.
q(1,1).
q(2,2).
r(1,1).
r(1,2).
r(1,3).
r(2,2).
r(3,3).
p(1,1).
p(2,2).
%out{ a(2,1,3) }
%repository error
"""
| 10.268293 | 80 | 0.420428 |
af055ba7a6d6cbe2445070c4e478e7e26c56dad3 | 1,724 | py | Python | ipmi_power_manager.py | spirkaa/ansible-homelab | 94138c85ddb132a08dab55b4e9a9b43160d02c76 | [
"MIT"
] | null | null | null | ipmi_power_manager.py | spirkaa/ansible-homelab | 94138c85ddb132a08dab55b4e9a9b43160d02c76 | [
"MIT"
] | null | null | null | ipmi_power_manager.py | spirkaa/ansible-homelab | 94138c85ddb132a08dab55b4e9a9b43160d02c76 | [
"MIT"
] | null | null | null | import argparse
import logging
import os
import requests
import urllib3
from dotenv import load_dotenv
# Module-level logger. BUG FIX: the original called
# logging.getLogger("__name__") -- the quotes made it a literal string, so
# the logger was registered under the name '__name__' instead of this
# module's actual import name. Pass the __name__ variable instead.
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s [%(levelname)8s] [%(name)s:%(lineno)s:%(funcName)20s()] --- %(message)s",
    level=logging.INFO,
)
# Quieten urllib3 and suppress its warnings (e.g. for unverified HTTPS).
logging.getLogger("urllib3").setLevel(logging.WARNING)
urllib3.disable_warnings()
# Load IPMI credentials from a local .env file into the environment.
load_dotenv()
IPMI_USERNAME = os.getenv("IPMI_USERNAME")
IPMI_PASSWORD = os.getenv("IPMI_PASSWORD")
# Redfish REST endpoints of the Supermicro BMC.
API_ROOT = "https://spmaxi-ipmi.home.devmem.ru/redfish/v1/"
API_AUTH = "SessionService/Sessions"
API_ACTIONS_RESET = "Systems/1/Actions/ComputerSystem.Reset"
# Redfish ResetType values for power on / graceful power off.
POWER_STATE_ON = "On"
POWER_STATE_OFF = "GracefulShutdown"
parser = argparse.ArgumentParser(description="Supermicro IPMI Power Manager")
# Both flags write the same boolean dest: --on -> True, --off -> False.
# NOTE(review): with neither flag given, the dest appears to keep the first
# action's default (False, i.e. power off) -- confirm that is intended.
parser.add_argument("--on", dest="power_state", action="store_true")
parser.add_argument("--off", dest="power_state", action="store_false")
args = parser.parse_args()
# Map the boolean flag onto the Redfish ResetType string.
if args.power_state:
    power_state = POWER_STATE_ON
else:
    power_state = POWER_STATE_OFF
# set_power_state is defined elsewhere in this module.
set_power_state(power_state)
| 28.262295 | 101 | 0.728538 |
af05ab26695bad32472af5a5dde8334bddbea53d | 1,572 | py | Python | pyhsi/gui/graphics.py | rddunphy/pyHSI | b55c2a49568e04e0a2fb39da01cfe1f129bc86a4 | [
"MIT"
] | null | null | null | pyhsi/gui/graphics.py | rddunphy/pyHSI | b55c2a49568e04e0a2fb39da01cfe1f129bc86a4 | [
"MIT"
] | null | null | null | pyhsi/gui/graphics.py | rddunphy/pyHSI | b55c2a49568e04e0a2fb39da01cfe1f129bc86a4 | [
"MIT"
] | null | null | null | """Stuff to do with processing images and loading icons"""
import importlib.resources as res
import cv2
import PySimpleGUI as sg
def get_application_icon():
    """Return the raw bytes of the PyHSI application icon (pyhsi.png).

    NOTE(review): despite the old ".ico for Windows" wording, this always
    loads the .png resource -- no OS-specific variant is selected here.
    """
    return res.read_binary("pyhsi.gui.icons", "pyhsi.png")
def get_icon(icon_name, hidpi=False):
    """Return the raw image bytes for the named icon (not a file path).

    Loads "<icon_name><size>.png" from the pyhsi.gui.icons package, where
    size is 40 px when hidpi is True and 25 px otherwise.
    """
    size = 40 if hidpi else 25
    return res.read_binary("pyhsi.gui.icons", f"{icon_name}{size}.png")
def get_icon_button(icon_name, hidpi=False, **kwargs):
    """Build a PySimpleGUI button whose image is the named icon.

    Extra keyword arguments are forwarded to sg.Button (key, tooltip, ...).
    """
    return sg.Button(
        "",
        image_data=get_icon(icon_name, hidpi=hidpi),
        mouseover_colors=("white", "#405e92"),
        **kwargs,
    )
def set_button_icon(button, icon_name, hidpi=False, **kwargs):
    """Swap the image shown on an existing button to the named icon.

    Extra keyword arguments are forwarded to button.update().
    """
    button.update(image_data=get_icon(icon_name, hidpi=hidpi), **kwargs)
def resize_img_to_area(img, size, preserve_aspect_ratio=True, interpolation=False):
    """Resize frame to fill available area in preview panel.

    20 px is subtracted from each dimension of `size` (clamped to at least
    20 px). With preserve_aspect_ratio the image is scaled uniformly so it
    fits inside that area; otherwise it is stretched to fill it exactly.
    `interpolation` selects linear vs. nearest-neighbour resampling.
    """
    avail_w = max(size[0] - 20, 20)
    avail_h = max(size[1] - 20, 20)
    if preserve_aspect_ratio:
        src_h = img.shape[0]
        src_w = img.shape[1]
        target_w = round(min(avail_w, src_w * avail_h / src_h))
        target_h = round(min(avail_h, src_h * avail_w / src_w))
    else:
        target_w, target_h = avail_w, avail_h
    interp = cv2.INTER_LINEAR if interpolation else cv2.INTER_NEAREST
    return cv2.resize(img, (target_w, target_h), interpolation=interp)
| 31.44 | 83 | 0.667939 |
af0729cb1679e26625740cd816c3bcd5296cbb19 | 315 | py | Python | configs/densenet169_lr_0.001.py | FeiYuejiao/NLP_Pretrain | 7aa4693c31a7bba9b90f401d2586ef154dd7fb81 | [
"MIT"
] | null | null | null | configs/densenet169_lr_0.001.py | FeiYuejiao/NLP_Pretrain | 7aa4693c31a7bba9b90f401d2586ef154dd7fb81 | [
"MIT"
] | 1 | 2020-12-30T13:49:29.000Z | 2020-12-30T13:49:29.000Z | configs/densenet169_lr_0.001.py | FeiYuejiao/NLP_Pretrain | 7aa4693c31a7bba9b90f401d2586ef154dd7fb81 | [
"MIT"
] | null | null | null | lr = 0.001
# Output / training-loop settings for the densenet169 run (the learning
# rate itself is the `lr` value defined just above).
model_path = 'model/IC_models/densenet169_lr_0.001/'
crop_size = 32
log_step = 10
save_step = 500
num_epochs = 400
batch_size = 256
num_workers = 8
# presumably: whether to resume from a saved checkpoint -- confirm against
# the training script that consumes this config.
loading = False
# lr
# Model parameters
model = dict(
    net='densenet169',
    embed_size=256,
    hidden_size=512,
    num_layers=1,
    resnet=101
)
| 14.318182 | 52 | 0.695238 |
af08ea1d739ab24c301e649fcfca7bffa176fb4c | 3,750 | py | Python | src/models/metapop.py | TLouf/multiling-twitter | 9a39b5b70da53ca717cb74480697f3756a95b8e4 | [
"RSA-MD"
] | 1 | 2021-05-09T15:42:04.000Z | 2021-05-09T15:42:04.000Z | src/models/metapop.py | TLouf/multiling-twitter | 9a39b5b70da53ca717cb74480697f3756a95b8e4 | [
"RSA-MD"
] | 3 | 2020-10-21T09:04:03.000Z | 2021-06-02T02:05:13.000Z | src/models/metapop.py | TLouf/multiling-twitter | 9a39b5b70da53ca717cb74480697f3756a95b8e4 | [
"RSA-MD"
] | null | null | null | '''
Implements the computation of the time derivatives and associated Jacobian
corresponding to the approximated equations in a metapopulation. Added kwargs in
every function so that we may reuse the parameter dictionary used in the models,
even if some of the parameters it contains are not used in these functions.
'''
import numpy as np
def bi_model_system(N_L, N, nu, nu_T_N, a=1, s=0.5, rate=1, **kwargs):
    '''
    Time derivatives of the A- and B-monolingual populations in every cell
    for Castello's model.

    N_L stacks the A counts followed by the B counts (length 2 * n_cells);
    N is the total population per cell, nu the mobility matrix and nu_T_N a
    per-destination normaliser (by its use, presumably np.dot(nu.T, N)
    precomputed by the caller -- confirm at the call site). Extra keyword
    arguments are swallowed so a shared parameter dict can be splatted in.
    Returns the stacked [dN_A, dN_B] array.
    '''
    n_cells = N.shape[0]
    N_A = N_L[:n_cells]
    N_B = N_L[n_cells:]
    # Mobility-weighted speaker totals per destination cell; dividing by
    # nu_T_N turns them into fractions of the visiting population.
    sigma_A = np.dot(nu.T, N_A)
    sigma_B = np.dot(nu.T, N_B)
    frac_A = sigma_A / nu_T_N
    frac_B = sigma_B / nu_T_N
    undecided = N - N_A - N_B
    dN_A = rate * (
        s * undecided * np.dot(nu, (1 - frac_B)**a)
        - (1-s) * N_A * np.dot(nu, frac_B**a))
    dN_B = rate * (
        (1-s) * undecided * np.dot(nu, (1 - frac_A)**a)
        - s * N_B * np.dot(nu, frac_A**a))
    return np.concatenate((dN_A, dN_B))
def bi_pref_system(N_L, N, nu, nu_T_N, mu=0.02, c=0.1, s=0.5, q=0.5, rate=1,
                   **kwargs):
    '''
    Time derivatives of the A- and B-monolingual populations in every cell
    for our model.

    Same array conventions as bi_model_system; mu, c, s and q are the
    model's rate/preference parameters. Extra keyword arguments are
    swallowed so a shared parameter dict can be splatted in.
    '''
    n_cells = N.shape[0]
    N_A = N_L[:n_cells]
    N_B = N_L[n_cells:]
    # Mobility-weighted speaker totals per destination cell, normalised by
    # the shared per-destination factor nu_T_N inside mix_term below.
    sigma_A = np.dot(nu.T, N_A)
    sigma_B = np.dot(nu.T, N_B)
    row_sums = np.sum(nu, axis=1)
    mix_term = np.dot(nu, ((1-q)*sigma_A - q*sigma_B) / nu_T_N)
    undecided = N - N_A - N_B
    dN_A = rate * (
        mu*s * undecided * (q*row_sums + mix_term)
        - c*(1-mu)*(1-s) * N_A * ((1-q)*row_sums - mix_term))
    dN_B = rate * (
        mu*(1-s) * undecided * ((1-q)*row_sums - mix_term)
        - c*(1-mu)*s * N_B * (q*row_sums + mix_term))
    return np.concatenate((dN_A, dN_B))
def bi_pref_jacobian(N_L, N, nu, nu_T_N, mu=0.02, c=0.1, s=0.5, q=0.5,
                     **kwargs):
    '''
    Computes the Jacobian of the system at a given point for our model.

    N_L stacks the A- and B-monolingual counts per cell; the returned
    (2*n_cells, 2*n_cells) matrix is assembled from four n_cells x n_cells
    blocks holding the partial derivatives of the [dN_A; dN_B] equations
    with respect to [N_A; N_B]. Extra keyword arguments are accepted (and
    ignored) so a shared parameter dictionary can be splatted in.
    '''
    n_cells = N.shape[0]
    N_A = N_L[:n_cells]
    N_B = N_L[n_cells:]
    # Mobility-weighted speaker totals per destination cell.
    nu_T_N_A = np.dot(nu.T, N_A)
    nu_T_N_B = np.dot(nu.T, N_B)
    # nu / nu_T_N divides each column j of nu by the scalar nu_T_N[j]
    # (broadcasting over the last axis) before the product with nu.T.
    nu_cols_prod = np.dot(nu / nu_T_N, nu.T)
    nu_T_N_L_term = ((1-q)*nu_T_N_A - q*nu_T_N_B) / nu_T_N
    sum_nu_rows = np.sum(nu, axis=1)
    # The (x * M.T).T pattern below multiplies every row i of M by the
    # per-cell scalar x[i]; the np.eye(...) terms add the diagonal parts.
    AA_block = ((mu*s*(1-q)*(N-N_A-N_B) + c*(1-mu)*(1-s)*(1-q)*N_A)
                * nu_cols_prod.T).T
    AA_block += np.eye(n_cells) * (
        (-mu*s*q - c*(1-mu)*(1-s)*(1-q)) * sum_nu_rows
        + np.dot(
            nu,
            (c*(1-mu)*(1-s) - mu*s) * nu_T_N_L_term))
    AB_block = ((-mu*s*q*(N-N_A-N_B) - c*(1-mu)*(1-s)*q*N_A)
                * nu_cols_prod.T).T
    AB_block += np.eye(n_cells) * (
        -mu*s*q * sum_nu_rows
        + np.dot(
            nu,
            -mu*s * nu_T_N_L_term))
    BA_block = (-(mu*(1-s)*(1-q)*(N-N_A-N_B) - c*(1-mu)*s*(1-q)*N_B)
                * nu_cols_prod.T).T
    BA_block += np.eye(n_cells) * (
        -mu*(1-s)*(1-q) * sum_nu_rows
        + np.dot(
            nu,
            mu*(1-s) * nu_T_N_L_term))
    BB_block = ((mu*(1-s)*q*(N-N_A-N_B) + c*(1-mu)*s*q*N_B)
                * nu_cols_prod.T).T
    BB_block += np.eye(n_cells) * (
        (-mu*(1-s)*(1-q) - c*(1-mu)*s*q) * sum_nu_rows
        + np.dot(
            nu,
            (-c*(1-mu)*s + mu*(1-s)) * nu_T_N_L_term))
    # Assemble the full Jacobian from the four blocks.
    jacobian = np.block([[AA_block, AB_block],
                         [BA_block, BB_block]])
    return jacobian
| 37.128713 | 80 | 0.553333 |
af0a0e2a3cb4cd7ca612fe33ee2283d0d807bbec | 2,759 | py | Python | abstract_tiles.py | CompassMentis/towers_of_strength | 405af4dc114bd15fed24135b050267a2126c9d52 | [
"MIT"
] | null | null | null | abstract_tiles.py | CompassMentis/towers_of_strength | 405af4dc114bd15fed24135b050267a2126c9d52 | [
"MIT"
] | 1 | 2019-10-12T10:31:24.000Z | 2019-10-12T10:31:24.000Z | abstract_tiles.py | CompassMentis/towers_of_strength | 405af4dc114bd15fed24135b050267a2126c9d52 | [
"MIT"
] | null | null | null | import pygame
from settings import Settings
from vector import Vector
import utils
| 29.98913 | 119 | 0.642987 |