Dataset schema (one row per source file; ⌀ marks nullable columns):

| Column | Type | Lengths / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
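The rows below follow this schema, one per source file. For orientation, here is a minimal sketch of streaming such a split with the Hugging Face `datasets` library; the dataset identifier used here is a placeholder, not the real name.

```python
from datasets import load_dataset

# Placeholder identifier: substitute the actual dataset name and split.
rows = load_dataset("example-org/python-source-files", split="train", streaming=True)

for row in rows:
    # Each row carries one file's text plus the repository metadata listed above.
    stars = row["max_stars_count"] or 0  # column is nullable
    if stars > 100:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
        print(row["content"][:200])  # first 200 characters of the file
        break
```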
**Row 1: `pandaharvester/harvestercredmanager/base_cred_manager.py`**
- hexsha: `e11d5329a3f8d1b9c8259746d3b9e45f2b0afeb5` · size: 490 · ext: py · lang: Python
- repo: `tsulaiav/harvester` @ `ca3f78348019dd616738f2da7d50e81700a8e6b9` · licenses: ["Apache-2.0"] (the stars, issues and forks columns report the same path, repo and head)
- stars: 7 (2019-08-30T07:35:48.000Z to 2022-02-13T14:46:53.000Z) · issues: 29 (2019-09-20T14:04:37.000Z to 2021-09-13T12:53:05.000Z) · forks: 15 (2019-07-30T11:48:29.000Z to 2022-03-29T21:49:05.000Z)
- content:

```python
from pandaharvester.harvestercore.plugin_base import PluginBase
# base credential manager
class BaseCredManager(PluginBase):
# constructor
def __init__(self, **kwarg):
PluginBase.__init__(self, **kwarg)
# check credential lifetime for monitoring/alerting purposes
def check_credential_lifetime(self):
return
# check proxy
def check_credential(self):
return True
# renew proxy
def renew_credential(self):
return True, ''
```

- avg_line_length: 22.272727 · max_line_length: 64 · alphanum_fraction: 0.7
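The file above only defines the hook points a credential-manager plugin exposes. A minimal subclass might look like the sketch below; the class name and return values are hypothetical, not taken from the harvester codebase.

```python
from pandaharvester.harvestercredmanager.base_cred_manager import BaseCredManager


class DummyCredManager(BaseCredManager):
    """Hypothetical plugin: fixed lifetime, always-valid credential, no-op renewal."""

    def __init__(self, **kwarg):
        BaseCredManager.__init__(self, **kwarg)

    def check_credential_lifetime(self):
        return 96  # remaining lifetime (e.g. hours) reported for monitoring/alerting

    def check_credential(self):
        return True  # credential still valid, no renewal needed

    def renew_credential(self):
        return True, ''  # (succeeded, error message)
```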
**Row 2: `models/networks.py`**
- hexsha: `93735a218f7961025e3482f40c7f8cb2bd4f0288` · size: 54,985 · ext: py · lang: Python
- repo: `CaptainEven/MyEnlightenGAN` @ `8e7cfb01d77afa26ddc98e362ed4616f0cd3f23a` · licenses: ["BSD-3-Clause"] (the stars, issues and forks columns report the same path, repo and head)
- stars: 1 (2022-02-28T08:26:16.000Z to 2022-02-28T08:26:16.000Z) · issues: null · forks: null
- content:

```python
import functools
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import init
# from torch.utils.serialization import load_lua
from lib.nn import SynchronizedBatchNorm2d as SynBN2d
###############################################################################
# Functions
###############################################################################
def pad_tensor(input, divide=16):
"""
:param input:
:param divide:
:return:
"""
height_org, width_org = input.shape[-2:]
if width_org % divide != 0 or height_org % divide != 0:
width_res = width_org % divide
height_res = height_org % divide
if width_res != 0:
width_div = divide - width_res
pad_left = int(width_div / 2)
pad_right = int(width_div - pad_left)
else:
pad_left = 0
pad_right = 0
if height_res != 0:
height_div = divide - height_res
pad_top = int(height_div / 2)
pad_bottom = int(height_div - pad_top)
else:
pad_top = 0
pad_bottom = 0
padding = nn.ReflectionPad2d((pad_left, pad_right, pad_top, pad_bottom))
input = padding(input)
else:
pad_left = 0
pad_right = 0
pad_top = 0
pad_bottom = 0
height, width = input.data.shape[-2:]
assert width % divide == 0, 'width must be divisible by divide'
assert height % divide == 0, 'height must be divisible by divide'
return input, pad_left, pad_right, pad_top, pad_bottom
def pad_tensor_back(input, pad_left, pad_right, pad_top, pad_bottom):
"""
:param input:
:param pad_left:
:param pad_right:
:param pad_top:
:param pad_bottom:
:return:
"""
height, width = input.shape[2], input.shape[3]
return input[:, :, pad_top: height - pad_bottom, pad_left: width - pad_right]
def weights_init(m):
"""
:param m:
:return:
"""
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif norm_type == 'synBN':
norm_layer = functools.partial(SynBN2d, affine=True)
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def define_G(input_nc,
output_nc,
ngf,
which_model_netG,
norm='batch',
use_dropout=False,
gpu_ids=[],
skip=False,
opt=None):
"""
:param input_nc:
:param output_nc:
:param ngf:
:param which_model_netG:
:param norm:
:param use_dropout:
:param gpu_ids:
:param skip:
:param opt:
:return:
"""
netG = None
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert (torch.cuda.is_available())
if which_model_netG == 'resnet_9blocks':
netG = ResnetGenerator(input_nc,
output_nc,
ngf,
norm_layer=norm_layer,
use_dropout=use_dropout,
n_blocks=9,
gpu_ids=gpu_ids)
elif which_model_netG == 'resnet_6blocks':
netG = ResnetGenerator(input_nc,
output_nc,
ngf,
norm_layer=norm_layer,
use_dropout=use_dropout,
n_blocks=6,
gpu_ids=gpu_ids)
elif which_model_netG == 'unet_128':
netG = UnetGenerator(input_nc, output_nc,
7,
ngf,
norm_layer=norm_layer,
use_dropout=use_dropout,
gpu_ids=gpu_ids)
elif which_model_netG == 'unet_256':
netG = UnetGenerator(input_nc,
output_nc,
8,
ngf,
norm_layer=norm_layer,
use_dropout=use_dropout,
gpu_ids=gpu_ids,
skip=skip,
opt=opt)
elif which_model_netG == 'unet_512':
netG = UnetGenerator(input_nc,
output_nc,
9,
ngf,
norm_layer=norm_layer,
use_dropout=use_dropout,
gpu_ids=gpu_ids,
skip=skip,
opt=opt)
elif which_model_netG == 'sid_unet':
netG = Unet(opt, skip)
elif which_model_netG == 'sid_unet_shuffle':
netG = Unet_pixelshuffle(opt, skip)
elif which_model_netG == 'sid_unet_resize': # go this way
netG = Unet_resize_conv(opt, skip)
elif which_model_netG == 'DnCNN':
netG = DnCNN(opt, depth=17, n_channels=64, image_channels=1, use_bnorm=True, kernel_size=3)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
if use_gpu:
netG.cuda(device=gpu_ids[0])
netG = torch.nn.DataParallel(netG, gpu_ids)
netG.apply(weights_init)
return netG
def define_D(input_nc, ndf, which_model_netD,
n_layers_D=3, norm='batch', use_sigmoid=False, gpu_ids=[], patch=False):
"""
:param input_nc:
:param ndf:
:param which_model_netD:
:param n_layers_D:
:param norm:
:param use_sigmoid:
:param gpu_ids:
:param patch:
:return:
"""
netD = None
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert (torch.cuda.is_available())
if which_model_netD == 'basic':
netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid,
gpu_ids=gpu_ids)
elif which_model_netD == 'n_layers':
netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid,
gpu_ids=gpu_ids)
elif which_model_netD == 'no_norm':
netD = NoNormDiscriminator(input_nc, ndf, n_layers_D, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
elif which_model_netD == 'no_norm_4':
netD = NoNormDiscriminator(input_nc, ndf, n_layers_D, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
elif which_model_netD == 'no_patchgan':
netD = FCDiscriminator(input_nc, ndf, n_layers_D, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids, patch=patch)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' %
which_model_netD)
if use_gpu:
netD.cuda(device=gpu_ids[0])
netD = torch.nn.DataParallel(netD, gpu_ids)
netD.apply(weights_init)
return netD
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('Total number of parameters: %d' % num_params)
##############################################################################
# Classes
##############################################################################
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor)
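# Illustrative usage sketch (netD, real_imgs and fake_imgs are placeholders, not defined in this module):
# GANLoss builds a target tensor with the same shape as the discriminator output, so callers never
# construct real/fake label tensors by hand.
# criterion_gan = GANLoss(use_lsgan=True, tensor=torch.cuda.FloatTensor)
# loss_g = criterion_gan(netD.forward(fake_imgs), True)  # generator wants fakes scored as real
# loss_d = 0.5 * (criterion_gan(netD.forward(real_imgs), True) +
#                 criterion_gan(netD.forward(fake_imgs.detach()), False))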
class DiscLossWGANGP():
def __init__(self):
self.LAMBDA = 10
def name(self):
return 'DiscLossWGAN-GP'
def initialize(self, opt, tensor):
# DiscLossLS.initialize(self, opt, tensor)
self.LAMBDA = 10
# def get_g_loss(self, net, realA, fakeB):
# # First, G(A) should fake the discriminator
# self.D_fake = net.forward(fakeB)
# return -self.D_fake.mean()
def calc_gradient_penalty(self, netD, real_data, fake_data):
alpha = torch.rand(1, 1)
alpha = alpha.expand(real_data.size())
alpha = alpha.cuda()
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
interpolates = interpolates.cuda()
interpolates = Variable(interpolates, requires_grad=True)
disc_interpolates = netD.forward(interpolates)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * self.LAMBDA
return gradient_penalty
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
# Code and idea originally from Justin Johnson's architecture.
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6,
gpu_ids=[], padding_type='reflect'):
assert (n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
self.gpu_ids = gpu_ids
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling):
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks):
model += [
ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout)]
for i in range(n_downsampling):
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[], skip=False, opt=None):
super(UnetGenerator, self).__init__()
self.gpu_ids = gpu_ids
self.opt = opt
# currently support only input_nc == output_nc
assert (input_nc == output_nc)
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, norm_layer=norm_layer, innermost=True, opt=opt)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, unet_block, norm_layer=norm_layer,
use_dropout=use_dropout, opt=opt)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, unet_block, norm_layer=norm_layer, opt=opt)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, unet_block, norm_layer=norm_layer, opt=opt)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, unet_block, norm_layer=norm_layer, opt=opt)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, unet_block, outermost=True, norm_layer=norm_layer, opt=opt)
if skip == True:
skipmodule = SkipModule(unet_block, opt)
self.model = skipmodule
else:
self.model = unet_block
def forward(self, input):
if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
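# Shape intuition for num_downs (see the comment above UnetGenerator): every skip block halves the
# spatial size, so a 128x128 input needs num_downs == 7 to reach 1x1 at the bottleneck (128 / 2**7 == 1).
# Illustrative construction (the argument values are examples, not defaults used elsewhere in this file):
# netG = UnetGenerator(input_nc=3, output_nc=3, num_downs=7, ngf=64,
#                      norm_layer=nn.BatchNorm2d, use_dropout=False, opt=opt)
# out = netG(torch.randn(1, 3, 128, 128))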
class SkipModule(nn.Module):
def __init__(self, submodule, opt):
super(SkipModule, self).__init__()
self.submodule = submodule
self.opt = opt
def forward(self, x):
latent = self.submodule(x)
return self.opt.skip * x + latent, latent
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False,
opt=None):
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
stride=2, padding=1)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if opt.use_norm == 0:
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downrelu, downconv]
up = [uprelu, upconv]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downrelu, downconv]
up = [uprelu, upconv]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
else:
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else:
return torch.cat([self.model(x), x], 1)
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
super(NLayerDiscriminator, self).__init__()
self.gpu_ids = gpu_ids
kw = 4
padw = int(np.ceil((kw - 1) / 2))
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=1, padding=padw),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def forward(self, input):
# if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
# return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
# else:
return self.model(input)
class NoNormDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, use_sigmoid=False, gpu_ids=[]):
super(NoNormDiscriminator, self).__init__()
self.gpu_ids = gpu_ids
kw = 4
padw = int(np.ceil((kw - 1) / 2))
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=1, padding=padw),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def forward(self, input):
# if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
# return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
# else:
return self.model(input)
class FCDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, use_sigmoid=False, gpu_ids=[], patch=False):
super(FCDiscriminator, self).__init__()
self.gpu_ids = gpu_ids
self.use_sigmoid = use_sigmoid
kw = 4
padw = int(np.ceil((kw - 1) / 2))
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=1, padding=padw),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
if patch:
self.linear = nn.Linear(7 * 7, 1)
else:
self.linear = nn.Linear(13 * 13, 1)
if use_sigmoid:
self.sigmoid = nn.Sigmoid()
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""
:param input:
:return:
"""
batchsize = input.size()[0]
output = self.model(input)
output = output.view(batchsize, -1)
# print(output.size())
output = self.linear(output)
if self.use_sigmoid:
print("sigmoid")
output = self.sigmoid(output)
return output
class Unet_resize_conv(nn.Module):
def __init__(self, opt, skip):
"""
:param opt:
:param skip:
"""
super(Unet_resize_conv, self).__init__()
self.opt = opt
self.skip = skip
# pad = 1
# ## use gray map as self attention (these pooling layers are used by forward_simplify / forward_once)
self.downsample_1 = nn.MaxPool2d(2)
self.downsample_2 = nn.MaxPool2d(2)
self.downsample_3 = nn.MaxPool2d(2)
self.downsample_4 = nn.MaxPool2d(2)
# self.conv1_1 = nn.Conv2d(4, 32, 3, padding=1)
# self.conv1_1 = nn.Conv2d(4, 32, 3, padding=1) # 4 channels
self.conv1_1 = nn.Conv2d(3, 32, 3, padding=1) # 3 channels
self.LReLU1_1 = nn.LeakyReLU(0.2, inplace=True)
self.bn1_1 = nn.BatchNorm2d(32)
self.conv1_2 = nn.Conv2d(32, 32, 3, padding=1)
self.LReLU1_2 = nn.LeakyReLU(0.2, inplace=True)
self.bn1_2 = nn.BatchNorm2d(32)
self.max_pool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.conv2_1 = nn.Conv2d(32, 64, 3, padding=1)
self.LReLU2_1 = nn.LeakyReLU(0.2, inplace=True)
self.bn2_1 = nn.BatchNorm2d(64)
self.conv2_2 = nn.Conv2d(64, 64, 3, padding=1)
self.LReLU2_2 = nn.LeakyReLU(0.2, inplace=True)
self.bn2_2 = nn.BatchNorm2d(64)
self.max_pool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.conv3_1 = nn.Conv2d(64, 128, 3, padding=1)
self.LReLU3_1 = nn.LeakyReLU(0.2, inplace=True)
self.bn3_1 = nn.BatchNorm2d(128)
self.conv3_2 = nn.Conv2d(128, 128, 3, padding=1)
self.LReLU3_2 = nn.LeakyReLU(0.2, inplace=True)
self.bn3_2 = nn.BatchNorm2d(128)
self.max_pool3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.conv4_1 = nn.Conv2d(128, 256, 3, padding=1)
self.LReLU4_1 = nn.LeakyReLU(0.2, inplace=True)
self.bn4_1 = nn.BatchNorm2d(256)
self.conv4_2 = nn.Conv2d(256, 256, 3, padding=1)
self.LReLU4_2 = nn.LeakyReLU(0.2, inplace=True)
self.bn4_2 = nn.BatchNorm2d(256)
self.max_pool4 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.conv5_1 = nn.Conv2d(256, 512, 3, padding=1)
self.LReLU5_1 = nn.LeakyReLU(0.2, inplace=True)
self.bn5_1 = nn.BatchNorm2d(512)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
self.LReLU5_2 = nn.LeakyReLU(0.2, inplace=True)
self.bn5_2 = nn.BatchNorm2d(512)
# self.deconv5 = nn.ConvTranspose2d(512, 256, 2, stride=2)
self.deconv5 = nn.Conv2d(512, 256, 3, padding=1)
self.conv6_1 = nn.Conv2d(512, 256, 3, padding=1)
self.LReLU6_1 = nn.LeakyReLU(0.2, inplace=True)
self.bn6_1 = nn.BatchNorm2d(256)
self.conv6_2 = nn.Conv2d(256, 256, 3, padding=1)
self.LReLU6_2 = nn.LeakyReLU(0.2, inplace=True)
self.bn6_2 = nn.BatchNorm2d(256)
# self.deconv6 = nn.ConvTranspose2d(256, 128, 2, stride=2)
self.deconv6 = nn.Conv2d(256, 128, 3, padding=1)
self.conv7_1 = nn.Conv2d(256, 128, 3, padding=1)
self.LReLU7_1 = nn.LeakyReLU(0.2, inplace=True)
self.bn7_1 = nn.BatchNorm2d(128)
self.conv7_2 = nn.Conv2d(128, 128, 3, padding=1)
self.LReLU7_2 = nn.LeakyReLU(0.2, inplace=True)
self.bn7_2 = nn.BatchNorm2d(128)
# self.deconv7 = nn.ConvTranspose2d(128, 64, 2, stride=2)
self.deconv7 = nn.Conv2d(128, 64, 3, padding=1)
self.conv8_1 = nn.Conv2d(128, 64, 3, padding=1)
self.LReLU8_1 = nn.LeakyReLU(0.2, inplace=True)
self.bn8_1 = nn.BatchNorm2d(64)
self.conv8_2 = nn.Conv2d(64, 64, 3, padding=1)
self.LReLU8_2 = nn.LeakyReLU(0.2, inplace=True)
self.bn8_2 = nn.BatchNorm2d(64)
# self.deconv8 = nn.ConvTranspose2d(64, 32, 2, stride=2)
self.deconv8 = nn.Conv2d(64, 32, 3, padding=1)
self.conv9_1 = nn.Conv2d(64, 32, 3, padding=1)
self.LReLU9_1 = nn.LeakyReLU(0.2, inplace=True)
self.bn9_1 = nn.BatchNorm2d(32)
self.conv9_2 = nn.Conv2d(32, 32, 3, padding=1)
self.LReLU9_2 = nn.LeakyReLU(0.2, inplace=True)
self.conv10 = nn.Conv2d(32, 3, 1)
self.tanh = nn.Tanh()  # used in forward_once when opt.tanh is set
def depth_to_space(self, input, block_size):
"""
:param input:
:param block_size:
:return:
"""
block_size_sq = block_size * block_size
output = input.permute(0, 2, 3, 1)
(batch_size, d_height, d_width, d_depth) = output.size()
s_depth = int(d_depth / block_size_sq)
s_width = int(d_width * block_size)
s_height = int(d_height * block_size)
t_1 = output.resize(batch_size, d_height, d_width, block_size_sq, s_depth)
spl = t_1.split(block_size, 3)
stack = [t_t.resize(batch_size, d_height, s_width, s_depth) for t_t in spl]
output = torch.stack(stack, 0).transpose(0, 1).permute(0, 2, 1, 3, 4).resize(batch_size,
s_height,
s_width,
s_depth)
output = output.permute(0, 3, 1, 2)
return output
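# depth_to_space rearranges channels into space (the inverse of pixel-shuffle packing): an input of
# shape (N, C*r*r, H, W) becomes (N, C, H*r, W*r) for block_size r.
# Tiny illustrative check (hypothetical values, not executed here):
# x = torch.arange(16.).view(1, 4, 2, 2)    # N=1, C*r*r=4, H=W=2, with r=2
# y = self.depth_to_space(x, 2)             # -> shape (1, 1, 4, 4)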
def forward_simplify(self, input, gray):
"""
:param input:
:param gray:
:return:
"""
# print("gray.shape: ", gray.shape)
gray_2 = self.downsample_1(gray) # // 2
# print("gray_2.shape: ", gray_2.shape)
gray_3 = self.downsample_2(gray_2) # // 4
# print("gray_3.shape: ", gray_3.shape)
gray_4 = self.downsample_3(gray_3) # // 8
# print("gray_4.shape: ", gray_4.shape)
gray_5 = self.downsample_4(gray_4) # // 16
# print("gray_5.shape: ", gray_5.shape)
x = self.bn1_1(self.LReLU1_1(self.conv1_1(torch.cat((input, gray), 1)))) # 4 channels
# x = self.bn1_1(self.LReLU1_1(self.conv1_1(input)))
conv1 = self.bn1_2(self.LReLU1_2(self.conv1_2(x)))
x = self.max_pool1(conv1)
x = self.bn2_1(self.LReLU2_1(self.conv2_1(x)))
conv2 = self.bn2_2(self.LReLU2_2(self.conv2_2(x)))
x = self.max_pool2(conv2)
x = self.bn3_1(self.LReLU3_1(self.conv3_1(x)))
conv3 = self.bn3_2(self.LReLU3_2(self.conv3_2(x)))
x = self.max_pool3(conv3)
x = self.bn4_1(self.LReLU4_1(self.conv4_1(x)))
conv4 = self.bn4_2(self.LReLU4_2(self.conv4_2(x)))
x = self.max_pool4(conv4)
x = self.bn5_1(self.LReLU5_1(self.conv5_1(x)))
x = x * gray_5
conv5 = self.bn5_2(self.LReLU5_2(self.conv5_2(x)))
conv5 = F.interpolate(conv5, scale_factor=2, mode='bilinear', align_corners=False)
conv4 = conv4 * gray_4
up6 = torch.cat([self.deconv5(conv5), conv4], 1)
x = self.bn6_1(self.LReLU6_1(self.conv6_1(up6)))
conv6 = self.bn6_2(self.LReLU6_2(self.conv6_2(x)))
conv6 = F.interpolate(conv6, scale_factor=2, mode='bilinear', align_corners=False)
conv3 = conv3 * gray_3
up7 = torch.cat([self.deconv6(conv6), conv3], 1)
x = self.bn7_1(self.LReLU7_1(self.conv7_1(up7)))
conv7 = self.bn7_2(self.LReLU7_2(self.conv7_2(x)))
conv7 = F.interpolate(conv7, scale_factor=2, mode='bilinear', align_corners=False)
conv2 = conv2 * gray_2
up8 = torch.cat([self.deconv7(conv7), conv2], 1)
x = self.bn8_1(self.LReLU8_1(self.conv8_1(up8)))
conv8 = self.bn8_2(self.LReLU8_2(self.conv8_2(x)))
conv8 = F.interpolate(conv8, scale_factor=2, mode='bilinear', align_corners=False)
conv1 = conv1 * gray
up9 = torch.cat([self.deconv8(conv8), conv1], 1)
x = self.bn9_1(self.LReLU9_1(self.conv9_1(up9)))
conv9 = self.LReLU9_2(self.conv9_2(x))
latent = self.conv10(conv9)
latent = latent * gray
# output = self.depth_to_space(conv10, 2)
output = latent + input
return output, latent
def forward(self, input):
"""
:param input:
:return:
"""
# x = self.bn1_1(self.LReLU1_1(self.conv1_1(torch.cat((input, gray), 1)))) # 4 channels
x = self.bn1_1(self.LReLU1_1(self.conv1_1(input))) # 3 channels
conv1 = self.bn1_2(self.LReLU1_2(self.conv1_2(x)))
x = self.max_pool1(conv1) # // 2
x = self.bn2_1(self.LReLU2_1(self.conv2_1(x)))
conv2 = self.bn2_2(self.LReLU2_2(self.conv2_2(x)))
x = self.max_pool2(conv2)
x = self.bn3_1(self.LReLU3_1(self.conv3_1(x)))
conv3 = self.bn3_2(self.LReLU3_2(self.conv3_2(x)))
x = self.max_pool3(conv3)
x = self.bn4_1(self.LReLU4_1(self.conv4_1(x)))
conv4 = self.bn4_2(self.LReLU4_2(self.conv4_2(x)))
x = self.max_pool4(conv4)
x = self.bn5_1(self.LReLU5_1(self.conv5_1(x)))
conv5 = self.bn5_2(self.LReLU5_2(self.conv5_2(x)))
conv5 = F.interpolate(conv5, scale_factor=2, mode='nearest')
up6 = torch.cat([self.deconv5(conv5), conv4], 1)
x = self.bn6_1(self.LReLU6_1(self.conv6_1(up6)))
conv6 = self.bn6_2(self.LReLU6_2(self.conv6_2(x)))
conv6 = F.interpolate(conv6, scale_factor=2, mode='nearest')
up7 = torch.cat([self.deconv6(conv6), conv3], 1)
x = self.bn7_1(self.LReLU7_1(self.conv7_1(up7)))
conv7 = self.bn7_2(self.LReLU7_2(self.conv7_2(x)))
conv7 = F.interpolate(conv7, scale_factor=2, mode='nearest')
up8 = torch.cat([self.deconv7(conv7), conv2], 1)
x = self.bn8_1(self.LReLU8_1(self.conv8_1(up8)))
conv8 = self.bn8_2(self.LReLU8_2(self.conv8_2(x)))
conv8 = F.interpolate(conv8, scale_factor=2, mode='nearest')
up9 = torch.cat([self.deconv8(conv8), conv1], 1)
x = self.bn9_1(self.LReLU9_1(self.conv9_1(up9)))
conv9 = self.LReLU9_2(self.conv9_2(x))
latent = self.conv10(conv9)
# output = self.depth_to_space(conv10, 2)
output = latent + input
return output, latent
def forward_once(self, input, gray):
"""
:param input:
:param gray:
:return:
"""
## resize: TODO: pre-process in dataset
# flag = 0
# if input.size()[3] > 2200: # if width > 2200
# avg = nn.AvgPool2d(2)
# input = avg(input)
# gray = avg(gray)
# flag = 1
# # pass
# ## ----- Padding the tensor
# input, pad_left, pad_right, pad_top, pad_bottom = pad_tensor(input)
# gray, pad_left, pad_right, pad_top, pad_bottom = pad_tensor(gray)
if self.opt.self_attention: # go this way
# print("gray.shape: ", gray.shape)
gray_2 = self.downsample_1(gray) # // 2
# print("gray_2.shape: ", gray_2.shape)
gray_3 = self.downsample_2(gray_2) # // 4
# print("gray_3.shape: ", gray_3.shape)
gray_4 = self.downsample_3(gray_3) # // 8
# print("gray_4.shape: ", gray_4.shape)
gray_5 = self.downsample_4(gray_4) # //16
# print("gray_5.shape: ", gray_5.shape)
if self.opt.use_norm == 1:
if self.opt.self_attention:
x = self.bn1_1(self.LReLU1_1(self.conv1_1(torch.cat((input, gray), 1)))) # 4 channels
# x = self.bn1_1(self.LReLU1_1(self.conv1_1(input)))
else:
x = self.bn1_1(self.LReLU1_1(self.conv1_1(input)))
conv1 = self.bn1_2(self.LReLU1_2(self.conv1_2(x)))
x = self.max_pool1(conv1)
x = self.bn2_1(self.LReLU2_1(self.conv2_1(x)))
conv2 = self.bn2_2(self.LReLU2_2(self.conv2_2(x)))
x = self.max_pool2(conv2)
x = self.bn3_1(self.LReLU3_1(self.conv3_1(x)))
conv3 = self.bn3_2(self.LReLU3_2(self.conv3_2(x)))
x = self.max_pool3(conv3)
x = self.bn4_1(self.LReLU4_1(self.conv4_1(x)))
conv4 = self.bn4_2(self.LReLU4_2(self.conv4_2(x)))
x = self.max_pool4(conv4)
x = self.bn5_1(self.LReLU5_1(self.conv5_1(x)))
x = x * gray_5 if self.opt.self_attention else x
conv5 = self.bn5_2(self.LReLU5_2(self.conv5_2(x)))
conv5 = F.interpolate(conv5, scale_factor=2, mode='bilinear', align_corners=False)
conv4 = conv4 * gray_4 if self.opt.self_attention else conv4
up6 = torch.cat([self.deconv5(conv5), conv4], 1)
x = self.bn6_1(self.LReLU6_1(self.conv6_1(up6)))
conv6 = self.bn6_2(self.LReLU6_2(self.conv6_2(x)))
conv6 = F.interpolate(conv6, scale_factor=2, mode='bilinear', align_corners=False)
conv3 = conv3 * gray_3 if self.opt.self_attention else conv3
up7 = torch.cat([self.deconv6(conv6), conv3], 1)
x = self.bn7_1(self.LReLU7_1(self.conv7_1(up7)))
conv7 = self.bn7_2(self.LReLU7_2(self.conv7_2(x)))
conv7 = F.interpolate(conv7, scale_factor=2, mode='bilinear', align_corners=False)
conv2 = conv2 * gray_2 if self.opt.self_attention else conv2
up8 = torch.cat([self.deconv7(conv7), conv2], 1)
x = self.bn8_1(self.LReLU8_1(self.conv8_1(up8)))
conv8 = self.bn8_2(self.LReLU8_2(self.conv8_2(x)))
conv8 = F.interpolate(conv8, scale_factor=2, mode='bilinear', align_corners=False)
conv1 = conv1 * gray if self.opt.self_attention else conv1
up9 = torch.cat([self.deconv8(conv8), conv1], 1)
x = self.bn9_1(self.LReLU9_1(self.conv9_1(up9)))
conv9 = self.LReLU9_2(self.conv9_2(x))
latent = self.conv10(conv9)
if self.opt.times_residual:
latent = latent * gray
# output = self.depth_to_space(conv10, 2)
if self.opt.tanh: # do not go this way
latent = self.tanh(latent)
if self.skip: # ?
if self.opt.linear_add:
if self.opt.latent_threshold:
latent = F.relu(latent)
elif self.opt.latent_norm:
latent = (latent - torch.min(latent)) / (torch.max(latent) - torch.min(latent))
input = (input - torch.min(input)) / (torch.max(input) - torch.min(input))
output = latent + input * self.opt.skip
output = output * 2 - 1
else:
if self.opt.latent_threshold:
latent = F.relu(latent)
elif self.opt.latent_norm:
latent = (latent - torch.min(latent)) / (torch.max(latent) - torch.min(latent))
output = latent + input * self.opt.skip
else:
output = latent
if self.opt.linear:
output = output / torch.max(torch.abs(output))
elif self.opt.use_norm == 0:
if self.opt.self_attention:
x = self.LReLU1_1(self.conv1_1(torch.cat((input, gray), 1)))
else:
x = self.LReLU1_1(self.conv1_1(input))
conv1 = self.LReLU1_2(self.conv1_2(x))
x = self.max_pool1(conv1)
x = self.LReLU2_1(self.conv2_1(x))
conv2 = self.LReLU2_2(self.conv2_2(x))
x = self.max_pool2(conv2)
x = self.LReLU3_1(self.conv3_1(x))
conv3 = self.LReLU3_2(self.conv3_2(x))
x = self.max_pool3(conv3)
x = self.LReLU4_1(self.conv4_1(x))
conv4 = self.LReLU4_2(self.conv4_2(x))
x = self.max_pool4(conv4)
x = self.LReLU5_1(self.conv5_1(x))
x = x * gray_5 if self.opt.self_attention else x
conv5 = self.LReLU5_2(self.conv5_2(x))
conv5 = F.interpolate(conv5, scale_factor=2, mode='bilinear', align_corners=False)
conv4 = conv4 * gray_4 if self.opt.self_attention else conv4
up6 = torch.cat([self.deconv5(conv5), conv4], 1)
x = self.LReLU6_1(self.conv6_1(up6))
conv6 = self.LReLU6_2(self.conv6_2(x))
conv6 = F.interpolate(conv6, scale_factor=2, mode='bilinear', align_corners=False)
conv3 = conv3 * gray_3 if self.opt.self_attention else conv3
up7 = torch.cat([self.deconv6(conv6), conv3], 1)
x = self.LReLU7_1(self.conv7_1(up7))
conv7 = self.LReLU7_2(self.conv7_2(x))
conv7 = F.interpolate(conv7, scale_factor=2, mode='bilinear', align_corners=False)
conv2 = conv2 * gray_2 if self.opt.self_attention else conv2
up8 = torch.cat([self.deconv7(conv7), conv2], 1)
x = self.LReLU8_1(self.conv8_1(up8))
conv8 = self.LReLU8_2(self.conv8_2(x))
conv8 = F.interpolate(conv8, scale_factor=2, mode='bilinear', align_corners=False)
conv1 = conv1 * gray if self.opt.self_attention else conv1
up9 = torch.cat([self.deconv8(conv8), conv1], 1)
x = self.LReLU9_1(self.conv9_1(up9))
conv9 = self.LReLU9_2(self.conv9_2(x))
latent = self.conv10(conv9)
if self.opt.times_residual:
latent = latent * gray
if self.opt.tanh:
latent = self.tanh(latent)
if self.skip:
if self.opt.linear_add:
if self.opt.latent_threshold:
latent = F.relu(latent)
elif self.opt.latent_norm:
latent = (latent - torch.min(latent)) / (torch.max(latent) - torch.min(latent))
input = (input - torch.min(input)) / (torch.max(input) - torch.min(input))
output = latent + input * self.opt.skip
output = output * 2 - 1
else:
if self.opt.latent_threshold:
latent = F.relu(latent)
elif self.opt.latent_norm:
latent = (latent - torch.min(latent)) / (torch.max(latent) - torch.min(latent))
output = latent + input * self.opt.skip
else:
output = latent
if self.opt.linear:
output = output / torch.max(torch.abs(output))
# ## Padding tensor back
# output = pad_tensor_back(output, pad_left, pad_right, pad_top, pad_bottom)
# latent = pad_tensor_back(latent, pad_left, pad_right, pad_top, pad_bottom)
# gray = pad_tensor_back(gray, pad_left, pad_right, pad_top, pad_bottom)
# if flag == 1:
# output = F.interpolate(output, scale_factor=2, mode='bilinear', align_corners=False)
# gray = F.interpolate(gray, scale_factor=2, mode='bilinear', align_corners=False)
if self.skip:
return output, latent
else:
return output
class DnCNN(nn.Module):
def __init__(self, opt=None, depth=17, n_channels=64, image_channels=1, use_bnorm=True, kernel_size=3):
super(DnCNN, self).__init__()
kernel_size = 3
padding = 1
layers = []
layers.append(
nn.Conv2d(in_channels=image_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding,
bias=True))
layers.append(nn.ReLU(inplace=True))
for _ in range(depth - 2):
layers.append(
nn.Conv2d(in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding,
bias=False))
layers.append(nn.BatchNorm2d(n_channels, eps=0.0001, momentum=0.95))
layers.append(nn.ReLU(inplace=True))
layers.append(
nn.Conv2d(in_channels=n_channels, out_channels=image_channels, kernel_size=kernel_size, padding=padding,
bias=False))
self.dncnn = nn.Sequential(*layers)
self._initialize_weights()
def forward(self, x):
y = x
out = self.dncnn(x)
return y + out
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.orthogonal_(m.weight)
print('init weight')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class Vgg16(nn.Module):
def __init__(self):
super(Vgg16, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
def forward(self, X, opt):
h = F.relu(self.conv1_1(X), inplace=True)
h = F.relu(self.conv1_2(h), inplace=True)
# relu1_2 = h
h = F.max_pool2d(h, kernel_size=2, stride=2)
h = F.relu(self.conv2_1(h), inplace=True)
h = F.relu(self.conv2_2(h), inplace=True)
# relu2_2 = h
h = F.max_pool2d(h, kernel_size=2, stride=2)
h = F.relu(self.conv3_1(h), inplace=True)
h = F.relu(self.conv3_2(h), inplace=True)
h = F.relu(self.conv3_3(h), inplace=True)
# relu3_3 = h
if opt.vgg_choose != "no_maxpool":
h = F.max_pool2d(h, kernel_size=2, stride=2)
h = F.relu(self.conv4_1(h), inplace=True)
relu4_1 = h
h = F.relu(self.conv4_2(h), inplace=True)
relu4_2 = h
conv4_3 = self.conv4_3(h)
h = F.relu(conv4_3, inplace=True)
relu4_3 = h
if opt.vgg_choose != "no_maxpool":
if opt.vgg_maxpooling:
h = F.max_pool2d(h, kernel_size=2, stride=2)
relu5_1 = F.relu(self.conv5_1(h), inplace=True)
relu5_2 = F.relu(self.conv5_2(relu5_1), inplace=True)
conv5_3 = self.conv5_3(relu5_2)
h = F.relu(conv5_3, inplace=True)
relu5_3 = h
if opt.vgg_choose == "conv4_3":
return conv4_3
elif opt.vgg_choose == "relu4_2":
return relu4_2
elif opt.vgg_choose == "relu4_1":
return relu4_1
elif opt.vgg_choose == "relu4_3":
return relu4_3
elif opt.vgg_choose == "conv5_3":
return conv5_3
elif opt.vgg_choose == "relu5_1":
return relu5_1
elif opt.vgg_choose == "relu5_2":
return relu5_2
elif opt.vgg_choose == "relu5_3" or opt.vgg_choose == "maxpool":
return relu5_3
def vgg_preprocess(batch, opt):
tensortype = type(batch.data)
(r, g, b) = torch.chunk(batch, 3, dim=1)
batch = torch.cat((b, g, r), dim=1) # convert RGB to BGR
batch = (batch + 1) * 255 * 0.5 # [-1, 1] -> [0, 255]
if opt.vgg_mean:
mean = tensortype(batch.data.size())
mean[:, 0, :, :] = 103.939
mean[:, 1, :, :] = 116.779
mean[:, 2, :, :] = 123.680
batch = batch.sub(Variable(mean)) # subtract mean
return batch
class PerceptualLoss(nn.Module):
def __init__(self, opt):
super(PerceptualLoss, self).__init__()
self.opt = opt
self.instancenorm = nn.InstanceNorm2d(512, affine=False)
def compute_vgg_loss(self, vgg, img, target):
img_vgg = vgg_preprocess(img, self.opt)
target_vgg = vgg_preprocess(target, self.opt)
img_fea = vgg(img_vgg, self.opt)
target_fea = vgg(target_vgg, self.opt)
if self.opt.no_vgg_instance:
return torch.mean((img_fea - target_fea) ** 2)
else:
return torch.mean((self.instancenorm(img_fea) - self.instancenorm(target_fea)) ** 2)
def load_vgg16(model_dir, gpu_ids):
"""
Use the model from https://github.com/abhiskk/fast-neural-style/blob/master/neural_style/utils.py
:param model_dir:
:param gpu_ids:
:return:
"""
if not os.path.exists(model_dir):
os.mkdir(model_dir)
# if not os.path.exists(os.path.join(model_dir, 'vgg16.weight')):
# if not os.path.exists(os.path.join(model_dir, 'vgg16.t7')):
# os.system('wget https://www.dropbox.com/s/76l3rt4kyi3s8x7/vgg16.t7?dl=1 -O ' + os.path.join(model_dir, 'vgg16.t7'))
# vgglua = load_lua(os.path.join(model_dir, 'vgg16.t7'))
# vgg = Vgg16()
# for (src, dst) in zip(vgglua.parameters()[0], vgg.parameters()):
# dst.data[:] = src
# torch.save(vgg.state_dict(), os.path.join(model_dir, 'vgg16.weight'))
vgg = Vgg16()
# vgg.cuda()
vgg.cuda(device=gpu_ids[0])
vgg.load_state_dict(torch.load(os.path.join(model_dir, 'vgg16.weight')))
vgg = torch.nn.DataParallel(vgg, gpu_ids)
return vgg
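# Illustrative wiring of the VGG perceptual loss (model_dir and the image tensors are placeholders):
# vgg = load_vgg16('./model', gpu_ids=[0])   # expects model/vgg16.weight on disk
# vgg.eval()
# for p in vgg.parameters():
#     p.requires_grad = False
# perceptual = PerceptualLoss(opt)
# loss_vgg = perceptual.compute_vgg_loss(vgg, enhanced_img, input_img)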
class FCN32s(nn.Module):
def __init__(self, n_class=21):
super(FCN32s, self).__init__()
# conv1
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=100)
self.relu1_1 = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.relu1_2 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/2
# conv2
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.relu2_1 = nn.ReLU(inplace=True)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.relu2_2 = nn.ReLU(inplace=True)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/4
# conv3
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.relu3_1 = nn.ReLU(inplace=True)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_2 = nn.ReLU(inplace=True)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_3 = nn.ReLU(inplace=True)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/8
# conv4
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.relu4_1 = nn.ReLU(inplace=True)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_2 = nn.ReLU(inplace=True)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_3 = nn.ReLU(inplace=True)
self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/16
# conv5
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_1 = nn.ReLU(inplace=True)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_2 = nn.ReLU(inplace=True)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_3 = nn.ReLU(inplace=True)
self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/32
# fc6
self.fc6 = nn.Conv2d(512, 4096, 7)
self.relu6 = nn.ReLU(inplace=True)
self.drop6 = nn.Dropout2d()
# fc7
self.fc7 = nn.Conv2d(4096, 4096, 1)
self.relu7 = nn.ReLU(inplace=True)
self.drop7 = nn.Dropout2d()
self.score_fr = nn.Conv2d(4096, n_class, 1)
self.upscore = nn.ConvTranspose2d(n_class, n_class, 64, stride=32,
bias=False)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.zero_()
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.ConvTranspose2d):
assert m.kernel_size[0] == m.kernel_size[1]
# NOTE: get_upsampling_weight (a bilinear upsampling kernel initializer) is assumed to be imported or defined elsewhere.
initial_weight = get_upsampling_weight(
m.in_channels, m.out_channels, m.kernel_size[0])
m.weight.data.copy_(initial_weight)
def forward(self, x):
h = x
h = self.relu1_1(self.conv1_1(h))
h = self.relu1_2(self.conv1_2(h))
h = self.pool1(h)
h = self.relu2_1(self.conv2_1(h))
h = self.relu2_2(self.conv2_2(h))
h = self.pool2(h)
h = self.relu3_1(self.conv3_1(h))
h = self.relu3_2(self.conv3_2(h))
h = self.relu3_3(self.conv3_3(h))
h = self.pool3(h)
h = self.relu4_1(self.conv4_1(h))
h = self.relu4_2(self.conv4_2(h))
h = self.relu4_3(self.conv4_3(h))
h = self.pool4(h)
h = self.relu5_1(self.conv5_1(h))
h = self.relu5_2(self.conv5_2(h))
h = self.relu5_3(self.conv5_3(h))
h = self.pool5(h)
h = self.relu6(self.fc6(h))
h = self.drop6(h)
h = self.relu7(self.fc7(h))
h = self.drop7(h)
h = self.score_fr(h)
h = self.upscore(h)
h = h[:, :, 19:19 + x.size()[2], 19:19 + x.size()[3]].contiguous()
return h
def load_fcn(model_dir):
fcn = FCN32s()
fcn.load_state_dict(torch.load(os.path.join(model_dir, 'fcn32s_from_caffe.pth')))
fcn.cuda()
return fcn
class SemanticLoss(nn.Module):
def __init__(self, opt):
super(SemanticLoss, self).__init__()
self.opt = opt
self.instancenorm = nn.InstanceNorm2d(21, affine=False)
def compute_fcn_loss(self, fcn, img, target):
img_fcn = vgg_preprocess(img, self.opt)
target_fcn = vgg_preprocess(target, self.opt)
img_fea = fcn(img_fcn)
target_fea = fcn(target_fcn)
return torch.mean((self.instancenorm(img_fea) - self.instancenorm(target_fea)) ** 2)
```

- avg_line_length: 37.868457 · max_line_length: 129 · alphanum_fraction: 0.561844
**Row 3: `GUI/gui_sample_003.py`**
- hexsha: `aa12c46c419eea2875449a679ef874d191718bc2` · size: 568 · ext: py · lang: Python
- repo: `lcarlin/guppe` @ `a0ee7b85e8687e8fb8243fbb509119a94bc6460f` · licenses: ["Apache-2.0"] (the stars, issues and forks columns report the same path, repo and head)
- stars: 1 (2021-12-18T15:29:24.000Z to 2021-12-18T15:29:24.000Z) · issues: null · forks: 3 (2021-08-23T22:45:20.000Z to 2022-02-17T13:17:09.000Z)
- content:

```python
from tkinter import *
class Application:
def __init__(self, master=None):
self.widget1 = Frame(master)
self.widget1.pack()
self.msg = Label(self.widget1, text="Primeiro widget")
self.msg["font"] = ("Verdana", "10", "italic", "bold")
self.msg.pack ()
self.sair = Button(self.widget1)
self.sair["text"] = "Sair"
self.sair["font"] = ("Calibri", "10")
self.sair["width"] = 5
self.sair["command"] = self.widget1.quit
self.sair.pack ()
root = Tk()
Application(root)
root.mainloop()
```

- avg_line_length: 31.555556 · max_line_length: 62 · alphanum_fraction: 0.577465
**Row 4: `main.py`**
- hexsha: `73d6574d0215d2f26f4cb87c3460c0a4d2c84793` · size: 1,065 · ext: py · lang: Python
- repo: `D28112003/freesms` @ `bea573a68db282e93a2323d3d00f7ffc2c91eab5` · licenses: ["MIT"] (the stars, issues and forks columns report the same path, repo and head)
- stars: 3 (2020-12-07T20:38:51.000Z to 2020-12-07T20:59:39.000Z) · issues: null · forks: null
- content:

```python
import os,time,sys,shutil
class Main:
def __init__(self):
self.detekos()
def menu(self):
print("""
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; S P A M S M S ;
;---------------------------;
; Author : Dhani ;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
NOTE: This tool only works with Indonesian phone numbers.
1. SMS Gratis
2. OTP Matahari
3. OTP Hallodok
4. OTP Olx.co.id
5. OTP Sociolla.com
""")
pilih=int(input('Milih/> '))
if pilih == 1:
import src.payu
elif pilih == 2:
import src.matahari
elif pilih == 3:
import src.alodok
elif pilih == 4:
import src.olx
elif pilih == 5:
import src.socil
else: print("[!] lihat menu dong(o)");self.menu()
def detekos(self):
#remove cache
try:
shutil.rmtree("src/__pycache__")
except: pass
if os.name in ['nt','win32']:
os.system('cls')
else: os.system('clear')
self.menu()
try:
Main()
except KeyboardInterrupt:
exit('[Exit] Key interrupt')
except Exception as F:
print('Err: %s'%(F))
```

- avg_line_length: 19.722222 · max_line_length: 74 · alphanum_fraction: 0.53615
**Row 5: `tests/gameman_test.py`**
- hexsha: `b50b1b2c1ef12620695ef09785e9f35ad5034bfe` · size: 1,903 · ext: py · lang: Python
- repo: `stratts/savman` @ `16ba9a527c6448ba7570cce7b7430ef55561e660` · licenses: ["MIT"] (the stars, issues and forks columns report the same path, repo and head)
- stars: null · issues: null · forks: null
- content:

```python
import pytest
from savman import gameman
@pytest.fixture
def gamedir(tmpdir):
groot = tmpdir.mkdir('Games')
gdir = groot.mkdir('MyGame')
f1 = gdir.join('test1.txt')
f2 = gdir.join('test2.txt')
f3 = gdir.join('test3.png')
f1.write('test1'); f2.write('test2'); f3.write('test3')
return gdir
@pytest.fixture
def dir1(tmpdir):
return tmpdir.mkdir('dir1')
@pytest.fixture
def dir2(tmpdir):
return tmpdir.mkdir('dir2')
@pytest.fixture
def customfile(tmpdir, dir1, dir2):
file = tmpdir.join('custom.txt')
custom = '''
---
name: My Game
directory: {}
include:
- folder1/* # Include all files from folder
exclude:
- '*.png'
---
name: My Game 2
directory: {}
'''.format(str(dir1), str(dir2))
file.write(custom)
return file
def test_load_custom(customfile, dir1, dir2):
gman = gameman.GameMan('DUMMY')
gman.load_custom(str(customfile))
assert 'MyGame' in gman.games
assert 'MyGame2' in gman.games
game1 = gman.games['MyGame']
game2 = gman.games['MyGame2']
assert game1.name == 'My Game'
assert game2.name == 'My Game 2'
assert game1.locations[0].path == str(dir1)
assert game2.locations[0].path == str(dir2)
assert 'folder1/*' in game1.locations[0].include
assert '*.png' in game1.locations[0].exclude
def test_find_games(tmpdir, gamedir):
database = {
'games': {'MyGame': {'name': 'My Game'}},
'locations': {
('MyGame', 0): {
'type': 'profile',
'profile_items': ['test1.txt', 'test2.txt'],
'profile_dir': 'MyGame',
'exclude': None, 'include': None, 'subdir': None
}
}
}
gman = gameman.GameMan(database)
gman.finder.searchpaths = [str(tmpdir)]
gman.find_games()
assert 'MyGame' in gman.games
assert gman.games['MyGame'].locations[0].path == gamedir
```

- avg_line_length: 26.068493 · max_line_length: 64 · alphanum_fraction: 0.606411
**Row 6: `rasa/core/train.py`**
- hexsha: `e04e9b60c275f0a43829cac60b8941c0baa8a34d` · size: 6,186 · ext: py · lang: Python
- repo: `isaac-philip/rasa` @ `923db75e03921921a6f1f3489a2c5574138ee685` · licenses: ["Apache-2.0"] (the stars, issues and forks columns report the same path, repo and head)
- stars: 1 (2020-06-15T02:06:41.000Z to 2020-06-15T02:06:41.000Z) · issues: 49 (2020-06-23T11:26:22.000Z to 2022-02-01T13:22:24.000Z) · forks: 1 (2020-07-01T12:07:55.000Z to 2020-07-01T12:07:55.000Z)
- content:

```python
import argparse
import asyncio
import logging
import os
import tempfile
import typing
from typing import Dict, Optional, Text, Union, List
import rasa.utils.io
from rasa.constants import NUMBER_OF_TRAINING_STORIES_FILE, PERCENTAGE_KEY
from rasa.core.domain import Domain
from rasa.importers.importer import TrainingDataImporter
from rasa.utils.common import TempDirectoryPath
if typing.TYPE_CHECKING:
from rasa.core.interpreter import NaturalLanguageInterpreter
from rasa.core.utils import AvailableEndpoints
logger = logging.getLogger(__name__)
async def train(
domain_file: Union[Domain, Text],
training_resource: Union[Text, "TrainingDataImporter"],
output_path: Text,
interpreter: Optional["NaturalLanguageInterpreter"] = None,
endpoints: "AvailableEndpoints" = None,
policy_config: Optional[Union[Text, Dict]] = None,
exclusion_percentage: Optional[int] = None,
additional_arguments: Optional[Dict] = None,
):
from rasa.core.agent import Agent
from rasa.core import config, utils
from rasa.core.utils import AvailableEndpoints
if not endpoints:
endpoints = AvailableEndpoints()
if not additional_arguments:
additional_arguments = {}
policies = config.load(policy_config)
agent = Agent(
domain_file,
generator=endpoints.nlg,
action_endpoint=endpoints.action,
interpreter=interpreter,
policies=policies,
)
data_load_args, additional_arguments = utils.extract_args(
additional_arguments,
{
"use_story_concatenation",
"unique_last_num_states",
"augmentation_factor",
"remove_duplicates",
"debug_plots",
},
)
training_data = await agent.load_data(
training_resource, exclusion_percentage=exclusion_percentage, **data_load_args
)
agent.train(training_data, **additional_arguments)
agent.persist(output_path)
return agent
async def train_comparison_models(
story_file: Text,
domain: Text,
output_path: Text = "",
exclusion_percentages: Optional[List] = None,
policy_configs: Optional[List] = None,
runs: int = 1,
additional_arguments: Optional[Dict] = None,
):
"""Train multiple models for comparison of policies"""
from rasa import model
from rasa.importers.importer import TrainingDataImporter
exclusion_percentages = exclusion_percentages or []
policy_configs = policy_configs or []
for r in range(runs):
logging.info("Starting run {}/{}".format(r + 1, runs))
for current_run, percentage in enumerate(exclusion_percentages, 1):
for policy_config in policy_configs:
file_importer = TrainingDataImporter.load_core_importer_from_config(
policy_config, domain, [story_file]
)
config_name = os.path.splitext(os.path.basename(policy_config))[0]
logging.info(
"Starting to train {} round {}/{}"
" with {}% exclusion"
"".format(
config_name, current_run, len(exclusion_percentages), percentage
)
)
with TempDirectoryPath(tempfile.mkdtemp()) as train_path:
_, new_fingerprint = await asyncio.gather(
train(
domain,
file_importer,
train_path,
policy_config=policy_config,
exclusion_percentage=percentage,
additional_arguments=additional_arguments,
),
model.model_fingerprint(file_importer),
)
output_dir = os.path.join(output_path, "run_" + str(r + 1))
model_name = config_name + PERCENTAGE_KEY + str(percentage)
model.package_model(
fingerprint=new_fingerprint,
output_directory=output_dir,
train_path=train_path,
fixed_model_name=model_name,
)
async def get_no_of_stories(story_file: Text, domain: Text) -> int:
"""Get number of stories in a file."""
from rasa.core.domain import TemplateDomain
from rasa.core.training.dsl import StoryFileReader
stories = await StoryFileReader.read_from_folder(
story_file, TemplateDomain.load(domain)
)
return len(stories)
async def do_compare_training(
args: argparse.Namespace,
story_file: Text,
additional_arguments: Optional[Dict] = None,
):
_, no_stories = await asyncio.gather(
train_comparison_models(
story_file=story_file,
domain=args.domain,
output_path=args.out,
exclusion_percentages=args.percentages,
policy_configs=args.config,
runs=args.runs,
additional_arguments=additional_arguments,
),
get_no_of_stories(args.stories, args.domain),
)
# store the list of the number of stories present at each exclusion
# percentage
story_range = [
no_stories - round((x / 100.0) * no_stories) for x in args.percentages
]
training_stories_per_model_file = os.path.join(
args.out, NUMBER_OF_TRAINING_STORIES_FILE
)
rasa.utils.io.dump_obj_as_json_to_file(training_stories_per_model_file, story_range)
def do_interactive_learning(
args: argparse.Namespace, file_importer: TrainingDataImporter
):
from rasa.core.training import interactive
interactive.run_interactive_learning(
file_importer=file_importer,
skip_visualization=args.skip_visualization,
conversation_id=args.conversation_id,
server_args=args.__dict__,
)
if __name__ == "__main__":
raise RuntimeError(
"Calling `rasa.core.train` directly is no longer supported. Please use "
"`rasa train` to train a combined Core and NLU model or `rasa train core` "
"to train a Core model."
)
```

- avg_line_length: 32.557895 · max_line_length: 88 · alphanum_fraction: 0.640155
**Row 7: `src/m2_robot_code.py`**
- hexsha: `f1193420cf2b163275957c20875fd7f0faa17b0c` · size: 2,908 · ext: py · lang: Python
- repo: `lcopland18/99-CapstoneProject-201930` @ `34b68b0010d649a9bf503b1da6d9c53a36aad4b7` · licenses: ["MIT"] (the stars, issues and forks columns report the same path, repo and head)
- stars: null · issues: null · forks: null
- content:

```python
"""
Capstone Project. Code to run on the EV3 robot (NOT on a laptop).
Author: Your professors (for the framework)
and Lauren Copland.
Spring term, 2018-2019.
"""
# DONE 1: Put your name in the above.
import math
import mqtt_remote_method_calls as mqtt
import rosebot
import m2_robot_code as m2
import m3_robot_code as m3
class MyRobotDelegate(object):
"""
Defines methods that are called by the MQTT listener when that listener
gets a message (name of the method, plus its arguments)
from a LAPTOP via MQTT.
"""
def __init__(self, robot):
self.robot = robot # type: rosebot.RoseBot
self.mqtt_sender = None # type: mqtt.MqttClient
self.is_time_to_quit = False # Set this to True to exit the robot code
def set_mqtt_sender(self, mqtt_sender):
self.mqtt_sender = mqtt_sender
def stop(self):
""" Tells the robot to stop moving. """
print_message_received("stop")
self.robot.drive_system.stop()
# DONE: Add methods here as needed.
def spin_left(self,left_speed,right_speed,degrees):
print("Spin Left Received",left_speed,right_speed,degrees)
distance = degrees * 5.0 #CHANGE CONSTANT
self.robot.drive_system.right_motor.reset_position()
self.robot.drive_system.go(left_speed,right_speed)
while True:
if abs(self.robot.drive_system.right_motor.get_position()) >= distance:
self.robot.drive_system.stop()
break
def spin_right(self,left_speed,right_speed,degrees):
print("Spin Right Received",left_speed,right_speed,degrees)
distance = degrees * 5.0 #CHANGE CONSTANT
self.robot.drive_system.left_motor.reset_position()
self.robot.drive_system.go(left_speed,right_speed)
while True:
if abs(self.robot.drive_system.left_motor.get_position()) >= distance:
self.robot.drive_system.stop()
break
def spin_until_facing(self,signature,x,delta,speed):
print("Spin Until Facing Received")
big_enough = 1500
signature = "SIG" + str(signature)
self.robot.sensor_system.camera.set_signature(signature)
while True: #RETURN BLOB VALUES
blob = self.robot.sensor_system.camera.get_biggest_blob()
print("While",signature,x,delta,blob,big_enough,blob.get_area())
            if abs(x - blob.center.x) <= delta and blob.get_area() > big_enough:  # stop once the blob is centered within delta pixels
self.robot.drive_system.stop()
print("Broken",blob.get_area(),big_enough)
break
self.spin_right(speed, -speed, 1) # SPIN CLOCKWISE
def print_message_received(method_name, arguments=None):
print()
print("The robot's delegate has received a message")
print("for the ", method_name, " method, with arguments", arguments)
# DONE: Add functions here as needed.
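# Hedged usage sketch (not part of the original assignment): on the robot, the
# delegate is typically wired to an MQTT receiver so the laptop can invoke its
# methods remotely. The constructor/method names below follow the course
# framework imported above and are assumptions, not verified API:
#
#   robot = rosebot.RoseBot()
#   delegate = MyRobotDelegate(robot)
#   mqtt_receiver = mqtt.MqttClient(delegate)
#   mqtt_receiver.connect_to_pc()
#   delegate.set_mqtt_sender(mqtt_receiver)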
| 35.901235 | 83 | 0.665062 |
222e6cb844a66643fa06ceaa24e41e4613bcbca5 | 1,990 | py | Python | projects/api/predictions.py | ndarvishev/projects | 6a9855c5f8af8fad2799ef7a203e126b834c5056 | [
"Apache-2.0"
] | 1 | 2021-06-26T19:13:49.000Z | 2021-06-26T19:13:49.000Z | projects/api/predictions.py | ndarvishev/projects | 6a9855c5f8af8fad2799ef7a203e126b834c5056 | [
"Apache-2.0"
] | null | null | null | projects/api/predictions.py | ndarvishev/projects | 6a9855c5f8af8fad2799ef7a203e126b834c5056 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Predictions API Router."""
from json.decoder import JSONDecodeError
from typing import Optional
from fastapi import APIRouter, Depends, File, Request, UploadFile
from sqlalchemy.orm import Session
from projects.controllers import DeploymentController, PredictionController, \
ProjectController
from projects.exceptions import BadRequest
from projects.database import session_scope
router = APIRouter(
prefix="/projects/{project_id}/deployments/{deployment_id}/predictions",
)
@router.post("")
async def handle_post_prediction(project_id: str,
deployment_id: str,
request: Request,
file: Optional[UploadFile] = File(None),
session: Session = Depends(session_scope)):
"""
Handles POST request to /.
Parameters
-------
project_id : str
deployment_id : str
request : starlette.requests.Request
file : starlette.datastructures.UploadFile
session : sqlalchemy.orm.session.Session
Returns
-------
dict
"""
project_controller = ProjectController(session)
project_controller.raise_if_project_does_not_exist(project_id)
deployment_controller = DeploymentController(session)
deployment_controller.raise_if_deployment_does_not_exist(deployment_id)
# at this endpoint, we can accept both form-data and json as the request content-type
kwargs = {}
if file is not None:
kwargs = {"upload_file": file}
else:
try:
kwargs = await request.json()
except JSONDecodeError:
raise BadRequest("either form-data or json is required")
prediction_controller = PredictionController(session)
return prediction_controller.create_prediction(project_id=project_id,
deployment_id=deployment_id,
**kwargs)
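# Hedged request sketch (host, IDs and payload keys are placeholders; the keys
# ultimately accepted depend on PredictionController.create_prediction, which
# is not shown here). The endpoint takes either JSON or multipart form-data:
#
#   curl -X POST \
#     http://localhost:8080/projects/<project_id>/deployments/<deployment_id>/predictions \
#     -H "Content-Type: application/json" \
#     -d '{"data": [[1.0, 2.0, 3.0]]}'
#
#   curl -X POST .../predictions -F "file=@input.csv"   # form-data variant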
| 33.166667 | 89 | 0.650754 |
867c3f4b854a478bb47bb4ed854501f327dc62e9 | 453 | py | Python | datastrucutre/array/peak_element.py | abhishektyagi2912/python-dsa | 8f51f15a091ee76e00fb34abc232c23cb68440cb | [
"MIT"
] | 1 | 2021-05-02T05:43:34.000Z | 2021-05-02T05:43:34.000Z | datastrucutre/array/peak_element.py | abhishektyagi2912/python-dsa | 8f51f15a091ee76e00fb34abc232c23cb68440cb | [
"MIT"
] | null | null | null | datastrucutre/array/peak_element.py | abhishektyagi2912/python-dsa | 8f51f15a091ee76e00fb34abc232c23cb68440cb | [
"MIT"
] | null | null | null | def peak(arr, low, high):
n = len(arr)
while low <= high:
        mid = low + (high - low) // 2  # integer midpoint
if (mid == 0 or arr[mid-1] <= arr[mid]) and (mid == n-1 or arr[mid+1] <= arr[mid]):
            return arr[mid]
elif mid > 0 and arr[mid-1] > arr[mid]:
high = mid - 1
else:
low = mid + 1
arr = [1, 3, 20, 4, 1, 0]
print(peak(arr, 0, len(arr) - 1))
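# Trace of the call above: with low=0 and high=5 the first midpoint is index 2,
# where arr[1] = 3 <= arr[2] = 20 and arr[3] = 4 <= arr[2], so peak() returns 20
# on the first iteration.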
| 23.842105 | 92 | 0.415011 |
531e211209f6f22d0d03e9f316ed6071bccf6749 | 418 | py | Python | labinfo11/venv/Scripts/pip3-script.py | MatiwsxD/ayed-2019-1 | a5fdbe3a055405150122cf3875cdb0c6afd9eff0 | [
"MIT"
] | null | null | null | labinfo11/venv/Scripts/pip3-script.py | MatiwsxD/ayed-2019-1 | a5fdbe3a055405150122cf3875cdb0c6afd9eff0 | [
"MIT"
] | null | null | null | labinfo11/venv/Scripts/pip3-script.py | MatiwsxD/ayed-2019-1 | a5fdbe3a055405150122cf3875cdb0c6afd9eff0 | [
"MIT"
] | null | null | null | #!C:\Users\MatiwsxD\PycharmProjects\labinfo11\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| 32.153846 | 69 | 0.674641 |
82f1d32ed1067cf1f19d85e53ebe487485f2ac3a | 384 | py | Python | src/assignments/assignment10/customer.py | acc-cosc-1336/cosc-1336-spring-2018-zachdiamond000 | 1bcb2b6f75d9b8dd38a77f075f82f541e774411c | [
"MIT"
] | null | null | null | src/assignments/assignment10/customer.py | acc-cosc-1336/cosc-1336-spring-2018-zachdiamond000 | 1bcb2b6f75d9b8dd38a77f075f82f541e774411c | [
"MIT"
] | null | null | null | src/assignments/assignment10/customer.py | acc-cosc-1336/cosc-1336-spring-2018-zachdiamond000 | 1bcb2b6f75d9b8dd38a77f075f82f541e774411c | [
"MIT"
] | null | null | null | '''
Create a customer class
Add a constructor with parameters first, last, and phone_number
Create public attributes for first, last, and phone_number
STUDENT MUST ALSO MODIFY INVOICE CLASS TO USE THIS CLASS
SEE INVOICE file FOR INSTRUCTIONS
'''
class Customer:
def __init__(self,first, last,p_number):
        self.first = first
        self.last = last
        self.phone_number = p_number
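# Hedged example of the intended use (sample values are made up; the Invoice
# integration mentioned in the docstring is not shown here):
#
#   customer = Customer('Ada', 'Lovelace', '555-0100')
#   print(customer.first, customer.last, customer.phone_number)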
| 24 | 63 | 0.708333 |
45d642cf079908964e73e5c1e92413f25461c423 | 3,120 | py | Python | Co-Simulation/Sumo/sumo-1.7.0/tools/visualization/plot_summary.py | uruzahe/carla | 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | [
"MIT"
] | 4 | 2020-11-13T02:35:56.000Z | 2021-03-29T20:15:54.000Z | Co-Simulation/Sumo/sumo-1.7.0/tools/visualization/plot_summary.py | uruzahe/carla | 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | [
"MIT"
] | 9 | 2020-12-09T02:12:39.000Z | 2021-02-18T00:15:28.000Z | Co-Simulation/Sumo/sumo-1.7.0/tools/visualization/plot_summary.py | uruzahe/carla | 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | [
"MIT"
] | 1 | 2020-11-20T19:31:26.000Z | 2020-11-20T19:31:26.000Z | #!/usr/bin/env python
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2013-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file plot_summary.py
# @author Daniel Krajzewicz
# @author Laura Bieker
# @date 2013-11-11
"""
This script plots a selected measure from a summary-output.
matplotlib (http://matplotlib.org/) has to be installed for this purpose
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
sys.path.append(os.path.join(os.environ['SUMO_HOME'], 'tools'))
import sumolib # noqa
from sumolib.visualization import helpers # noqa
import matplotlib.pyplot as plt # noqa
def readValues(files, verbose, measure):
ret = {}
for f in files:
if verbose:
print("Reading '%s'..." % f)
ret[f] = sumolib.output.parse_sax__asList(f, "step", [measure])
return ret
def main(args=None):
"""The main function; parses options and plots"""
# ---------- build and read options ----------
from optparse import OptionParser
optParser = OptionParser()
optParser.add_option("-i", "--summary-inputs", dest="summary", metavar="FILE",
help="Defines the summary-output files to use as input")
optParser.add_option("-v", "--verbose", dest="verbose", action="store_true",
default=False, help="If set, the script says what it's doing")
optParser.add_option("-m", "--measure", dest="measure",
default="running", help="Define which measure to plot")
# standard plot options
helpers.addInteractionOptions(optParser)
helpers.addPlotOptions(optParser)
# parse
options, _ = optParser.parse_args(args=args)
if options.summary is None:
print("Error: at least one summary file must be given")
sys.exit(1)
minV = 0
maxV = 0
files = options.summary.split(",")
nums = readValues(files, options.verbose, options.measure)
times = readValues(files, options.verbose, "time")
for f in files:
maxV = max(maxV, len(nums[f]))
range(minV, maxV + 1)
fig, ax = helpers.openFigure(options)
for i, f in enumerate(files):
v = sumolib.output.toList(nums[f], options.measure)
t = sumolib.output.toList(times[f], "time")
c = helpers.getColor(options, i, len(files))
plt.plot(t, v, label=helpers.getLabel(f, i, options), color=c)
helpers.closeFigure(fig, ax, options)
if __name__ == "__main__":
sys.exit(main(sys.argv))
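# Hedged invocation sketch (file names are placeholders): something like
#   python plot_summary.py -i summary_run1.xml,summary_run2.xml -m running -v
# plots the selected "running" measure over time for each summary file; output
# and figure-styling flags come from sumolib.visualization.helpers and are not
# listed in this script.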
| 35.862069 | 87 | 0.675321 |
e0a6d9ca7cd2a0f50cd902ea6c9ccc49e67f926c | 65,867 | py | Python | cryptocurrency_bot/main_bot.py | Hukyl/cryptocurrency_bot | 5fd97056f1c88aea406571d1183e3ce1b7a50474 | [
"MIT"
] | 1 | 2021-06-02T06:13:56.000Z | 2021-06-02T06:13:56.000Z | cryptocurrency_bot/main_bot.py | Hukyl/cryptocurrency_bot | 5fd97056f1c88aea406571d1183e3ce1b7a50474 | [
"MIT"
] | null | null | null | cryptocurrency_bot/main_bot.py | Hukyl/cryptocurrency_bot | 5fd97056f1c88aea406571d1183e3ce1b7a50474 | [
"MIT"
] | null | null | null | from concurrent import futures
import copy
import datetime
import threading
import time
import telebot
from telebot.types import LabeledPrice
import schedule
from configs import settings
from models.parsers import CurrencyExchanger
from models.user import User, Prediction, Session
from models import exceptions
from utils import (
get_proxy_list, prettify_float, get_json_config, substract_percent,
prettify_percent, prettify_utcoffset
)
from utils.translator import translate as _
from utils.telegram import kbs, inline_kbs
from utils.dt import (
convert_datetime, check_datetime_in_future, convert_from_country_format,
adapt_datetime, convert_to_country_format, get_now, get_country_dt_example,
adapt_check_times
)
telebot.apihelper.ENABLE_MIDDLEWARE = True
bot = telebot.TeleBot(settings.TOKEN, threaded=False) # RecursionError
bot.full_bot_commands = {
'/start': 'запустить бота', # Start the bot
'/me': 'ваша информация', # Your info
'/today': 'котировки', # Quotes
'/change_checktime': 'сменить время оповещений', # Change check times
'/change_delta': 'сменить разницу в процентах, при которой оповещать',
# Change percent delta at which to notify
'/change_timezone': 'сменить ваш часовой пояс', # change your timezone
'/toggle_alarms': 'включить/выключить оповещения', # Toggle alarms
'/toggle_experts_predictions': 'включить/выключить прогнозы от экспертов',
# Toggle experts predictions
'/make_prediction': 'сделать прогноз', # Make a prediction
'/get_predictions': 'прогнозы', # Go to "Predictions" section
'/convert': 'конвертер валют', # Currency Converter
'/menu': 'главное меню', # Main menu
'/subscription': 'подписка', # Go to "Subscription" section
'/language': 'сменить язык', # Change language
'/techsupport': 'техподдержка', # Go to "Techsupport" section
'/help': 'помощь по командам', # Help with commands
}
bot.short_bot_commands = {
k: bot.full_bot_commands.get(k)
for k in ['/start', '/me', '/today', '/subscription', '/language', '/help']
}
bot.skip_pending = True
currency_parser = CurrencyExchanger(proxy_list=get_proxy_list())
USERS_SESSIONS = {}
###############################################################################
def get_or_create_session(chat_id):
global USERS_SESSIONS
try:
session = USERS_SESSIONS.get(chat_id)
if not session:
session = Session(chat_id)
settings.logger.debug(f"{session.user} logged in")
USERS_SESSIONS[chat_id] = session
except MemoryError:
for i in range(50):
USERS_SESSIONS.popitem()
return get_or_create_session(chat_id)
else:
return USERS_SESSIONS[chat_id]
# Used not to initialize the user every time, just save their state
@bot.middleware_handler(update_types=['message'])
def set_message_session(bot_instance, message):
bot_instance.session = get_or_create_session(message.chat.id)
# Used not to initialize the user every time, just save their state
@bot.middleware_handler(update_types=['callback_query'])
def set_call_session(bot_instance, call):
bot_instance.session = get_or_create_session(call.message.chat.id)
@bot.middleware_handler(update_types=['message'])
def check_if_command(bot_instance, message):
# answer for command, even if the `register_next_step_handler` is used
if message.entities:
is_bot_command = (
message.entities[0].type == 'bot_command' and
message.text in bot_instance.full_bot_commands
)
if is_bot_command:
try:
bot_instance.clear_step_handler(message)
except RecursionError:
pass
###############################################################################
@settings.logger.catch_error
@bot.message_handler(commands=['start'])
def start_message(msg):
user = bot.session.user
tech_support_recognizer = settings.ACCESSIBLE_LINK.split('=')[1]
add_info = msg.text.split()[1:]
bot.send_message(
msg.chat.id,
_(
'Welcome, {}!',
user.language
).format(msg.from_user.first_name)
)
bot.send_message(
msg.chat.id,
_(
"I am <b>{}</b>, your personal shareholder bot, and I will keep"
" you updated on important trading events!",
user.language
).format(bot.get_me().first_name),
parse_mode='html'
)
if (add_info and (
tech_support_recognizer in add_info
)) or not list(User.get_staff_users()):
# if user started bot with support link or there are not staff users
user.init_staff()
bot.send_message(
msg.chat.id,
_(
'⚙ You have received a technical support status ⚙',
user.language
)
)
settings.logger.info(f"{user} recieved staff status")
return start_bot(msg)
@bot.message_handler(commands=['menu'])
def start_bot(msg, to_show_commands: bool = True):
user = bot.session.user
buttons = [
_('Quotes', user.language),
_('Notifications', user.language),
_('Subscription', user.language),
_('Language', user.language),
_('Technical support', user.language)
]
kb = kbs(buttons, one_time_keyboard=False)
if to_show_commands:
commands_str = '\n'.join(
'{} - %s' % v for k, v in bot.short_bot_commands.items()
)
bot.send_message(
msg.chat.id,
_(
commands_str,
user.language,
).format(*list(bot.short_bot_commands)),
reply_markup=kb
)
else:
bot.send_message(
msg.chat.id, _("Main menu", user.language), reply_markup=kb
)
bot.register_next_step_handler(msg, choose_option, buttons=buttons)
def choose_option(msg, buttons=None):
buttons = buttons or []
user = bot.session.user
if buttons[0] == msg.text:
# see exchange rates for today
return get_currency_rates_today(msg)
elif buttons[1] == msg.text:
# go to notifications section
buttons = {
_("Your info", user.language): see_user_info,
_(
'Change alarm time', user.language
): change_user_rate_check_times,
_(
'Change alarm percent', user.language
): change_user_rate_percent_delta,
_('Toggle alarms', user.language): toggle_user_alarms,
_(
"Toggle experts predictions", user.language
): toggle_user_experts_predictions,
_('Change time zone', user.language): change_user_timezone,
_('Main menu', user.language): start_bot
}
if user.is_pro:
buttons[_(
'⚜ Other currencies ⚜', user.language
)] = other_user_currencies_menu
kb = kbs(list(buttons), one_time_keyboard=False, row_width=2)
bot.send_message(
msg.chat.id,
_('Выберите опцию', user.language),
reply_markup=kb
)
return bot.register_next_step_handler(
msg, change_alarms, buttons
)
elif buttons[2] == msg.text:
return buy_subscription(msg)
elif buttons[-2] == msg.text:
# change system language
return change_language(msg)
elif buttons[-1] == msg.text:
return send_techsupport_message(msg)
else:
return bot.register_next_step_handler(msg, choose_option, buttons)
@bot.message_handler(commands=['today'])
def get_currency_rates_today(msg):
user = bot.session.user
buttons_dct = {
_('Make a prediction', user.language): make_user_currency_prediction,
_('View predictions', user.language): see_users_currency_predictions,
_('Convert', user.language): convert_currency,
_('Main menu', user.language): start_bot
}
def choose_option_inner(msg_inner):
if buttons_dct.get(msg_inner.text, None) is None:
bot.send_message(
msg_inner.chat.id,
_(
'❗ Choose only from the suggestions ❗',
user.language
)
)
bot.register_next_step_handler(msg_inner, choose_option_inner)
else:
return buttons_dct.get(msg_inner.text)(msg_inner)
bot.send_message(
msg.chat.id,
currency_parser.to_telegram_string(user.language),
parse_mode='Markdown',
reply_markup=kbs(list(buttons_dct))
)
bot.register_next_step_handler(msg, choose_option_inner)
@bot.message_handler(commands=['make_prediction'])
def make_user_currency_prediction(msg):
user: User = bot.session.user
date = None
iso_from = None
iso_to = None
value = None
def get_date(msg_inner):
nonlocal date
try:
up_to_date = convert_datetime(
convert_from_country_format(msg_inner.text, user.language),
user.timezone
)
assert check_datetime_in_future(up_to_date)
except ValueError:
bot.send_message(
msg_inner.chat.id,
_(
'❗ Please enter the date only in the specified format ❗',
user.language
)
)
bot.register_next_step_handler(msg_inner, get_date)
except AssertionError:
bot.send_message(
msg_inner.chat.id,
_('❗ You cannot enter a past date ❗', user.language)
)
bot.register_next_step_handler(msg_inner, get_date)
else:
date = up_to_date
bot.send_message(
msg_inner.chat.id,
_(
'Enter the ISO-codes of the forecast currency '
'`<ISO>-<ISO>`\nFor example, USD-RUB',
user.language
),
parse_mode='Markdown',
reply_markup=kbs(settings.ACCEPTABLE_CURRENCIES_CONVERTION)
)
bot.register_next_step_handler(msg_inner, get_iso)
def get_iso(msg_inner):
nonlocal iso_from, iso_to
msg_inner.text = settings.ACCEPTABLE_CURRENCIES_CONVERTION.get(
msg_inner.text, msg_inner.text
)
try:
iso_from, iso_to = [x.strip() for x in msg_inner.text.split('-')]
except ValueError:
bot.send_message(
msg_inner.chat.id,
_(
'❗ Enter currency iso codes only'
' in the specified format ❗',
user.language
)
)
else:
if currency_parser.check_rate_exists(iso_from, iso_to):
bot.send_message(
msg_inner.chat.id,
_(
"Enter the forecast result "
"(for example, 27.50, 22300)",
user.language
)
)
return bot.register_next_step_handler(msg_inner, get_value)
else:
bot.send_message(
msg_inner.chat.id,
_(
"❗ This currency does not exist or is not supported"
", please try another one ❗",
user.language
)
)
return bot.register_next_step_handler(msg_inner, get_iso)
def get_value(msg_inner):
nonlocal value
try:
value = float(msg_inner.text.replace(',', '.'))
except ValueError:
bot.send_message(
msg_inner.chat.id, _('❗ Enter only numbers ❗', user.language)
)
bot.register_next_step_handler(msg_inner, get_value)
else:
buttons = [_('Yes', user.language), _('No', user.language)]
bot.send_message(
msg_inner.chat.id,
_(
'Here is the forecast data:\nForecast period: {}'
'\nCurrency: {} - {}\nValue: {}\n.\nConfirm '
'forecast creation?',
user.language
).format(
convert_to_country_format(
adapt_datetime(date, user.timezone), user.language
),
iso_from,
iso_to,
prettify_float(value)
),
reply_markup=kbs(buttons)
)
bot.register_next_step_handler(
msg_inner, confirm_prediction, buttons
)
def resend_prediction_all_users(prediction):
for usr in User.get_all_users(if_all=False):
if usr.to_notify_by_experts:
if Session.db.fetch_count(usr.id) > 0:
bot.send_message(
usr.id,
_(
'*⚜ Experts prediction ⚜*\n*Currencies: {}-{}*\n'
'*Up to:* {}\n*Predicted value:* {}',
usr.language
).format(
prediction.iso_from, prediction.iso_to,
convert_to_country_format(
adapt_datetime(
prediction.up_to_date, usr.timezone
),
usr.language
),
prettify_float(prediction.value)
),
parse_mode='Markdown'
)
Session.db.decrease_count(usr.id)
else:
bot.send_message(
usr.id,
_(
"❗ Your limit on receiving predictions has"
" expired, contact our support team ❗",
usr.language
)
)
def confirm_prediction(msg_inner, buttons):
if msg_inner.text == buttons[0]:
user.create_prediction(
iso_from, iso_to, prettify_float(value), date
)
if user.is_staff:
threading.Thread(
target=resend_prediction_all_users,
args=(user.predictions[-1],), daemon=True
).start()
bot.send_message(
msg_inner.chat.id,
_('The forecast has been created!', user.language)
)
return start_bot(msg_inner)
elif msg_inner.text == buttons[1]:
bot.send_message(
msg_inner.chat.id, _('Forecast not created', user.language)
)
return start_bot(msg_inner)
else:
bot.send_message(
msg_inner.chat.id, _('Response not processed', user.language)
)
return start_bot(msg_inner)
bot.send_message(
msg.chat.id,
_('To exit anywhere, enter {}', user.language).format('/menu')
)
datetime_format = get_country_dt_example(user.language)
datetime_example = convert_to_country_format(
adapt_datetime(get_now(), user.timezone),
user.language
)
bot.send_message(
msg.chat.id,
_(
'Select the forecast validity period in the format `{}`\n'
'For example, {}',
user.language
).format(datetime_format, datetime_example),
parse_mode='Markdown'
)
bot.register_next_step_handler(msg, get_date)
@bot.message_handler(commands=['get_predictions'])
def see_users_currency_predictions(msg):
user = bot.session.user
def see_self_predictions(msg_inner):
preds = {
x.trepr(user): f'get_prediction_{x.id}'
for x in user.get_predictions()
}
kb_inline = inline_kbs(preds, row_width=1)
if len(preds) == 0:
bot.send_message(
msg_inner.chat.id,
_('You have no predictions so far, create one!', user.language)
)
else:
bot.send_message(
msg_inner.chat.id,
_('Here are your predictions', user.language),
reply_markup=kb_inline
)
return see_users_currency_predictions(msg_inner)
def see_other_users_predictions(msg_inner):
if user.is_pro:
experts_str = (
'⚜ Experts predictions ⚜ are:\n'
+
('\n\n'.join([
x.tstr(user)
for x in Prediction.get_experts_predictions()][:5]
) or ' none')
)
if experts_str.endswith('none'):
# if no predictions were concatenated to prefix
experts_str = experts_str.replace('\n', '')
bot.send_message(
msg_inner.chat.id,
_(experts_str, user.language),
)
liked_preds_str = (
'Most liked predictions are:\n'
+
('\n\n'.join([
x.tstr(user)
for x in Prediction.get_most_liked_predictions()][:5]
) or ' none')
)
if liked_preds_str.endswith('none'):
# if no predictions were concatenated to prefix
liked_preds_str = liked_preds_str.replace('\n', '')
bot.send_message(
msg_inner.chat.id,
_(
liked_preds_str,
user.language
),
)
return see_users_currency_predictions(msg_inner)
def liking_system(msg_inner):
try:
rand_pred = Prediction.get_random_prediction()
except exceptions.PredictionDoesNotExistError:
# if no predictions are there
bot.send_message(
msg_inner.chat.id,
_(
'There are no predictions to like yet,'
' you can create one!',
user.language
)
)
return start_bot(msg_inner)
else:
closest = rand_pred.get_closest_neighbours()
previous, nxt = closest['previous'], closest['next']
inline_buttons = {
'👍': f'like_prediction_{rand_pred.id}',
'👎': f'dislike_prediction_{rand_pred.id}'
}
if previous:
inline_buttons['<<'] = f'previous_prediction_to_{rand_pred.id}'
if nxt:
inline_buttons['>>'] = f'next_prediction_to_{rand_pred.id}'
inline_kb = inline_kbs(inline_buttons, row_width=2)
bot.send_message(
msg_inner.chat.id,
_(rand_pred.tstr(user), user.language),
reply_markup=inline_kb
)
return see_users_currency_predictions(msg_inner)
def choose_option_inner(msg_inner):
res_func = buttons.get(msg_inner.text, None)
if res_func is not None:
return res_func(msg_inner)
else:
bot.send_message(
msg_inner.chat.id,
_('❗ Choose only from the suggestions ❗', user.language),
reply_markup=kbs(list(buttons))
)
bot.register_next_step_handler(msg_inner, choose_option_inner)
buttons = {
_('My predictions', user.language): see_self_predictions,
_('Other predictions', user.language): see_other_users_predictions,
_('Participate in the assessment', user.language): liking_system,
_('Main menu', user.language): start_bot
}
bot.send_message(
msg.chat.id,
_('Choose from the following:', user.language),
reply_markup=kbs(list(buttons))
)
bot.register_next_step_handler(msg, choose_option_inner)
def get_prediction_inline_kb_for_liking(pred):
closest = pred.get_closest_neighbours()
previous, nxt = closest['previous'], closest['next']
inline_buttons = {
'👍': f'like_prediction_{pred.id}',
'👎': f'dislike_prediction_{pred.id}'
}
if previous:
inline_buttons['<<'] = f'previous_prediction_to_{pred.id}'
if nxt:
inline_buttons['>>'] = f'next_prediction_to_{pred.id}'
inline_kb = inline_kbs(inline_buttons, row_width=2)
return inline_kb
@bot.callback_query_handler(
lambda call: (
'next_prediction_to_' in call.data or
'previous_prediction_to_' in call.data
)
)
def get_closest_prediction(call):
action, *data, pred_id = call.data.split('_')
start_pred = Prediction(int(pred_id))
following_pred = start_pred.get_closest_neighbours()[action]
user = bot.session.user
inline_kb = get_prediction_inline_kb_for_liking(following_pred)
bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text=_(following_pred.tstr(user), user.language),
reply_markup=inline_kb
)
@bot.callback_query_handler(
lambda call: (
'like_prediction_' in call.data or 'dislike_prediction_' in call.data
)
)
def toggle_user_reaction(call):
action, *some_data, pred_id = call.data.split('_')
prediction = Prediction(int(pred_id))
user = bot.session.user
reaction = True if action == 'like' else False
prediction.toggle_like(call.message.chat.id, reaction)
bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text=_(prediction.tstr(user), user.language),
reply_markup=get_prediction_inline_kb_for_liking(prediction)
)
bot.answer_callback_query(
callback_query_id=call.id,
show_alert=False,
text=_(f'You {action}d this prediction', user.language)
)
@bot.callback_query_handler(lambda call: 'get_prediction_' in call.data)
def get_prediction_details(call):
pred_id = int(call.data.split('_')[-1])
pred = Prediction(pred_id)
user = bot.session.user
bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text=_(pred.tstr(user), user.language),
reply_markup=inline_kbs({
_('Delete', user.language): f'ask_delete_prediction_{pred_id}',
_('Back', user.language): f'get_user_predictions_{pred.user_id}'
}, row_width=1)
)
@bot.callback_query_handler(lambda call: 'ask_delete_prediction_' in call.data)
def ask_delete_prediction(call):
pred_id = int(call.data.split('_')[-1])
pred = Prediction(pred_id)
user = bot.session.user
if pred.is_actual:
bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text=_(
"Are you sure you want to delete this prediction:\n{}?",
user.language
).format(pred.trepr(user)),
reply_markup=inline_kbs({
_('Yes', user.language): f'delete_prediction_{pred_id}',
_('No', user.language): f'get_user_predictions_{pred.user_id}'
})
)
else:
bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text=_('You cannot delete a verified prediction!', user.language),
reply_markup=inline_kbs({
_(
'Back', user.language
): f'get_user_predictions_{pred.user_id}'
})
)
@bot.callback_query_handler(lambda call: 'delete_prediction_' in call.data)
def delete_prediction(call):
pred_id = int(call.data.split('_')[-1])
prediction = Prediction(pred_id)
user = bot.session.user
bot.delete_message(call.message.chat.id, call.message.message_id)
if prediction.is_actual:
prediction.delete()
answer_msg = _(
"Prediction ({}) was deleted",
user.language
).format(prediction.trepr(user))
else:
answer_msg = _(
'You cannot delete a verified prediction!', user.language
)
bot.answer_callback_query(
callback_query_id=call.id,
show_alert=False,
text=answer_msg
)
@bot.callback_query_handler(lambda call: 'get_user_predictions_' in call.data)
def get_user_predictions(call):
user = bot.session.user
kb_inline = inline_kbs({
x.trepr(user): f'get_prediction_{x.id}'
for x in user.get_predictions()
}, row_width=1)
return bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text=_('Here are your predictions', user.language),
reply_markup=kb_inline
)
@bot.message_handler(commands=['convert'])
def convert_currency(msg):
user = bot.session.user
iso_from = None
iso_to = None
def get_isos(msg_inner):
nonlocal iso_from, iso_to
try:
iso_from, iso_to = [x.upper() for x in msg_inner.text.split('-')]
except ValueError:
bot.send_message(
msg_inner.chat.id,
_(
'❗ Enter currency iso codes'
' only in the specified format ❗',
user.language
)
)
return bot.register_next_step_handler(msg_inner, get_isos)
else:
return print_convertation(msg_inner)
def print_convertation(msg_inner):
nonlocal iso_from, iso_to
try:
rate = currency_parser.get_rate(iso_from, iso_to)
except Exception:
bot.send_message(
msg_inner.chat.id,
_(
"❗ The converter did not find such"
" currencies, please try again ❗",
user.language
)
)
return bot.register_next_step_handler(msg_inner, get_isos)
else:
markup = inline_kbs(
{
i: f"change_currency_converter_amount_to_{i}"
for i in settings.CURRENCY_RATES_CHANGE_AMOUNTS
}
)
bot.send_message(
msg_inner.chat.id,
_('Conversion by {}:\n{} {} - {} {}', user.language).format(
convert_to_country_format(
adapt_datetime(get_now(), user.timezone),
user.language
),
prettify_float(rate[iso_from]),
iso_from,
prettify_float(rate[iso_to]),
iso_to
),
reply_markup=markup
)
return start_bot(msg_inner)
bot.send_message(
msg.chat.id,
_(
'Enter the ISO-codes of currencies `<ISO>-<ISO>`\n'
'For example, USD-RUB',
user.language
),
parse_mode='Markdown'
)
bot.register_next_step_handler(msg, get_isos)
@bot.callback_query_handler(
lambda call: 'change_currency_converter_amount_to_' in call.data
)
def get_callback_for_change_currency_converter_amount(call):
user = bot.session.user
def change_currency_converter_amount(call_inner):
try:
if call_inner.message:
change_amount = call_inner.data.split('_')[-1]
change_amount = float(change_amount)
iso_from, iso_to = [
x.split()
for x in call_inner.message.text.split(':')[-1].split('-')
]
rate = float(iso_to[0].replace(',', '.')) / float(
iso_from[0].replace(',', '.')
)
new_amount = rate * change_amount
markup = inline_kbs(
{
i: f"change_currency_converter_amount_to_{i}"
for i in settings.CURRENCY_RATES_CHANGE_AMOUNTS
}
)
if change_amount == float(iso_from[0]):
# if we try to set the same text as before, an error occurs
return bot.answer_callback_query(
callback_query_id=call_inner.id,
show_alert=False,
text=_(
f"Amount is already {change_amount}",
user.language
)
)
else:
bot.edit_message_text(
chat_id=call_inner.message.chat.id,
message_id=call_inner.message.message_id,
text=_(
'Conversion by {}:\n{} {} - {} {}',
user.language
).format(
convert_to_country_format(
adapt_datetime(get_now(), user.timezone),
user.language
),
prettify_float(change_amount),
iso_from[1],
prettify_float(new_amount),
iso_to[1]
),
reply_markup=markup
)
bot.answer_callback_query(
callback_query_id=call_inner.id,
show_alert=False,
text=_(
"Amount on {}-{} changed to {}",
user.language
).format(iso_from[1], iso_to[1], change_amount)
)
except Exception as e:
print(repr(e))
def ask_sum(msg, call_inner, to_delete: list):
try:
value = float(msg.text.replace(',', '.'))
except ValueError:
warning_msg = bot.send_message(
msg.chat.id, _('❗ Enter only numbers ❗', user.language)
)
to_delete = list(to_delete) + [msg, warning_msg]
bot.register_next_step_handler(msg, ask_sum, call_inner, to_delete)
else:
call_inner.data = f"change_currency_converter_amount_to_{value}"
try:
# delete messages
for msg_ in to_delete:
bot.delete_message(msg_.chat.id, msg_.message_id)
bot.delete_message(msg.chat.id, msg.message_id)
except Exception as e:
# permission to delete messages was not received
print(repr(e))
return change_currency_converter_amount(call_inner)
def set_amount_to_1(call_inner):
call_inner.data = f"change_currency_converter_amount_to_{1}"
return change_currency_converter_amount(call_inner)
if call.message:
command = call.data.split('_')[-1]
if command == '...':
# bot.clear_step_handler(call.message)
msg_to_delete = bot.send_message(
call.message.chat.id,
_(
'Enter new amount',
user.language
)
)
return bot.register_next_step_handler(
call.message, ask_sum, call, [msg_to_delete]
)
elif command == 'Reset':
return set_amount_to_1(call)
def change_alarms(msg, buttons):
user = bot.session.user
func = buttons.get(msg.text, None)
if func is None:
bot.send_message(
msg.chat.id,
_(
"❗ I can't understand your request, please try again ❗",
user.language
),
reply_markup=kbs(list(buttons), row_width=2)
)
return bot.register_next_step_handler(
msg,
change_alarms,
buttons
)
else:
return func(msg)
@bot.message_handler(commands=['toggle_alarms'])
def toggle_user_alarms(msg):
user = bot.session.user
user.update(is_active=not user.is_active)
bot.send_message(
msg.chat.id,
_(
f"Notifications {'en' if user.is_active else 'dis'}abled",
user.language
)
)
return start_bot(msg)
@bot.message_handler(commands=['toggle_experts_predictions'])
def toggle_user_experts_predictions(msg):
user = bot.session.user
user.update(to_notify_by_experts=not user.to_notify_by_experts)
bot.send_message(
msg.chat.id,
_(
"Experts' predictions {}abled".format(
'en' if user.to_notify_by_experts else 'dis'
),
user.language
)
)
return start_bot(msg)
@bot.message_handler(commands=['me'])
def see_user_info(msg):
u = bot.session.user
is_subscribed = (
f'до {convert_to_country_format(u.is_pro, u.language)}'
if isinstance(u.is_pro, datetime.datetime) else
'да' if u.is_pro is True else 'нет'
)
info = (
f"Пользователь @{msg.from_user.username}\n" +
f"Telegram ID: {u.id}\n" +
f"Подписка: {is_subscribed}\n" +
f"Персонал: {'да' if u.is_staff else 'нет'}\n" +
f"Часовой пояс: {prettify_utcoffset(u.timezone)}\n" +
f"Оповещения: {'включены' if u.is_active else 'отключены'}\n" +
'Прогнозы от экспертов: {}\n'.format(
'включены' if u.to_notify_by_experts else 'отключены'
) +
User.prettify_rates(u.rates)
)
bot.send_message(msg.chat.id, _(info, u.language))
return start_bot(msg)
@settings.logger.catch_error
@bot.message_handler(commands=['change_delta'])
def change_user_rate_percent_delta(msg):
user = bot.session.user
currency = None
def inner1(msg_inner):
nonlocal currency
if msg_inner.text in user.rates:
currency = msg_inner.text
bot.send_message(
msg_inner.chat.id,
_(
"Your interest on {} - {}\nSelect the amount of interest",
user.language
).format(
currency,
prettify_percent(
user.rates.get(currency).get('percent_delta')
)
),
reply_markup=kbs(settings.PERCENTAGES)
)
bot.register_next_step_handler(msg_inner, inner2)
else:
bot.send_message(
msg_inner.chat.id,
'❗ Please enter only valid currencies ❗',
reply_markup=kbs(settings.CURRENCIES)
)
bot.register_next_step_handler(msg_inner, inner1)
def inner2(msg_inner):
nonlocal currency
try:
if 'inf' not in msg_inner.text:
delta = float(msg_inner.text) / 100
assert 0 < delta < 1
else:
raise ValueError
except ValueError:
bot.send_message(
msg_inner.chat.id,
_("❗ Enter only numbers ❗", user.language)
)
return bot.register_next_step_handler(msg_inner, inner2)
except AssertionError:
bot.send_message(
msg_inner.chat.id,
_("❗ Percent must be in range from 0 to 100 ❗", user.language)
)
return bot.register_next_step_handler(msg_inner, inner2)
user.update_rates(currency, percent_delta=delta)
bot.send_message(
msg_inner.chat.id,
_("Your percentage is now {}", user.language).format(
prettify_percent(delta)
)
)
return start_bot(msg_inner)
kb = kbs(list(user.rates))
bot.send_message(
msg.chat.id,
_("Выберите валюту изменения процентов", user.language),
reply_markup=kb
)
return bot.register_next_step_handler(msg, inner1)
@settings.logger.catch_error
@bot.message_handler(commands=['change_checktime'])
def change_user_rate_check_times(msg):
user = bot.session.user
available_times = copy.deepcopy(settings.CHECK_TIMES)
chosen_times = []
start = (
settings.UNSUBSCIRBED_USER_CHECK_TIMES
if not user.is_pro else
settings.SUBSCIRBED_USER_CHECK_TIMES
)
currency = None
def inner1(msg_inner):
nonlocal currency
if msg_inner.text in user.rates:
currency = msg_inner.text
if user.is_pro:
bot.send_message(
msg_inner.chat.id,
_(
"You subscribed ⚜ and you are presented"
" with all possible alert times!",
user.language
)
)
return start_bot(msg_inner)
else:
bot.send_message(
msg_inner.chat.id,
_(
'Your alert times for {} - {}',
user.language
).format(
currency,
','.join(
adapt_check_times(
user.rates.get(currency).get('check_times'),
user.timezone
)
)
)
)
bot.send_message(
msg_inner.chat.id,
_(
'Select {} time(s)',
user.language
).format(start),
reply_markup=kbs(
adapt_check_times(available_times, user.timezone)
)
)
bot.register_next_step_handler(msg_inner, inner2, start)
else:
bot.send_message(
msg_inner.chat.id,
_('❗ Please enter only valid currencies ❗', user.language),
reply_markup=kbs(
adapt_check_times(settings.CURRENCIES, user.timezone)
)
)
bot.register_next_step_handler(msg_inner, inner1)
def inner2(msg_inner, iteration_num):
nonlocal chosen_times, available_times
try:
if msg_inner.text in available_times:
time.strptime(msg_inner.text, '%H:%M')
iteration_num -= 1
available_times.remove(msg_inner.text)
chosen_times.append(msg_inner.text)
else:
raise ValueError
if iteration_num == 0:
chosen_times = sorted(
chosen_times,
key=lambda x: int(x.split(':')[0])
)
user.update_rates(currency, check_times=chosen_times)
bot.send_message(
msg_inner.chat.id,
_(
'Your alert times for {} - {}',
user.language
).format(
currency,
", ".join(chosen_times)
)
)
return start_bot(msg_inner)
except ValueError: # if time not in CHECK_TIMES or time is not valid
bot.send_message(
msg_inner.chat.id,
_(
"❗ Please enter only available dates ❗",
user.language
)
)
return bot.register_next_step_handler(
msg_inner, inner2, iteration_num
)
else:
bot.send_message(
msg_inner.chat.id,
_(
f"Enter more {iteration_num} time(s)",
user.language),
reply_markup=kbs(
adapt_check_times(available_times, user.timezone)
)
)
bot.register_next_step_handler(msg_inner, inner2, iteration_num)
kb = kbs(user.rates.keys())
bot.send_message(
msg.chat.id,
_("Select the currency of the alert time change", user.language),
reply_markup=kb
)
return bot.register_next_step_handler(msg, inner1)
@settings.logger.catch_error
@bot.message_handler(commands=['change_timezone'])
def change_user_timezone(msg):
user = bot.session.user
timezones = {
prettify_utcoffset(zone): zone
for zone in range(-11, 13)
}
def accept_input(msg_inner):
res_timezone = timezones.get(msg_inner.text, None)
if res_timezone is None:
bot.send_message(
msg_inner.chat.id,
_(
'❗ Please enter only suggested time zones ❗',
user.language,
),
reply_markup=kbs(list(timezones), row_width=2)
)
bot.register_next_step_handler(msg_inner, accept_input)
else:
user.update(timezone=res_timezone)
bot.send_message(
msg_inner.chat.id,
_(
'Now your time zone is {}',
user.language
).format(prettify_utcoffset(user.timezone))
)
return start_bot(msg_inner)
bot.send_message(
msg.chat.id,
_(
'Your current time zone is {}\nPlease select your time zone',
user.language
).format(prettify_utcoffset(user.timezone)),
reply_markup=kbs(list(timezones), row_width=2)
)
bot.register_next_step_handler(msg, accept_input)
def other_user_currencies_menu(msg):
user = bot.session.user
buttons = {
_("Add new currency", user.language): add_new_currency,
_("Delete currency", user.language): delete_user_currency,
_("Back", user.language): start_bot
}
def next_step(msg_inner):
option = buttons.get(msg_inner.text, None)
if option is None:
bot.send_message(
msg_inner.chat.id,
_('❗ Choose only from the suggestions ❗', user.language)
)
bot.register_next_step_handler(msg_inner, next_step)
else:
return option(msg_inner)
bot.send_message(
msg.chat.id,
_('Choose from the following:', user.language),
reply_markup=kbs(list(buttons), row_width=3)
)
bot.register_next_step_handler(msg, next_step)
@settings.logger.catch_error
def delete_user_currency(msg):
user = bot.session.user
curr = None
deletable_currencies = list(
set(user.rates).difference(set(settings.CURRENCIES))
)
answer_options = {
_("Yes", user.language): True,
_("No", user.language): False
}
def confirm_deletion(msg_inner):
option = answer_options.get(msg_inner.text, None)
if option is True:
user.delete_rate(curr)
bot.send_message(
msg_inner.chat.id,
_("Currency {} was deleted", user.language).format(curr)
)
elif option is False:
bot.send_message(
msg_inner.chat.id,
_("Currency {} wasn't deleted", user.language).format(curr)
)
elif option is None:
bot.send_message(
msg_inner.chat.id,
_(
"I don't understand your answer,"
" returning to the main menu...",
user.language
)
)
return start_bot(msg_inner)
def choose_currency_to_delete(msg_inner):
nonlocal curr
curr = msg_inner.text
if curr in deletable_currencies:
bot.send_message(
msg_inner.chat.id,
_(
"Are you sure you want to delete this currency: {}?",
user.language
).format(curr),
reply_markup=kbs(list(answer_options))
)
bot.register_next_step_handler(msg_inner, confirm_deletion)
else:
if curr == _("Back", user.language):
return start_bot(msg_inner)
elif curr in settings.CURRENCIES:
bot.send_message(
msg_inner.chat.id,
_("❗ You can't delete default currencies ❗", user.language)
)
else:
bot.send_message(
msg_inner.chat.id,
_("❗ This currency is not supported ❗", user.language)
)
bot.register_next_step_handler(
msg_inner, choose_currency_to_delete
)
if len(deletable_currencies) > 0:
bot.send_message(
msg.chat.id,
_("Choose currency to delete", user.language),
reply_markup=kbs(
deletable_currencies + [_("Back", user.language)],
one_time_keyboard=False
)
)
bot.register_next_step_handler(msg, choose_currency_to_delete)
else:
bot.send_message(
msg.chat.id,
_("You have no extra currencies to delete", user.language)
)
return start_bot(msg)
@settings.logger.catch_error
def add_new_currency(msg):
user = bot.session.user
def ask_new_iso(msg_inner):
iso = msg_inner.text
try:
rate = currency_parser.get_rate(iso, "USD").get("USD")
except ValueError:
bot.send_message(
msg_inner.chat.id,
_(
'❗ This currency does not exist or is not supported,'
' please try another one ❗',
user.language
)
)
bot.register_next_step_handler(msg_inner, ask_new_iso)
else:
if iso in user.rates:
bot.send_message(
msg_inner.chat.id,
_(
'❗ The currency is already on your currency list ❗',
user.language
)
)
return start_bot(msg_inner)
elif user.is_pro:
user.add_rate(
iso, value=rate, check_times=settings.CHECK_TIMES
)
bot.send_message(
msg_inner.chat.id,
_(
'New currency has been created successfully!\n'
'Now the rate is {} - {} USD',
user.language
).format(iso, rate)
)
return start_bot(msg_inner)
bot.send_message(
msg.chat.id,
_('Enter the ISO-code of the new currency', user.language),
reply_markup=kbs(['RUB', 'EUR', 'UAH', 'BYN'])
)
bot.register_next_step_handler(msg, ask_new_iso)
@settings.logger.catch_error
@bot.message_handler(commands=['subscription'])
def buy_subscription(msg):
user = bot.session.user
json_config = get_json_config()
prices_json_list = json_config.get('subscriptionPrices')
start_price = json_config.get('subscriptionStartPrice')
prices = [
[
LabeledPrice(
label=f"Cost of subscription for {p.get('period')} month" + (
's' if p.get('period') > 1 else ''
),
amount=int(prettify_float(start_price * p.get('period')) * 100)
)
] + ([
LabeledPrice(
label=f'Discount {p.get("discount")*100}%',
amount=-int(prettify_float(
start_price * p.get('period') * p.get('discount')
) * 100)
# * 100 because `amount` is interpreted in cents
)
] if p.get('discount') > 0 else [])
for p in prices_json_list
]
prices_easy = {
price.get('period'): price.get('discount')
for price in prices_json_list
}
def confirm_payment(msg_inner):
if msg_inner.text == _('Yes, I want to!', user.language):
prices_str = ''
for price in prices_json_list:
period = price.get('period')
word_ending = (
'' if period == 1 else
'a' if period in range(2, 5) else 'ов'
)
total_sum = int(substract_percent(
period * start_price, price.get('discount')
))
prices_str += f'\n{period} месяц{word_ending} - {total_sum} $'
bot.send_message(
msg_inner.chat.id,
_(
'Отлично!\nВыберите длительность Подписки (в месяцах)\n'
f'{prices_str}',
user.language
),
reply_markup=kbs(list(prices_easy))
)
bot.register_next_step_handler(msg_inner, get_months_number)
elif msg_inner.text == _('No, thanks', user.language):
bot.send_message(
msg_inner.chat.id, _('Okay, we\'ll wait!', user.language)
)
return start_bot(msg_inner)
else:
bot.send_message(
msg_inner.chat.id,
_(
"I don't understand your answer, "
"returning to the main menu...",
user.language
)
)
return start_bot(msg_inner)
def get_months_number(msg_inner):
months = msg_inner.text
if not (months.isdigit() and (
int(msg_inner.text) in list(prices_easy))
):
bot.send_message(
msg_inner.chat.id,
_('❗ Please enter only suggested values ❗', user.language),
reply_markup=kbs(list(prices_easy))
)
bot.register_next_step_handler(msg_inner, get_months_number)
else:
price = [
(y, x)
for x, y in zip(list(prices_easy), prices)
if x == int(months)
][0]
bot.send_message(
msg_inner.chat.id,
_(
'❗ Pay just as you receive invoice, '
'otherwise payment can be not received ❗',
user.language
)
)
return command_pay(msg_inner, *price)
def command_pay(msg_inner, prices_inner, n_months: int = None):
bot.send_invoice(
msg_inner.chat.id,
title=_('Подписка', user.language),
description=_(
"You pay for a Subscription for {} month(s)",
user.language
).format(n_months),
provider_token=settings.PAYMENT_TOKEN,
currency='usd',
photo_url='https://i1.wp.com/bestservices.reviews/wp-content/'
'uploads/2019/09/Subscription-Billing.jpg?w=1200&ssl=1',
photo_height=300, # !=0/None or picture won't be shown
photo_width=600,
photo_size=512,
start_parameter='subscription-telegram-bot',
is_flexible=False, # True If you need to set up Shipping Fee
prices=prices_inner,
invoice_payload=f"{n_months}"
)
if not user.is_pro:
bot.send_message(
msg.chat.id,
_(
'When buying a Subscription, you get access to:\n'
'1. Unlimited number of alerts per day\n'
'2. Forecasts from experts\n'
'3. Adding your currencies to alerts\n'
'And more! \n\nBuy a Subscription '
'today, and you will not regret it',
user.language
),
reply_markup=kbs([
_('Yes, I want to!', user.language),
_('No, thanks', user.language)
])
)
bot.register_next_step_handler(msg, confirm_payment)
else:
bot.send_message(
msg.chat.id,
_('You have already subscribed!', user.language)
)
return start_bot(msg)
@bot.pre_checkout_query_handler(func=lambda query: True)
def checkout_handler(pre_checkout_query):
user = User(pre_checkout_query.from_user.id)
bot.answer_pre_checkout_query(
pre_checkout_query.id,
ok=True,
error_message=_(
"Oops, some error occurred, please try again later",
user.language
)
)
@bot.message_handler(content_types=['successful_payment'])
def subscription_payment_success(msg):
user = bot.session.user
n_months = int(msg.successful_payment.invoice_payload)
datetime_expires = get_now() + datetime.timedelta(days=n_months*31)
user.init_premium(datetime_expires)
bot.send_message(
msg.chat.id,
_(
"You have activated the Subscription until {}\nHappy trades!",
user.language
).format(
convert_to_country_format(
adapt_datetime(datetime_expires, user.timezone),
user.language
)
)
)
settings.logger.info(
"{} paid for subscription until {}".format(
str(user), adapt_datetime(datetime_expires, 0)
)
)
return start_bot(msg)
@bot.message_handler(commands=['language'])
def change_language(msg):
user = bot.session.user
buttons = [_('Russian 🇷🇺', user.language), _('English 🇬🇧', user.language)]
def confirm_language(msg_inner):
if buttons[0] == msg_inner.text:
user.update(language='ru')
elif buttons[1] == msg_inner.text:
user.update(language='en')
else:
bot.send_message(
msg_inner.chat.id,
_(
"❗ Choose only from the suggested languages ❗",
user.language
),
reply_markup=kbs(buttons)
)
return bot.register_next_step_handler(
msg_inner, confirm_language, user
)
bot.send_message(
msg_inner.chat.id,
_("Language changed successfully", user.language)
)
return start_bot(msg_inner)
bot.send_message(
msg.chat.id,
_(
'At the moment, the service has two languages: '
'Russian 🇷🇺 and English 🇬🇧',
user.language
),
reply_markup=kbs(buttons)
)
bot.register_next_step_handler(msg, confirm_language)
@bot.message_handler(commands=['techsupport'])
def send_techsupport_message(msg):
user = bot.session.user
if not user.is_staff:
bot.send_message(
msg.chat.id,
_(
'⚙ This is techsupport of @{} ⚙\n'
'Feel free to send us any feedbacks about this bot,'
' we are always grateful for your help!',
user.language
).format(bot.get_me().username),
reply_markup=inline_kbs(
{
_(
'Send message to Techsupport', user.language
): 'send_message_to_techsupport'
}
)
)
else:
bot.send_message(
msg.chat.id,
_('⚙ You are already a staff member ⚙', user.language)
)
return start_bot(msg)
@bot.callback_query_handler(
lambda call: call.data == 'send_message_to_techsupport'
)
def send_message_to_techsupport(call):
def send_message(msg):
answer_msg = ''
support_id = None
try:
for support_id in get_json_config().get('techsupportIds'):
bot.forward_message(
chat_id=support_id,
from_chat_id=msg.chat.id,
message_id=msg.message_id
)
except Exception:
answer_msg = _("Some error occurred", user.language)
print(f"ERROR: cannot send support message to {support_id}")
else:
answer_msg = _("Your message was received", user.language)
finally:
bot.send_message(msg.chat.id, answer_msg)
bot.clear_step_handler(msg)
return start_bot(msg)
if call.message:
user = bot.session.user
bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text=call.message.text
) # make the button disappear
bot.send_message(
user.id,
_(
'Напишите сообщение техподдержке ({} - возврат в меню)',
user.language
).format('/menu', bot.get_me().username)
)
bot.register_next_step_handler(call.message, send_message)
@bot.message_handler(commands=['help'])
def send_bot_help(msg):
user = bot.session.user
help_message = "Bot's commands:\n" + '\n'.join([
'{} - %s' % v for k, v in bot.full_bot_commands.items()
])
bot.send_message(
msg.chat.id,
_(
help_message,
user.language
).replace('{ }', '{}').format(*list(bot.full_bot_commands.keys()))
)
return start_bot(msg, to_show_commands=False)
###############################################################################
@schedule.repeat(schedule.every(3).minutes)
def update_rates():
for parser in currency_parser.parsers.values():
if not parser.update_value(safe=True):
settings.logger.error(f"Rate {parser.iso}-USD can not be updated")
settings.logger.debug("Rates updated")
@schedule.repeat(schedule.every(10).minutes)
def update_proxies():
proxies = get_proxy_list()
for parser in currency_parser.parsers.values():
parser.proxy_list = proxies
settings.logger.debug(f"Proxies updated, length: {len(proxies)}")
@schedule.repeat(schedule.every(3).minutes)
@settings.logger.catch_error
def check_premium_ended():
def check_user_premium_ended(usr):
if not check_datetime_in_future(usr.is_pro):
bot.send_message(
usr.id,
_(
'Your premium has expired, but you can always refresh it!',
usr.language
)
)
usr.delete_premium()
settings.logger.info(f"{usr} lost premium")
with futures.ThreadPoolExecutor(max_workers=50) as executor:
for user in User.get_pro_users(only_temp=True):
executor.submit(check_user_premium_ended, user)
@schedule.repeat(schedule.every().minutes.at(':00'))
@settings.logger.catch_error
def verify_predictions():
for pred in Prediction.get_unverified_predictions():
user = User(pred.user_id)
try:
pred_res = currency_parser.get_rate(pred.iso_from, pred.iso_to)
except exceptions.ParsingError:
settings.logger.error(
f"Rate {pred.iso_from}-{pred.iso_to} is unreachable"
)
user.create_prediction(
pred.iso_from,
pred.iso_to,
pred.value,
pred.up_to_date + datetime.timedelta(0, 5*60) # 5 minutes
)
            bot.send_message(
pred.user_id,
_(
"The rates are unreachable, "
"the prediction `{}` was scheduled for 5 minutes later",
user.language
).format(pred.trepr(user))
)
pred.delete(force=True)
else:
pred.update(real_value=pred_res.get(pred.iso_to))
diff = currency_parser.calculate_difference(
old=pred.value, new=pred.real_value
)
bot.send_message(
pred.user_id,
_(
'Results of `{}`:\n*Predicted value:* {}\n'
'*Real value:* {}\n*Percentage difference:* {}',
user.language
).format(
pred.trepr(user),
prettify_float(pred.value),
prettify_float(pred.real_value),
prettify_percent(
diff.get('percentage_difference'), to_sign=True
)
),
parse_mode='Markdown'
)
settings.logger.debug(f"{str(pred)} verified")
@schedule.repeat(schedule.every().minutes.at(':00'))
@settings.logger.catch_error
def start_alarms():
t = get_now().strftime('%H:%M')
with futures.ThreadPoolExecutor(max_workers=50) as executor:
for user in User.get_users_by_check_time(t):
executor.submit(send_alarm, user, t)
@settings.logger.catch_error
def send_alarm(user, t):
for k, v in user.get_currencies_by_check_time(t).items():
try:
rate = currency_parser.check_delta(
k, 'USD',
v.get('value'), v.get('percent_delta')
)
except exceptions.ParsingError:
settings.logger.error(f"Rate {k}-USD is unreachable")
bot.send_message(
user.id,
_(
"The rates are not available, "
"the notification can not be sent",
user.language
)
)
else:
if rate.get('new', None) is not None:
new, old = rate.get('new'), rate.get('old')
user.update_rates(k, value=new)
try:
bot.send_message(
user.id,
_(
'*Notification*\n*{}* = *{} USD*\n'
'The change: *{:+} ({})*\n'
'Previous: *{} = {} USD *',
user.language
).format(
k,
prettify_float(new),
prettify_float(rate.get('difference')),
prettify_percent(
rate.get('percentage_difference'),
to_sign=True
),
k,
prettify_float(old)
),
parse_mode='Markdown'
)
settings.logger.debug(
f"Sent '{k}-USD' alarm for {str(user)}"
)
except telebot.apihelper.ApiTelegramException:
# from traceback: "Bad Request: chat not found"
user.update(is_active=0)
settings.logger.warning(f"{str(user)} is not reachable")
# not to notify anymore, since chat is not reachable
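# Illustrative flow (hypothetical numbers; check_delta() lives in models.parsers
# and is not shown in this file): if a user tracks a pair k-USD with a stored
# value of 75.0 and percent_delta of 1%, and check_delta() reports
# {'new': 76.0, 'old': 75.0, 'difference': 1.0, 'percentage_difference': 0.0133},
# the stored rate is updated to 76.0 and the Markdown alert above is sent.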
def schedule_thread():
while True:
schedule.run_pending()
time.sleep(1)
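# Design note: the @schedule.repeat-decorated jobs above are registered at import
# time; main() starts schedule_thread() as a daemon thread so they run alongside
# bot.polling(). schedule.run_pending() only fires jobs whose interval has
# elapsed, so the one-second sleep simply bounds the scheduling resolution.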
def main():
import logging
telebot.logger.setLevel(logging.DEBUG)
settings.logger.set_level('debug')
settings.logger.info("Bot started")
threading.Thread(target=schedule_thread, daemon=True).start()
bot.polling()
settings.logger.info("Bot stopped")
###############################################################################
if __name__ == '__main__':
main()
| 35.719631 | 80 | 0.518924 |
1c89baba592db30451f7f61b82d96fd736b64e86 | 9,317 | py | Python | python/dask_cudf/dask_cudf/sorting.py | NVnavkumar/cudf | ef6a3907d0fc9223fe3f0e61b73a0ea9f5c7281f | [
"Apache-2.0"
] | 239 | 2018-10-10T09:55:22.000Z | 2018-10-28T20:47:23.000Z | python/dask_cudf/dask_cudf/sorting.py | NVnavkumar/cudf | ef6a3907d0fc9223fe3f0e61b73a0ea9f5c7281f | [
"Apache-2.0"
] | 25 | 2018-10-10T14:46:32.000Z | 2018-10-28T22:16:14.000Z | python/dask_cudf/dask_cudf/sorting.py | NVnavkumar/cudf | ef6a3907d0fc9223fe3f0e61b73a0ea9f5c7281f | [
"Apache-2.0"
] | 19 | 2018-10-10T12:42:51.000Z | 2018-10-26T16:33:22.000Z | # Copyright (c) 2020-2022, NVIDIA CORPORATION.
from collections.abc import Iterator
import cupy
import numpy as np
import tlz as toolz
from dask.base import tokenize
from dask.dataframe import methods
from dask.dataframe.core import DataFrame, Index, Series
from dask.dataframe.shuffle import rearrange_by_column
from dask.highlevelgraph import HighLevelGraph
from dask.utils import M
import cudf as gd
from cudf.api.types import is_categorical_dtype
from cudf.utils.utils import _dask_cudf_nvtx_annotate
@_dask_cudf_nvtx_annotate
def set_index_post(df, index_name, drop, column_dtype):
df2 = df.set_index(index_name, drop=drop)
df2.columns = df2.columns.astype(column_dtype)
return df2
@_dask_cudf_nvtx_annotate
def _set_partitions_pre(s, divisions, ascending=True, na_position="last"):
if ascending:
partitions = divisions.searchsorted(s, side="right") - 1
else:
partitions = (
len(divisions) - divisions.searchsorted(s, side="right") - 1
)
partitions[(partitions < 0) | (partitions >= len(divisions) - 1)] = (
0 if ascending else (len(divisions) - 2)
)
partitions[s._columns[0].isnull().values] = (
len(divisions) - 2 if na_position == "last" else 0
)
return partitions
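# Worked example for the ascending case (hypothetical divisions): with
# divisions == [0, 10, 20, 30] (three output partitions), a key of 15 gives
# searchsorted(..., side="right") == 2, hence partition 1; keys that fall
# outside the division range are reset by the bounds check above, and null keys
# are routed to the last partition (len(divisions) - 2) when na_position == "last".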
@_dask_cudf_nvtx_annotate
def _quantile(a, q):
n = len(a)
if not len(a):
return None, n
return (a.quantiles(q=q.tolist(), interpolation="nearest"), n)
@_dask_cudf_nvtx_annotate
def merge_quantiles(finalq, qs, vals):
"""Combine several quantile calculations of different data.
[NOTE: Same logic as dask.array merge_percentiles]
"""
if isinstance(finalq, Iterator):
finalq = list(finalq)
finalq = np.array(finalq)
qs = list(map(list, qs))
vals = list(vals)
vals, Ns = zip(*vals)
Ns = list(Ns)
L = list(zip(*[(q, val, N) for q, val, N in zip(qs, vals, Ns) if N]))
if not L:
raise ValueError("No non-trivial arrays found")
qs, vals, Ns = L
if len(vals) != len(qs) or len(Ns) != len(qs):
raise ValueError("qs, vals, and Ns parameters must be the same length")
# transform qs and Ns into number of observations between quantiles
counts = []
for q, N in zip(qs, Ns):
count = np.empty(len(q))
count[1:] = np.diff(q)
count[0] = q[0]
count *= N
counts.append(count)
def _append_counts(val, count):
val["_counts"] = count
return val
# Sort by calculated quantile values, then number of observations.
combined_vals_counts = gd.core.reshape._merge_sorted(
[*map(_append_counts, vals, counts)]
)
combined_counts = cupy.asnumpy(combined_vals_counts["_counts"].values)
combined_vals = combined_vals_counts.drop(columns=["_counts"])
# quantile-like, but scaled by total number of observations
combined_q = np.cumsum(combined_counts)
# rescale finalq quantiles to match combined_q
desired_q = finalq * sum(Ns)
# TODO: Support other interpolation methods
# For now - Always use "nearest" for interpolation
left = np.searchsorted(combined_q, desired_q, side="left")
right = np.searchsorted(combined_q, desired_q, side="right") - 1
np.minimum(left, len(combined_vals) - 1, left) # don't exceed max index
lower = np.minimum(left, right)
upper = np.maximum(left, right)
lower_residual = np.abs(combined_q[lower] - desired_q)
upper_residual = np.abs(combined_q[upper] - desired_q)
mask = lower_residual > upper_residual
index = lower # alias; we no longer need lower
index[mask] = upper[mask]
rv = combined_vals.iloc[index]
return rv.reset_index(drop=True)
@_dask_cudf_nvtx_annotate
def _approximate_quantile(df, q):
"""Approximate quantiles of DataFrame or Series.
[NOTE: Same logic as dask.dataframe Series quantile]
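    Illustrative call (added note, assumed shapes): something like
    _approximate_quantile(ddf[["x"]], np.array([0.0, 0.5, 1.0])) builds a lazy
    graph that computes per-partition quantiles via _quantile and combines
    them with merge_quantiles, returning one row per requested q.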
"""
# current implementation needs q to be sorted so
# sort if array-like, otherwise leave it alone
q_ndarray = np.array(q)
if q_ndarray.ndim > 0:
q_ndarray.sort(kind="mergesort")
q = q_ndarray
# Lets assume we are dealing with a DataFrame throughout
if isinstance(df, (Series, Index)):
df = df.to_frame()
assert isinstance(df, DataFrame)
final_type = df._meta._constructor
# Create metadata
meta = df._meta_nonempty.quantiles(q=q)
# Define final action (create df with quantiles as index)
def finalize_tsk(tsk):
return (final_type, tsk)
return_type = df.__class__
# pandas/cudf uses quantile in [0, 1]
# numpy / cupy uses [0, 100]
qs = np.asarray(q)
token = tokenize(df, qs)
if len(qs) == 0:
name = "quantiles-" + token
empty_index = gd.Index([], dtype=float)
return Series(
{
(name, 0): final_type(
{col: [] for col in df.columns},
name=df.name,
index=empty_index,
)
},
name,
df._meta,
[None, None],
)
else:
new_divisions = [np.min(q), np.max(q)]
name = "quantiles-1-" + token
val_dsk = {
(name, i): (_quantile, key, qs)
for i, key in enumerate(df.__dask_keys__())
}
name2 = "quantiles-2-" + token
merge_dsk = {
(name2, 0): finalize_tsk(
(merge_quantiles, qs, [qs] * df.npartitions, sorted(val_dsk))
)
}
dsk = toolz.merge(val_dsk, merge_dsk)
graph = HighLevelGraph.from_collections(name2, dsk, dependencies=[df])
df = return_type(graph, name2, meta, new_divisions)
def set_quantile_index(df):
df.index = q
return df
df = df.map_partitions(set_quantile_index, meta=meta)
return df
@_dask_cudf_nvtx_annotate
def quantile_divisions(df, by, npartitions):
qn = np.linspace(0.0, 1.0, npartitions + 1).tolist()
divisions = _approximate_quantile(df[by], qn).compute()
columns = divisions.columns
# TODO: Make sure divisions are correct for all dtypes..
if (
len(columns) == 1
and df[columns[0]].dtype != "object"
and not is_categorical_dtype(df[columns[0]].dtype)
):
dtype = df[columns[0]].dtype
divisions = divisions[columns[0]].astype("int64")
divisions.iloc[-1] += 1
divisions = sorted(
divisions.drop_duplicates().astype(dtype).to_arrow().tolist(),
key=lambda x: (x is None, x),
)
else:
for col in columns:
dtype = df[col].dtype
if dtype != "object":
divisions[col] = divisions[col].astype("int64")
divisions[col].iloc[-1] += 1
divisions[col] = divisions[col].astype(dtype)
else:
divisions[col].iloc[-1] = chr(
ord(divisions[col].iloc[-1][0]) + 1
)
divisions = divisions.drop_duplicates().sort_index()
return divisions
@_dask_cudf_nvtx_annotate
def sort_values(
df,
by,
max_branch=None,
divisions=None,
set_divisions=False,
ignore_index=False,
ascending=True,
na_position="last",
sort_function=None,
sort_function_kwargs=None,
):
"""Sort by the given list/tuple of column names."""
if not isinstance(ascending, bool):
raise ValueError("ascending must be either True or False")
if na_position not in ("first", "last"):
raise ValueError("na_position must be either 'first' or 'last'")
npartitions = df.npartitions
if isinstance(by, tuple):
by = list(by)
elif not isinstance(by, list):
by = [by]
# parse custom sort function / kwargs if provided
sort_kwargs = {
"by": by,
"ascending": ascending,
"na_position": na_position,
}
if sort_function is None:
sort_function = M.sort_values
if sort_function_kwargs is not None:
sort_kwargs.update(sort_function_kwargs)
# handle single partition case
if npartitions == 1:
return df.map_partitions(sort_function, **sort_kwargs)
# Step 1 - Calculate new divisions (if necessary)
if divisions is None:
divisions = quantile_divisions(df, by, npartitions)
# Step 2 - Perform repartitioning shuffle
meta = df._meta._constructor_sliced([0])
if not isinstance(divisions, (gd.Series, gd.DataFrame)):
dtype = df[by[0]].dtype
divisions = df._meta._constructor_sliced(divisions, dtype=dtype)
partitions = df[by].map_partitions(
_set_partitions_pre,
divisions=divisions,
ascending=ascending,
na_position=na_position,
meta=meta,
)
df2 = df.assign(_partitions=partitions)
df3 = rearrange_by_column(
df2,
"_partitions",
max_branch=max_branch,
npartitions=len(divisions) - 1,
shuffle="tasks",
ignore_index=ignore_index,
).drop(columns=["_partitions"])
df3.divisions = (None,) * (df3.npartitions + 1)
# Step 3 - Return final sorted df
df4 = df3.map_partitions(sort_function, **sort_kwargs)
if not isinstance(divisions, gd.DataFrame) and set_divisions:
# Can't have multi-column divisions elsewhere in dask (yet)
df4.divisions = tuple(methods.tolist(divisions))
return df4
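# Usage sketch (illustrative only, not part of the original module; the column
# names and partition count below are arbitrary assumptions).
def _sort_values_example():  # pragma: no cover
    import dask_cudf
    pdf = gd.DataFrame({"a": [3, 1, 2, 5, 4], "b": list("edcba")})
    ddf = dask_cudf.from_cudf(pdf, npartitions=2)
    # Shuffle-based sort on column "a"; divisions are derived from
    # approximate quantiles unless explicitly provided.
    return sort_values(ddf, by="a", ignore_index=True).compute()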
| 31.056667 | 79 | 0.634002 |
d7c2f376d2c67da32f38a4ad2cff6ec70f864d02 | 457 | py | Python | src/ToolChainSCDG/procedures/windows/custom_package/GetTickCount.py | AnonymousSEMA/SEMA-ToolChain | 05d6a7e43e10d4b1f6c5dfb70fbabeab3d4daf82 | [
"BSD-2-Clause"
] | null | null | null | src/ToolChainSCDG/procedures/windows/custom_package/GetTickCount.py | AnonymousSEMA/SEMA-ToolChain | 05d6a7e43e10d4b1f6c5dfb70fbabeab3d4daf82 | [
"BSD-2-Clause"
] | null | null | null | src/ToolChainSCDG/procedures/windows/custom_package/GetTickCount.py | AnonymousSEMA/SEMA-ToolChain | 05d6a7e43e10d4b1f6c5dfb70fbabeab3d4daf82 | [
"BSD-2-Clause"
] | null | null | null | import logging
import time as timer
import angr
lw = logging.getLogger("CustomSimProcedureWindows")
class GetTickCount(angr.SimProcedure):
def run(self):
if angr.options.USE_SYSTEM_TIMES in self.state.options:
return int(timer.perf_counter() * 1000) + 12345
else:
val = self.state.solver.Unconstrained(
"retval_{}".format(self.display_name), self.arch.bits
)
return val
| 26.882353 | 69 | 0.641138 |
b75603ef2e99139a984024f1dbf54e58384c333d | 2,736 | py | Python | tensor2tensor/envs/time_step.py | SamuelmsWong/tensor2tensor | 7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af | [
"Apache-2.0"
] | 3 | 2021-01-19T20:21:15.000Z | 2021-01-19T21:36:37.000Z | tensor2tensor/envs/time_step.py | SamuelmsWong/tensor2tensor | 7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af | [
"Apache-2.0"
] | null | null | null | tensor2tensor/envs/time_step.py | SamuelmsWong/tensor2tensor | 7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af | [
"Apache-2.0"
] | 1 | 2020-06-19T17:36:10.000Z | 2020-06-19T17:36:10.000Z | # coding=utf-8
# Copyright 2020 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TimeStep is a simple class that holds the information seen at a time-step.
Let:
r_t = Reward(s_{t-1}, a_{t-1}, s_t) - reward for getting into a state.
d_t = Done(s_t) - is this state terminal.
a_t = Action performed at state s_t
i_t = (optional) Dictionary of key, value pairs of miscellaneous data.
Then the sequence of states, actions and rewards looks like the following:
s0, a0/i0 s1/r1/d1, a1/i1 s2/r2/d2, a2/i2 s3/r3/d3, ...
TimeStep holds (s_t, d_t, r_t, a_t, i_t).
NOTE: When we call step on an environment at time-step t, we supply a_t and in
return the env gives us s_{t+1}, d_{t+1}, r_{t+1}
So, we'd have to add the actions a_t/i_t to the current time-step, but add the
observations, rewards and dones to a new time-step.
NOTE: wrt `info` - A good solution could be to have two additional fields in
TimeStep - structured algo_info (a namedtuple, possibly different for every
algorithm, or None if we don't use any) and unstructured env_info (a dict).
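Illustrative alignment (added note): for the sequence
s0, a0/i0 s1/r1/d1, a1/i1 s2/r2/d2, the TimeStep at index 1 holds
observation=s1, raw_reward=r1, done=d1 and action=a1, i.e. the reward/done
describe arriving in s1 while the action describes leaving it.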
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
class TimeStep(
collections.namedtuple(
"TimeStep",
["observation", "done", "raw_reward", "processed_reward", "action",
"info"])):
"""This class represents the time-step as mentioned above."""
def replace(self, **kwargs):
"""Exposes the underlying namedtuple replace."""
# NOTE: This RETURNS a NEW time-step with the replacements, i.e. doesn't
# modify self, since namedtuple is immutable.
# This allows this to be called like ts.replace(action=a, raw_reward=r) etc.
return self._replace(**kwargs)
@classmethod
def create_time_step(cls,
observation=None,
done=False,
raw_reward=None,
processed_reward=None,
action=None,
info=None):
"""Creates a TimeStep with both rewards and actions as optional."""
return cls(observation, done, raw_reward, processed_reward, action,
info)
| 35.532468 | 80 | 0.685307 |
c30846895d84b5ba7497e41bb9be62cc946070a4 | 1,151 | py | Python | ansible-tests/validations/library/overcloudrc.py | rthallisey/clapper | 7f6aeae9320c2c8b46c8f56d2a6191ecc6991e5b | [
"Apache-2.0"
] | 13 | 2015-10-19T02:02:23.000Z | 2019-01-03T09:07:08.000Z | ansible-tests/validations/library/overcloudrc.py | rthallisey/clapper | 7f6aeae9320c2c8b46c8f56d2a6191ecc6991e5b | [
"Apache-2.0"
] | 42 | 2015-09-04T18:02:17.000Z | 2016-12-20T14:47:09.000Z | ansible-tests/validations/library/overcloudrc.py | rthallisey/clapper | 7f6aeae9320c2c8b46c8f56d2a6191ecc6991e5b | [
"Apache-2.0"
] | 22 | 2015-07-27T16:37:59.000Z | 2019-04-09T02:04:10.000Z | #!/usr/bin/env python
from ansible.module_utils.basic import *
import os.path
import subprocess
def main():
module = AnsibleModule(argument_spec=dict(
path=dict(required=True, type='str'),
))
overcloudrc_path = module.params.get('path')
if not os.path.isfile(overcloudrc_path):
module.fail_json(
msg="The overcloudrc file at {} does not exist.".format(
overcloudrc_path))
# Use bash to source overcloudrc and print the environment:
command = ['bash', '-c', 'source ' + overcloudrc_path + ' && env']
proc = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if proc.wait() != 0:
msg = "Could not source '{}'. Return code: {}.\nSTDERR:\n{}".format(
overcloudrc_path, proc.returncode, proc.stderr.read())
module.fail_json(msg=msg)
facts = {}
for line in proc.stdout:
(key, _, value) = line.partition("=")
if key.startswith("OS_"):
facts[key] = value.rstrip()
module.exit_json(changed=False, ansible_facts={'overcloudrc': facts})
if __name__ == '__main__':
main()
| 28.073171 | 76 | 0.622068 |
747e183b5d4bc7d1c5d10d4e84f7398fd8aca8b9 | 5,779 | py | Python | src/toil/cwl/utils.py | w-gao/toil | 10fea969578f3438b47cfa3b26e2d73394f02e54 | [
"Apache-2.0"
] | null | null | null | src/toil/cwl/utils.py | w-gao/toil | 10fea969578f3438b47cfa3b26e2d73394f02e54 | [
"Apache-2.0"
] | null | null | null | src/toil/cwl/utils.py | w-gao/toil | 10fea969578f3438b47cfa3b26e2d73394f02e54 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions used for Toil's CWL interpreter.
"""
import logging
import os
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
MutableMapping,
MutableSequence,
Tuple,
TypeVar,
Union,
)
from toil.fileStores import FileID
from toil.fileStores.abstractFileStore import AbstractFileStore
logger = logging.getLogger(__name__)
# Customized CWL utilities
def visit_top_cwl_class(
rec: Any,
classes: Iterable[str],
op: Callable[[Any], Any]
) -> None:
"""
Apply the given operation to all top-level CWL objects with the given named CWL class.
Like cwltool's visit_class but doesn't look inside any object visited.
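    Illustrative example (hypothetical data, added note): with
    classes = ("File",) and
    rec = {"outputs": [{"class": "File", "location": "a.txt",
    "secondaryFiles": [...]}]},
    op is called on the outer File only; the contents of an already-matched
    object (here its secondaryFiles) are not searched.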
"""
if isinstance(rec, MutableMapping):
if rec.get("class", None) in classes:
# This is one of the classes requested
# So process it
op(rec)
else:
# Look inside it instead
for key in rec:
visit_top_cwl_class(rec[key], classes, op)
elif isinstance(rec, MutableSequence):
# This item is actually a list of things, so look at all of them.
for key in rec:
visit_top_cwl_class(key, classes, op)
DownReturnType = TypeVar('DownReturnType')
UpReturnType = TypeVar('UpReturnType')
def visit_cwl_class_and_reduce(
rec: Any,
classes: Iterable[str],
op_down: Callable[[Any], DownReturnType],
op_up: Callable[[Any, DownReturnType, List[UpReturnType]], UpReturnType]
) -> List[UpReturnType]:
"""
Apply the given operations to all CWL objects with the given named CWL class.
Applies the down operation top-down, and the up operation bottom-up, and
passes the down operation's result and a list of the up operation results
for all child keys (flattening across lists and collapsing nodes of
non-matching classes) to the up operation.
:returns: The flattened list of up operation results from all calls.
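    Illustrative sketch (hypothetical helpers, added note): for nested
    Directory objects one could call
    visit_cwl_class_and_reduce(rec, ("Directory",), lambda d: assign_id(d),
    lambda d, my_id, child_ids: (my_id, child_ids)), where assign_id is a
    made-up helper; each up call sees its own down result plus the up results
    of all matched descendants.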
"""
results = []
if isinstance(rec, MutableMapping):
child_results = []
if rec.get("class", None) in classes:
# Apply the down operation
down_result = op_down(rec)
for key in rec:
# Look inside and collect child results
for result in visit_cwl_class_and_reduce(rec[key], classes, op_down, op_up):
child_results.append(result)
if rec.get("class", None) in classes:
# Apply the up operation
results.append(op_up(rec, down_result, child_results))
else:
# We aren't processing here so pass up all the child results
results += child_results
elif isinstance(rec, MutableSequence):
# This item is actually a list of things, so look at all of them.
for key in rec:
for result in visit_cwl_class_and_reduce(key, classes, op_down, op_up):
# And flatten together all their results.
results.append(result)
return results
# Define a recursive type to represent a directory structure.
# The only problem is that MyPy can't yet type check recursive types like this.
# See: https://github.com/python/mypy/issues/731
# So we have to tell MyPy to ignore it.
DirectoryStructure = Dict[str, Union[str, 'DirectoryStructure']] # type: ignore
def download_structure(
file_store: AbstractFileStore,
index: Dict[str, str],
existing: Dict[str, str],
dir_dict: DirectoryStructure,
into_dir: str
) -> None:
"""
Download a whole nested dictionary of files and directories from the
Toil file store to a local path.
:param file_store: The Toil file store to download from.
:param index: Maps from downloaded file path back to input Toil URI.
:param existing: Maps from file_store_id URI to downloaded file path.
:param dir_dict: a dict from string to string (for files) or dict (for
subdirectories) describing a directory structure.
:param into_dir: The directory to download the top-level dict's files
into.
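    Illustrative dir_dict shape (hypothetical file IDs, added note):
    {"out": {"stats.txt": "toilfile:<FileID>", "plots": {"a.png":
    "toilfile:<FileID>"}}} would create out/stats.txt and out/plots/a.png
    under into_dir.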
"""
logger.debug("Downloading directory with %s items", len(dir_dict))
for name, value in dir_dict.items():
if name == '.':
# Skip this key that isn't a real child file.
continue
if isinstance(value, dict):
# This is a subdirectory, so make it and download
# its contents
logger.debug("Downloading subdirectory %s", name)
subdir = os.path.join(into_dir, name)
os.mkdir(subdir)
download_structure(file_store, index, existing, value, subdir)
else:
# This must be a file path uploaded to Toil.
assert isinstance(value, str)
assert value.startswith("toilfile:")
logger.debug("Downloading contained file %s", name)
dest_path = os.path.join(into_dir, name)
# So download the file into place
file_store.readGlobalFile(FileID.unpack(value[len("toilfile:"):]), dest_path, symlink=True)
# Update the index dicts
# TODO: why?
index[dest_path] = value
existing[value] = dest_path
| 35.89441 | 103 | 0.662225 |
19500153b863fb610c02de6000eced2a762b2294 | 514 | py | Python | food_ke/scripts/extract_spans.py | IBPA/FoodAtlas | 0a431f0a391adaa8984b380f3f6f7189f27b9311 | [
"Apache-2.0"
] | 1 | 2022-02-07T10:04:35.000Z | 2022-02-07T10:04:35.000Z | food_ke/scripts/extract_spans.py | IBPA/FoodAtlas | 0a431f0a391adaa8984b380f3f6f7189f27b9311 | [
"Apache-2.0"
] | null | null | null | food_ke/scripts/extract_spans.py | IBPA/FoodAtlas | 0a431f0a391adaa8984b380f3f6f7189f27b9311 | [
"Apache-2.0"
] | null | null | null | from dagster import composite_solid, pipeline, solid
from food_ke.scripts.modes import dev, prod
from food_ke.scripts.ner import (
chunk_articles,
get_articles,
get_spans_from_chunks,
)
@composite_solid(required_resource_defs={"ner_training_io_manager"})
def get_ner_training_data_composite_solid():
articles = get_articles()
get_spans_from_chunks(chunk_articles(articles))
@pipeline(mode_defs=[dev, prod])
def get_ner_training_data_pipeline():
get_ner_training_data_composite_solid()
| 25.7 | 68 | 0.801556 |
198e59dfb16faa617db9927bed981bd111ff115c | 735 | py | Python | orderprocessing/orders/models/order.py | iomegak12/intel-training-usecase-1 | 0d1ab6f6076f46f7fbb290ceb41d6b851da1af3a | [
"MIT"
] | null | null | null | orderprocessing/orders/models/order.py | iomegak12/intel-training-usecase-1 | 0d1ab6f6076f46f7fbb290ceb41d6b851da1af3a | [
"MIT"
] | null | null | null | orderprocessing/orders/models/order.py | iomegak12/intel-training-usecase-1 | 0d1ab6f6076f46f7fbb290ceb41d6b851da1af3a | [
"MIT"
] | null | null | null | class Order:
def __init__(self, orderId: int, orderDate, customerId: int, productId: int, unitsOrdered: int, remarks):
self.orderId = orderId
self.orderDate = orderDate
self.customerId = customerId
self.productId = productId
self.unitsOrdered = unitsOrdered
self.remarks = remarks
def __str__(self):
return '{}, {}, {}, {}, {}, {}'.format(self.orderId,
self.orderDate,
self.customerId,
self.productId,
self.unitsOrdered,
self.remarks)
| 43.235294 | 109 | 0.444898 |
18ca5cf3e8776d3b1a3fd8ea0290c9826199544d | 32,786 | py | Python | netbox/dcim/models/device_components.py | v0tti/netbox | a9a20bf7195ba41b353b7d9b2989b643fea78ffd | [
"Apache-2.0"
] | null | null | null | netbox/dcim/models/device_components.py | v0tti/netbox | a9a20bf7195ba41b353b7d9b2989b643fea78ffd | [
"Apache-2.0"
] | null | null | null | netbox/dcim/models/device_components.py | v0tti/netbox | a9a20bf7195ba41b353b7d9b2989b643fea78ffd | [
"Apache-2.0"
] | null | null | null | import logging
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models import Sum
from django.urls import reverse
from taggit.managers import TaggableManager
from dcim.choices import *
from dcim.constants import *
from dcim.exceptions import CableTraceSplit
from dcim.fields import MACAddressField
from extras.models import ObjectChange, TaggedItem
from extras.utils import extras_features
from utilities.fields import NaturalOrderingField
from utilities.ordering import naturalize_interface
from utilities.querysets import RestrictedQuerySet
from utilities.query_functions import CollateAsChar
from utilities.utils import serialize_object
__all__ = (
'BaseInterface',
'CableTermination',
'ConsolePort',
'ConsoleServerPort',
'DeviceBay',
'FrontPort',
'Interface',
'InventoryItem',
'PowerOutlet',
'PowerPort',
'RearPort',
)
class ComponentModel(models.Model):
device = models.ForeignKey(
to='dcim.Device',
on_delete=models.CASCADE,
related_name='%(class)ss'
)
name = models.CharField(
max_length=64
)
_name = NaturalOrderingField(
target_field='name',
max_length=100,
blank=True
)
label = models.CharField(
max_length=64,
blank=True,
help_text="Physical label"
)
description = models.CharField(
max_length=200,
blank=True
)
objects = RestrictedQuerySet.as_manager()
class Meta:
abstract = True
def __str__(self):
if self.label:
return f"{self.name} ({self.label})"
return self.name
def to_objectchange(self, action):
# Annotate the parent Device
try:
device = self.device
except ObjectDoesNotExist:
# The parent Device has already been deleted
device = None
return ObjectChange(
changed_object=self,
object_repr=str(self),
action=action,
related_object=device,
object_data=serialize_object(self)
)
@property
def parent(self):
return getattr(self, 'device', None)
class CableTermination(models.Model):
cable = models.ForeignKey(
to='dcim.Cable',
on_delete=models.SET_NULL,
related_name='+',
blank=True,
null=True
)
# Generic relations to Cable. These ensure that an attached Cable is deleted if the terminated object is deleted.
_cabled_as_a = GenericRelation(
to='dcim.Cable',
content_type_field='termination_a_type',
object_id_field='termination_a_id'
)
_cabled_as_b = GenericRelation(
to='dcim.Cable',
content_type_field='termination_b_type',
object_id_field='termination_b_id'
)
class Meta:
abstract = True
def trace(self):
"""
Return three items: the traceable portion of a cable path, the termination points where it splits (if any), and
the remaining positions on the position stack (if any). Splits occur when the trace is initiated from a midpoint
along a path which traverses a RearPort. In cases where the originating endpoint is unknown, it is not possible
to know which corresponding FrontPort to follow. Remaining positions occur when tracing a path that traverses
a FrontPort without traversing a RearPort again.
The path is a list representing a complete cable path, with each individual segment represented as a
three-tuple:
[
(termination A, cable, termination B),
(termination C, cable, termination D),
(termination E, cable, termination F)
]
"""
endpoint = self
path = []
position_stack = []
def get_peer_port(termination):
from circuits.models import CircuitTermination
# Map a front port to its corresponding rear port
if isinstance(termination, FrontPort):
# Retrieve the corresponding RearPort from database to ensure we have an up-to-date instance
peer_port = RearPort.objects.get(pk=termination.rear_port.pk)
# Don't use the stack for RearPorts with a single position. Only remember the position at
# many-to-one points so we can select the correct FrontPort when we reach the corresponding
# one-to-many point.
if peer_port.positions > 1:
position_stack.append(termination)
return peer_port
# Map a rear port/position to its corresponding front port
elif isinstance(termination, RearPort):
if termination.positions > 1:
# Can't map to a FrontPort without a position if there are multiple options
if not position_stack:
raise CableTraceSplit(termination)
front_port = position_stack.pop()
position = front_port.rear_port_position
# Validate the position
if position not in range(1, termination.positions + 1):
                        raise Exception("Invalid position for {} ({} positions): {}".format(
termination, termination.positions, position
))
else:
# Don't use the stack for RearPorts with a single position. The only possible position is 1.
position = 1
try:
peer_port = FrontPort.objects.get(
rear_port=termination,
rear_port_position=position,
)
return peer_port
except ObjectDoesNotExist:
return None
# Follow a circuit to its other termination
elif isinstance(termination, CircuitTermination):
peer_termination = termination.get_peer_termination()
if peer_termination is None:
return None
return peer_termination
# Termination is not a pass-through port
else:
return None
logger = logging.getLogger('netbox.dcim.cable.trace')
logger.debug("Tracing cable from {} {}".format(self.parent, self))
while endpoint is not None:
# No cable connected; nothing to trace
if not endpoint.cable:
path.append((endpoint, None, None))
logger.debug("No cable connected")
return path, None, position_stack
# Check for loops
if endpoint.cable in [segment[1] for segment in path]:
logger.debug("Loop detected!")
return path, None, position_stack
# Record the current segment in the path
far_end = endpoint.get_cable_peer()
path.append((endpoint, endpoint.cable, far_end))
logger.debug("{}[{}] --- Cable {} ---> {}[{}]".format(
endpoint.parent, endpoint, endpoint.cable.pk, far_end.parent, far_end
))
# Get the peer port of the far end termination
try:
endpoint = get_peer_port(far_end)
except CableTraceSplit as e:
return path, e.termination.frontports.all(), position_stack
if endpoint is None:
return path, None, position_stack
def get_cable_peer(self):
if self.cable is None:
return None
if self._cabled_as_a.exists():
return self.cable.termination_b
if self._cabled_as_b.exists():
return self.cable.termination_a
def get_path_endpoints(self):
"""
Return all endpoints of paths which traverse this object.
"""
endpoints = []
# Get the far end of the last path segment
path, split_ends, position_stack = self.trace()
endpoint = path[-1][2]
if split_ends is not None:
for termination in split_ends:
endpoints.extend(termination.get_path_endpoints())
elif endpoint is not None:
endpoints.append(endpoint)
return endpoints
#
# Console ports
#
@extras_features('export_templates', 'webhooks')
class ConsolePort(CableTermination, ComponentModel):
"""
A physical console port within a Device. ConsolePorts connect to ConsoleServerPorts.
"""
type = models.CharField(
max_length=50,
choices=ConsolePortTypeChoices,
blank=True,
help_text='Physical port type'
)
connected_endpoint = models.OneToOneField(
to='dcim.ConsoleServerPort',
on_delete=models.SET_NULL,
related_name='connected_endpoint',
blank=True,
null=True
)
connection_status = models.BooleanField(
choices=CONNECTION_STATUS_CHOICES,
blank=True,
null=True
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:consoleport', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.type,
self.description,
)
#
# Console server ports
#
@extras_features('webhooks')
class ConsoleServerPort(CableTermination, ComponentModel):
"""
A physical port within a Device (typically a designated console server) which provides access to ConsolePorts.
"""
type = models.CharField(
max_length=50,
choices=ConsolePortTypeChoices,
blank=True,
help_text='Physical port type'
)
connection_status = models.BooleanField(
choices=CONNECTION_STATUS_CHOICES,
blank=True,
null=True
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:consoleserverport', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.type,
self.description,
)
#
# Power ports
#
@extras_features('export_templates', 'webhooks')
class PowerPort(CableTermination, ComponentModel):
"""
A physical power supply (intake) port within a Device. PowerPorts connect to PowerOutlets.
"""
type = models.CharField(
max_length=50,
choices=PowerPortTypeChoices,
blank=True,
help_text='Physical port type'
)
maximum_draw = models.PositiveSmallIntegerField(
blank=True,
null=True,
validators=[MinValueValidator(1)],
help_text="Maximum power draw (watts)"
)
allocated_draw = models.PositiveSmallIntegerField(
blank=True,
null=True,
validators=[MinValueValidator(1)],
help_text="Allocated power draw (watts)"
)
_connected_poweroutlet = models.OneToOneField(
to='dcim.PowerOutlet',
on_delete=models.SET_NULL,
related_name='connected_endpoint',
blank=True,
null=True
)
_connected_powerfeed = models.OneToOneField(
to='dcim.PowerFeed',
on_delete=models.SET_NULL,
related_name='+',
blank=True,
null=True
)
connection_status = models.BooleanField(
choices=CONNECTION_STATUS_CHOICES,
blank=True,
null=True
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:powerport', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.get_type_display(),
self.maximum_draw,
self.allocated_draw,
self.description,
)
@property
def connected_endpoint(self):
"""
Return the connected PowerOutlet, if it exists, or the connected PowerFeed, if it exists. We have to check for
ObjectDoesNotExist in case the referenced object has been deleted from the database.
"""
try:
if self._connected_poweroutlet:
return self._connected_poweroutlet
except ObjectDoesNotExist:
pass
try:
if self._connected_powerfeed:
return self._connected_powerfeed
except ObjectDoesNotExist:
pass
return None
@connected_endpoint.setter
def connected_endpoint(self, value):
# TODO: Fix circular import
from . import PowerFeed
if value is None:
self._connected_poweroutlet = None
self._connected_powerfeed = None
elif isinstance(value, PowerOutlet):
self._connected_poweroutlet = value
self._connected_powerfeed = None
elif isinstance(value, PowerFeed):
self._connected_poweroutlet = None
self._connected_powerfeed = value
else:
raise ValueError(
"Connected endpoint must be a PowerOutlet or PowerFeed, not {}.".format(type(value))
)
def get_power_draw(self):
"""
Return the allocated and maximum power draw (in VA) and child PowerOutlet count for this PowerPort.
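        Illustrative return shape (hypothetical numbers, added note):
        {"allocated": 1200, "maximum": 1600, "outlet_count": 8, "legs": [...]};
        "legs" carries per-leg dicts of the same form and is populated only
        for three-phase feeds when no draws are defined manually.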
"""
# Calculate aggregate draw of all child power outlets if no numbers have been defined manually
if self.allocated_draw is None and self.maximum_draw is None:
outlet_ids = PowerOutlet.objects.filter(power_port=self).values_list('pk', flat=True)
utilization = PowerPort.objects.filter(_connected_poweroutlet_id__in=outlet_ids).aggregate(
maximum_draw_total=Sum('maximum_draw'),
allocated_draw_total=Sum('allocated_draw'),
)
ret = {
'allocated': utilization['allocated_draw_total'] or 0,
'maximum': utilization['maximum_draw_total'] or 0,
'outlet_count': len(outlet_ids),
'legs': [],
}
# Calculate per-leg aggregates for three-phase feeds
if self._connected_powerfeed and self._connected_powerfeed.phase == PowerFeedPhaseChoices.PHASE_3PHASE:
for leg, leg_name in PowerOutletFeedLegChoices:
outlet_ids = PowerOutlet.objects.filter(power_port=self, feed_leg=leg).values_list('pk', flat=True)
utilization = PowerPort.objects.filter(_connected_poweroutlet_id__in=outlet_ids).aggregate(
maximum_draw_total=Sum('maximum_draw'),
allocated_draw_total=Sum('allocated_draw'),
)
ret['legs'].append({
'name': leg_name,
'allocated': utilization['allocated_draw_total'] or 0,
'maximum': utilization['maximum_draw_total'] or 0,
'outlet_count': len(outlet_ids),
})
return ret
# Default to administratively defined values
return {
'allocated': self.allocated_draw or 0,
'maximum': self.maximum_draw or 0,
'outlet_count': PowerOutlet.objects.filter(power_port=self).count(),
'legs': [],
}
#
# Power outlets
#
@extras_features('webhooks')
class PowerOutlet(CableTermination, ComponentModel):
"""
A physical power outlet (output) within a Device which provides power to a PowerPort.
"""
type = models.CharField(
max_length=50,
choices=PowerOutletTypeChoices,
blank=True,
help_text='Physical port type'
)
power_port = models.ForeignKey(
to='dcim.PowerPort',
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name='poweroutlets'
)
feed_leg = models.CharField(
max_length=50,
choices=PowerOutletFeedLegChoices,
blank=True,
help_text="Phase (for three-phase feeds)"
)
connection_status = models.BooleanField(
choices=CONNECTION_STATUS_CHOICES,
blank=True,
null=True
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'power_port', 'feed_leg', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:poweroutlet', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.get_type_display(),
self.power_port.name if self.power_port else None,
self.get_feed_leg_display(),
self.description,
)
def clean(self):
# Validate power port assignment
if self.power_port and self.power_port.device != self.device:
raise ValidationError(
"Parent power port ({}) must belong to the same device".format(self.power_port)
)
#
# Interfaces
#
class BaseInterface(models.Model):
"""
Abstract base class for fields shared by dcim.Interface and virtualization.VMInterface.
"""
enabled = models.BooleanField(
default=True
)
mac_address = MACAddressField(
null=True,
blank=True,
verbose_name='MAC Address'
)
mtu = models.PositiveIntegerField(
blank=True,
null=True,
validators=[MinValueValidator(1), MaxValueValidator(65536)],
verbose_name='MTU'
)
mode = models.CharField(
max_length=50,
choices=InterfaceModeChoices,
blank=True
)
class Meta:
abstract = True
@extras_features('graphs', 'export_templates', 'webhooks')
class Interface(CableTermination, ComponentModel, BaseInterface):
"""
A network interface within a Device. A physical Interface can connect to exactly one other Interface.
"""
# Override ComponentModel._name to specify naturalize_interface function
_name = NaturalOrderingField(
target_field='name',
naturalize_function=naturalize_interface,
max_length=100,
blank=True
)
_connected_interface = models.OneToOneField(
to='self',
on_delete=models.SET_NULL,
related_name='+',
blank=True,
null=True
)
_connected_circuittermination = models.OneToOneField(
to='circuits.CircuitTermination',
on_delete=models.SET_NULL,
related_name='+',
blank=True,
null=True
)
connection_status = models.BooleanField(
choices=CONNECTION_STATUS_CHOICES,
blank=True,
null=True
)
lag = models.ForeignKey(
to='self',
on_delete=models.SET_NULL,
related_name='member_interfaces',
null=True,
blank=True,
verbose_name='Parent LAG'
)
type = models.CharField(
max_length=50,
choices=InterfaceTypeChoices
)
mgmt_only = models.BooleanField(
default=False,
verbose_name='OOB Management',
help_text='This interface is used only for out-of-band management'
)
untagged_vlan = models.ForeignKey(
to='ipam.VLAN',
on_delete=models.SET_NULL,
related_name='interfaces_as_untagged',
null=True,
blank=True,
verbose_name='Untagged VLAN'
)
tagged_vlans = models.ManyToManyField(
to='ipam.VLAN',
related_name='interfaces_as_tagged',
blank=True,
verbose_name='Tagged VLANs'
)
ip_addresses = GenericRelation(
to='ipam.IPAddress',
content_type_field='assigned_object_type',
object_id_field='assigned_object_id',
related_query_name='interface'
)
tags = TaggableManager(through=TaggedItem)
csv_headers = [
'device', 'name', 'label', 'lag', 'type', 'enabled', 'mac_address', 'mtu', 'mgmt_only', 'description', 'mode',
]
class Meta:
ordering = ('device', CollateAsChar('_name'))
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:interface', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier if self.device else None,
self.name,
self.label,
self.lag.name if self.lag else None,
self.get_type_display(),
self.enabled,
self.mac_address,
self.mtu,
self.mgmt_only,
self.description,
self.get_mode_display(),
)
def clean(self):
# Virtual interfaces cannot be connected
if self.type in NONCONNECTABLE_IFACE_TYPES and (
self.cable or getattr(self, 'circuit_termination', False)
):
raise ValidationError({
'type': "Virtual and wireless interfaces cannot be connected to another interface or circuit. "
"Disconnect the interface or choose a suitable type."
})
# An interface's LAG must belong to the same device or virtual chassis
if self.lag and self.lag.device != self.device:
if self.device.virtual_chassis is None:
raise ValidationError({
'lag': f"The selected LAG interface ({self.lag}) belongs to a different device ({self.lag.device})."
})
elif self.lag.device.virtual_chassis != self.device.virtual_chassis:
raise ValidationError({
'lag': f"The selected LAG interface ({self.lag}) belongs to {self.lag.device}, which is not part "
f"of virtual chassis {self.device.virtual_chassis}."
})
# A virtual interface cannot have a parent LAG
if self.type == InterfaceTypeChoices.TYPE_VIRTUAL and self.lag is not None:
raise ValidationError({'lag': "Virtual interfaces cannot have a parent LAG interface."})
# A LAG interface cannot be its own parent
if self.pk and self.lag_id == self.pk:
raise ValidationError({'lag': "A LAG interface cannot be its own parent."})
# Validate untagged VLAN
if self.untagged_vlan and self.untagged_vlan.site not in [self.parent.site, None]:
raise ValidationError({
'untagged_vlan': "The untagged VLAN ({}) must belong to the same site as the interface's parent "
"device, or it must be global".format(self.untagged_vlan)
})
def save(self, *args, **kwargs):
# Remove untagged VLAN assignment for non-802.1Q interfaces
if self.mode is None:
self.untagged_vlan = None
# Only "tagged" interfaces may have tagged VLANs assigned. ("tagged all" implies all VLANs are assigned.)
if self.pk and self.mode != InterfaceModeChoices.MODE_TAGGED:
self.tagged_vlans.clear()
return super().save(*args, **kwargs)
@property
def connected_endpoint(self):
"""
Return the connected Interface, if it exists, or the connected CircuitTermination, if it exists. We have to
check for ObjectDoesNotExist in case the referenced object has been deleted from the database.
"""
try:
if self._connected_interface:
return self._connected_interface
except ObjectDoesNotExist:
pass
try:
if self._connected_circuittermination:
return self._connected_circuittermination
except ObjectDoesNotExist:
pass
return None
@connected_endpoint.setter
def connected_endpoint(self, value):
from circuits.models import CircuitTermination
if value is None:
self._connected_interface = None
self._connected_circuittermination = None
elif isinstance(value, Interface):
self._connected_interface = value
self._connected_circuittermination = None
elif isinstance(value, CircuitTermination):
self._connected_interface = None
self._connected_circuittermination = value
else:
raise ValueError(
"Connected endpoint must be an Interface or CircuitTermination, not {}.".format(type(value))
)
@property
def parent(self):
return self.device
@property
def is_connectable(self):
return self.type not in NONCONNECTABLE_IFACE_TYPES
@property
def is_virtual(self):
return self.type in VIRTUAL_IFACE_TYPES
@property
def is_wireless(self):
return self.type in WIRELESS_IFACE_TYPES
@property
def is_lag(self):
return self.type == InterfaceTypeChoices.TYPE_LAG
@property
def count_ipaddresses(self):
return self.ip_addresses.count()
#
# Pass-through ports
#
@extras_features('webhooks')
class FrontPort(CableTermination, ComponentModel):
"""
A pass-through port on the front of a Device.
"""
type = models.CharField(
max_length=50,
choices=PortTypeChoices
)
rear_port = models.ForeignKey(
to='dcim.RearPort',
on_delete=models.CASCADE,
related_name='frontports'
)
rear_port_position = models.PositiveSmallIntegerField(
default=1,
validators=[
MinValueValidator(REARPORT_POSITIONS_MIN),
MaxValueValidator(REARPORT_POSITIONS_MAX)
]
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'rear_port', 'rear_port_position', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = (
('device', 'name'),
('rear_port', 'rear_port_position'),
)
def get_absolute_url(self):
return reverse('dcim:frontport', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.get_type_display(),
self.rear_port.name,
self.rear_port_position,
self.description,
)
def clean(self):
# Validate rear port assignment
if self.rear_port.device != self.device:
raise ValidationError(
"Rear port ({}) must belong to the same device".format(self.rear_port)
)
# Validate rear port position assignment
if self.rear_port_position > self.rear_port.positions:
raise ValidationError(
"Invalid rear port position ({}); rear port {} has only {} positions".format(
self.rear_port_position, self.rear_port.name, self.rear_port.positions
)
)
@extras_features('webhooks')
class RearPort(CableTermination, ComponentModel):
"""
A pass-through port on the rear of a Device.
"""
type = models.CharField(
max_length=50,
choices=PortTypeChoices
)
positions = models.PositiveSmallIntegerField(
default=1,
validators=[
MinValueValidator(REARPORT_POSITIONS_MIN),
MaxValueValidator(REARPORT_POSITIONS_MAX)
]
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'type', 'positions', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:rearport', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.get_type_display(),
self.positions,
self.description,
)
#
# Device bays
#
@extras_features('webhooks')
class DeviceBay(ComponentModel):
"""
An empty space within a Device which can house a child device
"""
installed_device = models.OneToOneField(
to='dcim.Device',
on_delete=models.SET_NULL,
related_name='parent_bay',
blank=True,
null=True
)
tags = TaggableManager(through=TaggedItem)
csv_headers = ['device', 'name', 'label', 'installed_device', 'description']
class Meta:
ordering = ('device', '_name')
unique_together = ('device', 'name')
def get_absolute_url(self):
return reverse('dcim:devicebay', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.identifier,
self.name,
self.label,
self.installed_device.identifier if self.installed_device else None,
self.description,
)
def clean(self):
# Validate that the parent Device can have DeviceBays
if not self.device.device_type.is_parent_device:
raise ValidationError("This type of device ({}) does not support device bays.".format(
self.device.device_type
))
# Cannot install a device into itself, obviously
if self.device == self.installed_device:
raise ValidationError("Cannot install a device into itself.")
# Check that the installed device is not already installed elsewhere
if self.installed_device:
current_bay = DeviceBay.objects.filter(installed_device=self.installed_device).first()
if current_bay and current_bay != self:
raise ValidationError({
'installed_device': "Cannot install the specified device; device is already installed in {}".format(
current_bay
)
})
#
# Inventory items
#
@extras_features('export_templates', 'webhooks')
class InventoryItem(ComponentModel):
"""
An InventoryItem represents a serialized piece of hardware within a Device, such as a line card or power supply.
InventoryItems are used only for inventory purposes.
"""
parent = models.ForeignKey(
to='self',
on_delete=models.CASCADE,
related_name='child_items',
blank=True,
null=True
)
manufacturer = models.ForeignKey(
to='dcim.Manufacturer',
on_delete=models.PROTECT,
related_name='inventory_items',
blank=True,
null=True
)
part_id = models.CharField(
max_length=50,
verbose_name='Part ID',
blank=True,
help_text='Manufacturer-assigned part identifier'
)
serial = models.CharField(
max_length=50,
verbose_name='Serial number',
blank=True
)
asset_tag = models.CharField(
max_length=50,
unique=True,
blank=True,
null=True,
verbose_name='Asset tag',
help_text='A unique tag used to identify this item'
)
discovered = models.BooleanField(
default=False,
help_text='This item was automatically discovered'
)
tags = TaggableManager(through=TaggedItem)
csv_headers = [
'device', 'name', 'label', 'manufacturer', 'part_id', 'serial', 'asset_tag', 'discovered', 'description',
]
class Meta:
ordering = ('device__id', 'parent__id', '_name')
unique_together = ('device', 'parent', 'name')
def get_absolute_url(self):
return reverse('dcim:inventoryitem', kwargs={'pk': self.pk})
def to_csv(self):
return (
self.device.name or '{{{}}}'.format(self.device.pk),
self.name,
self.label,
self.manufacturer.name if self.manufacturer else None,
self.part_id,
self.serial,
self.asset_tag,
self.discovered,
self.description,
)
| 31.862002 | 120 | 0.60718 |
a0bcf1a98bec279eff34eb1cd30da2b38ae7be3c | 1,259 | py | Python | V/tool.py | lllllaurel/jujiaodata | 96bbd9386ba0746a0cccd0f1976098115f74ffb3 | [
"MIT"
] | null | null | null | V/tool.py | lllllaurel/jujiaodata | 96bbd9386ba0746a0cccd0f1976098115f74ffb3 | [
"MIT"
] | null | null | null | V/tool.py | lllllaurel/jujiaodata | 96bbd9386ba0746a0cccd0f1976098115f74ffb3 | [
"MIT"
] | null | null | null | """
@author: laurel
@file: tool.py
@time: 2019/05/15
@description: tools
"""
from django.shortcuts import render, render_to_response
from django.http import JsonResponse,HttpResponse, HttpResponseRedirect
from syapp.models import YsArticle, Ips, Logs
from django import forms
from syapp.models import UserMain
from django.db.models import Sum
import time,hashlib,os,datetime
import json,math
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def geoDistanceLayout(request):
return render(request, 'tools.html', {})
def geoDistance(request):
geohash1 = request.GET.get('geohash1')
geohash2 = request.GET.get('geohash2')
hashlist1 = geohash1.split(',')
hashlist2 = geohash2.split(',')
if len(hashlist1) != 2 or len(hashlist2) != 2:
return HttpResponse(-1)
return HttpResponse(EarthDistance(float(hashlist1[1]), float(hashlist1[0]), float(hashlist2[1]), float(hashlist2[0])))
def EarthDistance(lat1,lng1,lat2,lng2):
radius = float(6371000)
rad = math.pi/180.0
lat1 = lat1*rad
lng1 = lng1*rad
lat2 = lat2*rad
lng2 = lng2*rad
theta = lng2-lng1
dist = math.acos(math.sin(lat1)*math.sin(lat2)+math.cos(lat1)*math.cos(lat2)*math.cos(theta))
return dist*radius
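# Worked example (illustrative, not part of the original module): with the
# spherical law of cosines above and radius = 6371000 m, two points one degree
# of longitude apart on the equator, e.g. EarthDistance(0, 0, 0, 1), give
# roughly 6371000 * pi / 180, i.e. about 111195 m.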
| 29.27907 | 122 | 0.713264 |
6a93c1412d419f3bb810c2747bbef9124116c50f | 2,397 | py | Python | server/swagger_server/models/notification_list.py | kakwa/certascale | 0df8da0f518506500117152fd0e28ee3286949af | [
"MIT"
] | null | null | null | server/swagger_server/models/notification_list.py | kakwa/certascale | 0df8da0f518506500117152fd0e28ee3286949af | [
"MIT"
] | null | null | null | server/swagger_server/models/notification_list.py | kakwa/certascale | 0df8da0f518506500117152fd0e28ee3286949af | [
"MIT"
] | 2 | 2020-11-04T03:07:00.000Z | 2020-11-05T08:14:33.000Z | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.notification import Notification # noqa: F401,E501
from swagger_server import util
class NotificationList(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, list: List[Notification]=None, next_id: int=None): # noqa: E501
"""NotificationList - a model defined in Swagger
:param list: The list of this NotificationList. # noqa: E501
:type list: List[Notification]
:param next_id: The next_id of this NotificationList. # noqa: E501
:type next_id: int
"""
self.swagger_types = {
'list': List[Notification],
'next_id': int
}
self.attribute_map = {
'list': 'list',
'next_id': 'next_id'
}
self._list = list
self._next_id = next_id
@classmethod
def from_dict(cls, dikt) -> 'NotificationList':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The NotificationList of this NotificationList. # noqa: E501
:rtype: NotificationList
"""
return util.deserialize_model(dikt, cls)
@property
def list(self) -> List[Notification]:
"""Gets the list of this NotificationList.
:return: The list of this NotificationList.
:rtype: List[Notification]
"""
return self._list
@list.setter
def list(self, list: List[Notification]):
"""Sets the list of this NotificationList.
:param list: The list of this NotificationList.
:type list: List[Notification]
"""
self._list = list
@property
def next_id(self) -> int:
"""Gets the next_id of this NotificationList.
:return: The next_id of this NotificationList.
:rtype: int
"""
return self._next_id
@next_id.setter
def next_id(self, next_id: int):
"""Sets the next_id of this NotificationList.
:param next_id: The next_id of this NotificationList.
:type next_id: int
"""
self._next_id = next_id
| 26.054348 | 87 | 0.618273 |
5657d8b2917ccfae3102c8c80af5d467d2efe519 | 4,351 | py | Python | conveyor.py | AndrBRus/Auto-manufacture-simulation | 0bffcc2109804da8be3d71d67e2881ec52030707 | [
"MIT"
] | null | null | null | conveyor.py | AndrBRus/Auto-manufacture-simulation | 0bffcc2109804da8be3d71d67e2881ec52030707 | [
"MIT"
] | null | null | null | conveyor.py | AndrBRus/Auto-manufacture-simulation | 0bffcc2109804da8be3d71d67e2881ec52030707 | [
"MIT"
] | null | null | null | # This module contains info about conveyor and actions on it.
# This class describes the conveyor,
# also all the functions that it can perform.
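# Usage sketch (illustrative, not part of the original module; the car object,
# the 'engine' part name and the parts_data structure are assumed to come from
# the rest of the simulation):
#   line = conveyor()
#   line.add_car(car) # put a car on the line
#   line.stop(parts_data, 'engine') # a part ran out, wait for delivery
#   line.edit_stop_time(60, parts_data) # advance the clock by 60 time units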
class conveyor(object):
# class constructor
def __init__(self):
self.work_status = True # status of conveyor work: True if conveyor is moving else False
self.cars_on_conveyor = list() # list with cars on conveyor
self.stop_time = 0 # time to delivery details
        self.stop_detail = list() # details that were stopping production
# return status of conveyor work
def status(self):
return self.work_status
# return status of number of cars now on conveyor
def numb_cars(self):
return len(self.cars_on_conveyor)
def numb_cars_on_conveyor(self):
numb_cars = 0
for iter in range(0, len(self.cars_on_conveyor)):
            if self.cars_on_conveyor[iter].status != 'MD' and self.cars_on_conveyor[iter].status != 'DS':
numb_cars += 1
return numb_cars
# add car to conveyor
def add_car(self, car):
self.cars_on_conveyor.append(car)
# add time to stop conveyor until delivery details
def stop(self, part_details, part_name):
        # if more than one detail needs to be delivered
if type(part_name) == type(list()):
time_delivery_list = list()
for iter in part_name:
time_delivery_list.append(part_details.parts_dict[iter][5] * 60)
self.stop_detail.append(iter)
            # if the current stop time until delivery is less than the new detail's delivery time
if self.work_status == False and max(time_delivery_list) > self.stop_time:
self.stop_time = max(time_delivery_list) # edit stopping time
# else if conveyor is moving
elif self.work_status == True:
self.work_status = False # stop conveyor
self.stop_time = max(time_delivery_list) # add stopping time
# else one detail
else:
            # if the current stop time until delivery is less than the new detail's delivery time
if self.work_status == False and part_details.parts_dict[part_name][5] * 60 > self.stop_time:
self.stop_time = part_details.parts_dict[part_name][5] * 60 # edit stopping time
# else if conveyor is moving
elif self.work_status == True:
self.stop_time = part_details.parts_dict[part_name][5] * 60 # add time to stopping time
self.work_status = False # stop moving
self.stop_detail.append(part_name) # add name to stopping detail
    # this function starts the conveyor and restores stocks of all details
def start(self, parts_data):
self.stop_time = 0 # reset stop time
self.work_status = True # conveyor is moving
# if more than one detail in stopping list
if type(self.stop_detail) == type(list()):
# for each detail in stopping list
for iter in self.stop_detail:
parts_data.parts_dict[iter][0] += parts_data.parts_dict[iter][6] # delivery parts to warehouse
            # reset the list of stopping details
            self.stop_detail = list()
# if one detail in stopping list
else:
parts_data.parts_dict[self.stop_detail][0] += parts_data.parts_dict[self.stop_detail][6] # delivery parts to warehouse
    # this function reduces the stop time and starts the conveyor if the stop time is up
def edit_stop_time(self, time_interval, parts_data):
self.stop_time -= time_interval # reduce the stop time by time interval from "config_data" class
# if stop time is up
if self.stop_time <= 0:
self.start(parts_data) # start conveyor
# this function checks for finished machines or machines for disposal
def ready_disposal_cars(self):
# iterating through the whole list
for iter in range(0, self.numb_cars()):
# if there is a ready car or a car for recycling
if self.cars_on_conveyor[iter].status == 'RC' or self.cars_on_conveyor[iter].status == 'DS':
return True # then we return True
return False # else return False | 50.011494 | 131 | 0.627672 |
1b64f7cd93bfef8169b729cb08b9e83b7aa851a7 | 13,649 | py | Python | sdk/python/pulumi_aws/ec2/local_gateway_route_table_vpc_association.py | aamir-locus/pulumi-aws | 3e234b050129bde35d8e072a88bd608562f02142 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/local_gateway_route_table_vpc_association.py | aamir-locus/pulumi-aws | 3e234b050129bde35d8e072a88bd608562f02142 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/local_gateway_route_table_vpc_association.py | aamir-locus/pulumi-aws | 3e234b050129bde35d8e072a88bd608562f02142 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['LocalGatewayRouteTableVpcAssociationArgs', 'LocalGatewayRouteTableVpcAssociation']
@pulumi.input_type
class LocalGatewayRouteTableVpcAssociationArgs:
def __init__(__self__, *,
local_gateway_route_table_id: pulumi.Input[str],
vpc_id: pulumi.Input[str],
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a LocalGatewayRouteTableVpcAssociation resource.
:param pulumi.Input[str] local_gateway_route_table_id: Identifier of EC2 Local Gateway Route Table.
:param pulumi.Input[str] vpc_id: Identifier of EC2 VPC.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.
"""
pulumi.set(__self__, "local_gateway_route_table_id", local_gateway_route_table_id)
pulumi.set(__self__, "vpc_id", vpc_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="localGatewayRouteTableId")
def local_gateway_route_table_id(self) -> pulumi.Input[str]:
"""
Identifier of EC2 Local Gateway Route Table.
"""
return pulumi.get(self, "local_gateway_route_table_id")
@local_gateway_route_table_id.setter
def local_gateway_route_table_id(self, value: pulumi.Input[str]):
pulumi.set(self, "local_gateway_route_table_id", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Input[str]:
"""
Identifier of EC2 VPC.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: pulumi.Input[str]):
pulumi.set(self, "vpc_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value map of resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _LocalGatewayRouteTableVpcAssociationState:
def __init__(__self__, *,
local_gateway_id: Optional[pulumi.Input[str]] = None,
local_gateway_route_table_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering LocalGatewayRouteTableVpcAssociation resources.
:param pulumi.Input[str] local_gateway_route_table_id: Identifier of EC2 Local Gateway Route Table.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.
:param pulumi.Input[str] vpc_id: Identifier of EC2 VPC.
"""
if local_gateway_id is not None:
pulumi.set(__self__, "local_gateway_id", local_gateway_id)
if local_gateway_route_table_id is not None:
pulumi.set(__self__, "local_gateway_route_table_id", local_gateway_route_table_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if vpc_id is not None:
pulumi.set(__self__, "vpc_id", vpc_id)
@property
@pulumi.getter(name="localGatewayId")
def local_gateway_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "local_gateway_id")
@local_gateway_id.setter
def local_gateway_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "local_gateway_id", value)
@property
@pulumi.getter(name="localGatewayRouteTableId")
def local_gateway_route_table_id(self) -> Optional[pulumi.Input[str]]:
"""
Identifier of EC2 Local Gateway Route Table.
"""
return pulumi.get(self, "local_gateway_route_table_id")
@local_gateway_route_table_id.setter
def local_gateway_route_table_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "local_gateway_route_table_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value map of resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> Optional[pulumi.Input[str]]:
"""
Identifier of EC2 VPC.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vpc_id", value)
class LocalGatewayRouteTableVpcAssociation(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
local_gateway_route_table_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an EC2 Local Gateway Route Table VPC Association. More information can be found in the [Outposts User Guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-local-gateways.html#vpc-associations).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_local_gateway_route_table = aws.ec2.get_local_gateway_route_table(outpost_arn="arn:aws:outposts:us-west-2:123456789012:outpost/op-1234567890abcdef")
example_vpc = aws.ec2.Vpc("exampleVpc", cidr_block="10.0.0.0/16")
example_local_gateway_route_table_vpc_association = aws.ec2.LocalGatewayRouteTableVpcAssociation("exampleLocalGatewayRouteTableVpcAssociation",
local_gateway_route_table_id=example_local_gateway_route_table.id,
vpc_id=example_vpc.id)
```
## Import
`aws_ec2_local_gateway_route_table_vpc_association` can be imported by using the Local Gateway Route Table VPC Association identifier, e.g.
```sh
$ pulumi import aws:ec2/localGatewayRouteTableVpcAssociation:LocalGatewayRouteTableVpcAssociation example lgw-vpc-assoc-1234567890abcdef
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] local_gateway_route_table_id: Identifier of EC2 Local Gateway Route Table.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.
:param pulumi.Input[str] vpc_id: Identifier of EC2 VPC.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: LocalGatewayRouteTableVpcAssociationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an EC2 Local Gateway Route Table VPC Association. More information can be found in the [Outposts User Guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-local-gateways.html#vpc-associations).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_local_gateway_route_table = aws.ec2.get_local_gateway_route_table(outpost_arn="arn:aws:outposts:us-west-2:123456789012:outpost/op-1234567890abcdef")
example_vpc = aws.ec2.Vpc("exampleVpc", cidr_block="10.0.0.0/16")
example_local_gateway_route_table_vpc_association = aws.ec2.LocalGatewayRouteTableVpcAssociation("exampleLocalGatewayRouteTableVpcAssociation",
local_gateway_route_table_id=example_local_gateway_route_table.id,
vpc_id=example_vpc.id)
```
## Import
`aws_ec2_local_gateway_route_table_vpc_association` can be imported by using the Local Gateway Route Table VPC Association identifier, e.g.
```sh
$ pulumi import aws:ec2/localGatewayRouteTableVpcAssociation:LocalGatewayRouteTableVpcAssociation example lgw-vpc-assoc-1234567890abcdef
```
:param str resource_name: The name of the resource.
:param LocalGatewayRouteTableVpcAssociationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(LocalGatewayRouteTableVpcAssociationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
local_gateway_route_table_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = LocalGatewayRouteTableVpcAssociationArgs.__new__(LocalGatewayRouteTableVpcAssociationArgs)
if local_gateway_route_table_id is None and not opts.urn:
raise TypeError("Missing required property 'local_gateway_route_table_id'")
__props__.__dict__["local_gateway_route_table_id"] = local_gateway_route_table_id
__props__.__dict__["tags"] = tags
if vpc_id is None and not opts.urn:
raise TypeError("Missing required property 'vpc_id'")
__props__.__dict__["vpc_id"] = vpc_id
__props__.__dict__["local_gateway_id"] = None
super(LocalGatewayRouteTableVpcAssociation, __self__).__init__(
'aws:ec2/localGatewayRouteTableVpcAssociation:LocalGatewayRouteTableVpcAssociation',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
local_gateway_id: Optional[pulumi.Input[str]] = None,
local_gateway_route_table_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None) -> 'LocalGatewayRouteTableVpcAssociation':
"""
Get an existing LocalGatewayRouteTableVpcAssociation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] local_gateway_route_table_id: Identifier of EC2 Local Gateway Route Table.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags.
:param pulumi.Input[str] vpc_id: Identifier of EC2 VPC.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _LocalGatewayRouteTableVpcAssociationState.__new__(_LocalGatewayRouteTableVpcAssociationState)
__props__.__dict__["local_gateway_id"] = local_gateway_id
__props__.__dict__["local_gateway_route_table_id"] = local_gateway_route_table_id
__props__.__dict__["tags"] = tags
__props__.__dict__["vpc_id"] = vpc_id
return LocalGatewayRouteTableVpcAssociation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="localGatewayId")
def local_gateway_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "local_gateway_id")
@property
@pulumi.getter(name="localGatewayRouteTableId")
def local_gateway_route_table_id(self) -> pulumi.Output[str]:
"""
Identifier of EC2 Local Gateway Route Table.
"""
return pulumi.get(self, "local_gateway_route_table_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value map of resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Output[str]:
"""
Identifier of EC2 VPC.
"""
return pulumi.get(self, "vpc_id")
| 44.604575 | 226 | 0.676313 |
1eadc641a192e5d5bb27a43c02953191594fe457 | 742 | py | Python | portfolio/home/migrations/0001_initial.py | queenieroseongcal98/website-design-gtms | 40ca21be4ad2fed1c1580562aa25e7238b0e2faf | ["MIT"] | null | null | null | portfolio/home/migrations/0001_initial.py | queenieroseongcal98/website-design-gtms | 40ca21be4ad2fed1c1580562aa25e7238b0e2faf | ["MIT"] | null | null | null | portfolio/home/migrations/0001_initial.py | queenieroseongcal98/website-design-gtms | 40ca21be4ad2fed1c1580562aa25e7238b0e2faf | ["MIT"] | null | null | null |
# Generated by Django 3.1.5 on 2021-02-05 09:42
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('email', models.EmailField(max_length=254)),
('phone', models.CharField(max_length=11)),
('address', models.CharField(default='DEFAULT VALUE', max_length=100)),
('desc', models.TextField()),
],
),
]
| 28.538462 | 114 | 0.563342 |
788b5d6d5cf705ad4e1cc47bf110b306d6e3cf9f | 1,425 | py | Python | move_base_goal_receiv.py | parkhyeonseung/ddarawa | 9148b7fd1f61ac48aa632143ef00dce58f52978c | ["Apache-2.0"] | 1 | 2022-01-13T03:26:26.000Z | 2022-01-13T03:26:26.000Z | move_base_goal_receiv.py | parkhyeonseung/ddarawa | 9148b7fd1f61ac48aa632143ef00dce58f52978c | ["Apache-2.0"] | null | null | null | move_base_goal_receiv.py | parkhyeonseung/ddarawa | 9148b7fd1f61ac48aa632143ef00dce58f52978c | ["Apache-2.0"] | null | null | null |
import rospy
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import socket
import pickle, time
def movebase_client():
receiver = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
receiver.bind((master_ip,7778))
client = actionlib.SimpleActionClient('move_base',MoveBaseAction)
client.wait_for_server()
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = "map"
goal.target_pose.header.stamp = rospy.Time.now()
while True:
bytepair = receiver.recvfrom(1024)
message = pickle.loads(bytepair[0])
print(message)
goal.target_pose.pose.position.x = message[0]
goal.target_pose.pose.position.y = message[1]
goal.target_pose.pose.orientation.z = message[2]
goal.target_pose.pose.orientation.w = message[3]
time.sleep(0.1)
break
client.send_goal(goal)
wait = client.wait_for_result()
if not wait:
rospy.logerr("Action server not available!")
rospy.signal_shutdown("Action server not available!")
else:
return client.get_result()
master_ip = '192.168.0.16'
if __name__ == '__main__':
try:
rospy.init_node('movebase_client_py')
result = movebase_client()
if result:
rospy.loginfo("Goal execution done!")
except rospy.ROSInterruptException:
        rospy.loginfo("Navigation test finished.")
| 27.403846 | 75 | 0.678596 |
0209b4af6278f957eb3b5fc8d6902d58007a6ca6 | 3,461 | py | Python | plums/plot/engine/utils.py | alexandreMayerowitz/playground-plums | a6be79e4c30c7abcbade5581f052a4e8035a2057 | ["MIT"] | null | null | null | plums/plot/engine/utils.py | alexandreMayerowitz/playground-plums | a6be79e4c30c7abcbade5581f052a4e8035a2057 | ["MIT"] | null | null | null | plums/plot/engine/utils.py | alexandreMayerowitz/playground-plums | a6be79e4c30c7abcbade5581f052a4e8035a2057 | ["MIT"] | 2 | 2021-02-03T12:37:53.000Z | 2022-03-09T03:48:12.000Z |
import PIL.ImageFont
import numpy as np
from plums.commons import Path
def get_text_color(background_color):
"""Select the appropriate text color (black or white) based on the luminance of the background color.
Arguments:
background_color (tuple): The record color (RGB or RGBA format).
Returns:
tuple: The chosen text color (black or white).
"""
# Counting the perceptive luminance - human eye favors green color...
luminance = (0.299 * background_color[0] + 0.587 * background_color[1] + 0.114 * background_color[2]) / 255.
# Set text color to white on "dark" backgrounds and dark color on "light" background
if luminance <= 0.5:
return 255, 255, 255
return 0, 0, 0
def get_outline_color(background_color):
    """Select the appropriate outline color (a grey level) based on the luminance of the background color.
    Arguments:
        background_color (tuple): The record color (RGB or RGBA format).
    Returns:
        tuple: The chosen outline color (light grey for dark backgrounds, dark grey for light ones).
"""
# Counting the perceptive luminance - human eye favors green color...
luminance = (0.299 * background_color[0] + 0.587 * background_color[1] + 0.114 * background_color[2]) / 255.
    # Use a light grey outline on "dark" backgrounds and a darker grey on "light" backgrounds
if luminance <= 0.5:
return 150, 150, 150
return 100, 100, 100
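# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Shows how the two colour helpers above behave for a dark and a light background,
# using only functions defined in this file. The RGB values are arbitrary examples.
def _demo_color_helpers():
    dark_background = (30, 30, 30)
    light_background = (240, 240, 210)
    assert get_text_color(dark_background) == (255, 255, 255)      # white text on a dark colour
    assert get_text_color(light_background) == (0, 0, 0)           # black text on a light colour
    assert get_outline_color(dark_background) == (150, 150, 150)   # lighter grey outline
    assert get_outline_color(light_background) == (100, 100, 100)  # darker grey outline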
def get_default_font(text_size):
"""Get a default font to render targets with text.
Args:
text_size (int): text size in pixels
Returns:
:class:`~PIL.ImageFont.FreeTypeFont`
"""
assert isinstance(text_size, int) and text_size > 0, "Text size should be positive integer"
return PIL.ImageFont.truetype(font=str(Path(__file__)[:-1] / "fonts" / "arial.ttf"), size=text_size)
def dict_equal(dictionary, other_dictionary):
"""Compare two dict with :class:`numpy.ndarray` value handling.
Comparison is made in two parts:
* We first check that both dict have the same keys.
* If they do we then compare each value pair and lazily return ``False`` if any comparison fails.
Note:
Value comparison implicitly delegates to the :meth:`__eq__` method of singular elements and avoids explicit
type-check. Although this is somewhat slower as it involves potentially long element-wise equality checks,
it allows for duck-typing and rich type handling by particular classes.
Args:
dictionary (dict): A :class:`dict` to compare with another.
other_dictionary (dict): A :class:`dict` to compare with another.
Returns:
bool: :any:`True` if the two dict are equal in keys and content.
"""
if set(dictionary.keys()) == set(other_dictionary.keys()):
for key, value in dictionary.items():
if isinstance(value, np.ndarray) or isinstance(other_dictionary[key], np.ndarray):
# If an ndarray we need explicitly go through np.all() to compare
if not np.all(value == other_dictionary[key]):
# If not the same lazily exit now
return False
else:
# Otherwise we delegate to cmp()
if not value == other_dictionary[key]:
# If not the same lazily exit now
return False
# All equal, return True
return True
# Not even the same keys, return False
return False
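# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# dict_equal() compares numpy array values element-wise via np.all(), so two dicts
# holding equal arrays compare as equal even though `==` on the arrays alone would
# not produce a single boolean. The dictionaries below are made-up examples.
def _demo_dict_equal():
    left = {'points': np.array([1, 2, 3]), 'name': 'tile'}
    right = {'points': np.array([1, 2, 3]), 'name': 'tile'}
    different = {'points': np.array([1, 2, 4]), 'name': 'tile'}
    assert dict_equal(left, right)
    assert not dict_equal(left, different)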
| 37.215054 | 115 | 0.656747 |
5f9e930332db429d1f43fad19d56985259e610c2 | 2,352 | py | Python | main/model/post.py | daspots/dasapp | 15b113f86842fb8dcef71780a8bda23618427ab3 | ["MIT"] | null | null | null | main/model/post.py | daspots/dasapp | 15b113f86842fb8dcef71780a8bda23618427ab3 | ["MIT"] | null | null | null | main/model/post.py | daspots/dasapp | 15b113f86842fb8dcef71780a8bda23618427ab3 | ["MIT"] | null | null | null |
from google.appengine.ext import ndb
import model
from api import fields
import flask
import util
class Post(model.Base):
user_key = ndb.KeyProperty(kind=model.User, required=True)
title = ndb.StringProperty(required=True)
content = ndb.StringProperty(default='')
image_ids_string = ndb.StringProperty(default='')
keywords = ndb.StringProperty(default='')
location_keywords = ndb.StringProperty(default='')
blob_key = ndb.BlobKeyProperty()
name = ndb.StringProperty()
bucket_name = ndb.StringProperty()
image_url = ndb.StringProperty(default='')
content_type = ndb.StringProperty(default='')
size = ndb.IntegerProperty(default=0)
img_ids = ndb.IntegerProperty(repeated=True)
recommender = ndb.StringProperty(default='')
recommender_lower = ndb.StringProperty(default='')
website = ndb.StringProperty(default='')
adress = ndb.StringProperty(default='')
keyword_list = ndb.StringProperty(repeated=True)
location_keyword_list = ndb.StringProperty(repeated=True)
starred = ndb.BooleanProperty(default=False)
@ndb.ComputedProperty
def size_human(self):
return util.size_human(self.size or 0)
@property
def download_url(self):
if self.key:
return flask.url_for(
'resource_download', resource_id=self.key.id(), _external=True
)
return None
@property
def view_url(self):
if self.key:
return flask.url_for(
'resource_view', resource_id=self.key.id(), _external=True,
)
return None
@property
def serve_url(self):
return '%s/serve/%s' % (flask.request.url_root[:-1], self.blob_key)
FIELDS = {
'bucket_name': fields.String,
'content_type': fields.String,
'download_url': fields.String,
'image_url': fields.String,
'name': fields.String,
'serve_url': fields.String,
'size': fields.Integer,
'size_human': fields.String,
'view_url': fields.String,
}
FIELDS.update(model.Base.FIELDS)
@ndb.ComputedProperty
def recommender_url(self):
recommender = model.Recommender.query(model.Recommender.name == self.recommender).get()
return flask.url_for(
'recommender_view', recommender_id=recommender.key.id(), _external=True
)
@ndb.ComputedProperty
def google_maps_directions(self):
return 'https://www.google.com/maps?saddr=My+Location&daddr=' + self.adress.replace(' ', '+')
| 30.545455 | 97 | 0.713861 |
3cd9b2415637404fd56d373edd7170be4022f19e | 1,379 | py | Python | setup.py | neurobin/phantomjspy | 45ecd242a200e55a6efb0b5596ef81950766b5c9 | ["BSD-3-Clause"] | 10 | 2019-12-21T21:34:30.000Z | 2021-11-27T01:23:58.000Z | setup.py | neurobin/phantomjspy | 45ecd242a200e55a6efb0b5596ef81950766b5c9 | ["BSD-3-Clause"] | 4 | 2020-03-31T17:29:31.000Z | 2021-05-28T05:18:28.000Z | setup.py | neurobin/phantomjspy | 45ecd242a200e55a6efb0b5596ef81950766b5c9 | ["BSD-3-Clause"] | 4 | 2019-12-21T21:42:25.000Z | 2021-05-27T19:44:57.000Z |
# -*- coding: utf-8 -*-
import os
import sys
from codecs import open
from setuptools import setup
sys.path[0:0] = ['phantomjs']
from version import __version__
def get_readme(filename):
content = ""
try:
with open(os.path.join(os.path.dirname(__file__), filename), 'r', encoding='utf-8') as readme:
content = readme.read()
except Exception as e:
pass
return content
setup(name="phantomjs",
version=__version__,
author="Md. Jahidul Hamid",
author_email="jahidulhamid@yahoo.com",
description="Python wrapper for PhantomJS",
license="BSD",
keywords="markdown include local remote file",
url="https://github.com/neurobin/phantomjspy",
packages=["phantomjs"],
long_description=get_readme("README.md"),
long_description_content_type="text/markdown",
classifiers=[
# See: https://pypi.python.org/pypi?:action=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
install_requires=[],
include_package_data=True,
test_suite="phantomjs.testio")
| 31.340909 | 102 | 0.649746 |
4c6e9ddbc79a972e7dc7e01844b44aaf8cc45918 | 846 | py | Python | scripts/old/show_calib_points.py | strawlab/flyvr | 335892cae740e53e82e07b526e1ba53fbd34b0ce | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 3 | 2015-01-29T14:09:25.000Z | 2016-04-24T04:25:49.000Z | scripts/old/show_calib_points.py | strawlab/flyvr | 335892cae740e53e82e07b526e1ba53fbd34b0ce | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null | scripts/old/show_calib_points.py | strawlab/flyvr | 335892cae740e53e82e07b526e1ba53fbd34b0ce | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null |
import os.path
import pickle
import sys
import json
import roslib;
roslib.load_manifest('freemovr_engine')
roslib.load_manifest('std_msgs')
import rospy
from std_msgs.msg import UInt32, String
if __name__ == "__main__":
rospy.init_node('showcalibpoints', anonymous=True)
pub_pts = rospy.Publisher('/multicamselfcal_everything/points', String)
pub_resolution = rospy.Publisher('/multicamselfcal_everything/resolution', String)
d = os.path.abspath(os.path.expanduser(sys.argv[1]))
with open(os.path.join(d,'results.pkl'),'r') as f:
results = pickle.load(f)
with open(os.path.join(d,'resolution.pkl'),'r') as f:
resolution = pickle.load(f)
while not rospy.is_shutdown():
pub_pts.publish(json.dumps(results))
pub_resolution.publish(json.dumps(resolution))
rospy.sleep(0.5)
| 26.4375 | 86 | 0.710402 |
d49e5621a2e119e5f52b80b4c9791e0bf19cbb31 | 4,150 | py | Python | paypal/response.py | mcordes/paypal-python | 86512481b5cd3700e13a601f193bb574b9338a43 | ["Apache-2.0"] | 1 | 2016-01-20T14:12:31.000Z | 2016-01-20T14:12:31.000Z | paypal/response.py | mcordes/paypal-python | 86512481b5cd3700e13a601f193bb574b9338a43 | ["Apache-2.0"] | null | null | null | paypal/response.py | mcordes/paypal-python | 86512481b5cd3700e13a601f193bb574b9338a43 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
"""
PayPalResponse parsing and processing.
"""
import logging
from pprint import pformat
from paypal.compat import is_py3, is_py25
if is_py3:
#noinspection PyUnresolvedReferences
import urllib.parse
#noinspection PyUnresolvedReferences
parse_qs = urllib.parse.parse_qs
elif is_py25:
import cgi
#noinspection PyUnresolvedReferences, PyDeprecation
parse_qs = cgi.parse_qs
else:
# Python 2.6 and up (but not 3.0) have urlparse.parse_qs, which is copied
# from Python 2.5's cgi.parse_qs.
import urlparse
#noinspection PyUnresolvedReferences, PyDeprecation
parse_qs = urlparse.parse_qs
logger = logging.getLogger('paypal.response')
class PayPalResponse(object):
"""
Parse and prepare the reponse from PayPal's API. Acts as somewhat of a
glorified dictionary for API responses.
NOTE: Don't access self.raw directly. Just do something like
PayPalResponse.someattr, going through PayPalResponse.__getattr__().
"""
def __init__(self, query_string, config):
"""
query_string is the response from the API, in NVP format. This is
parseable by urlparse.parse_qs(), which sticks it into the
:attr:`raw` dict for retrieval by the user.
:param str query_string: The raw response from the API server.
:param PayPalConfig config: The config object that was used to send
the query that caused this response.
"""
# A dict of NVP values. Don't access this directly, use
# PayPalResponse.attribname instead. See self.__getattr__().
self.raw = parse_qs(query_string)
self.config = config
logger.debug("PayPal NVP API Response:\n%s" % self.__str__())
def __str__(self):
"""
Returns a string representation of the PayPalResponse object, in
'pretty-print' format.
:rtype: str
:returns: A 'pretty' string representation of the response dict.
"""
return pformat(self.raw)
def __getattr__(self, key):
"""
Handles the retrieval of attributes that don't exist on the object
already. This is used to get API response values. Handles some
convenience stuff like discarding case and checking the cgi/urlparsed
response value dict (self.raw).
:param str key: The response attribute to get a value for.
:rtype: str
:returns: The requested value from the API server's response.
"""
# PayPal response names are always uppercase.
key = key.upper()
try:
value = self.raw[key]
if len(value) == 1:
# For some reason, PayPal returns lists for all of the values.
# I'm not positive as to why, so we'll just take the first
# of each one. Hasn't failed us so far.
return value[0]
return value
except KeyError:
# The requested value wasn't returned in the response.
raise AttributeError(self)
def __getitem__(self, key):
"""
Another (dict-style) means of accessing response data.
:param str key: The response key to get a value for.
:rtype: str
:returns: The requested value from the API server's response.
"""
# PayPal response names are always uppercase.
key = key.upper()
value = self.raw[key]
if len(value) == 1:
# For some reason, PayPal returns lists for all of the values.
# I'm not positive as to why, so we'll just take the first
# of each one. Hasn't failed us so far.
return value[0]
return value
def success(self):
"""
Checks for the presence of errors in the response. Returns ``True`` if
all is well, ``False`` otherwise.
:rtype: bool
:returns ``True`` if PayPal says our query was successful.
"""
return self.ack.upper() in (self.config.ACK_SUCCESS,
self.config.ACK_SUCCESS_WITH_WARNING)
success = property(success)
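# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# PayPalResponse only needs an NVP query string plus a config object exposing the
# ACK_* constants referenced above, so a minimal stand-in config is enough to show
# how attribute- and dict-style access behave. All values below are made up.
def _demo_paypal_response():
    class _FakeConfig(object):
        ACK_SUCCESS = 'SUCCESS'
        ACK_SUCCESS_WITH_WARNING = 'SUCCESSWITHWARNING'
    response = PayPalResponse('ACK=Success&AMT=19.95&CURRENCYCODE=USD', _FakeConfig())
    assert response.success                   # ACK=Success -> treated as a successful call
    assert response.AMT == '19.95'            # single values are unwrapped from their lists
    assert response['CURRENCYCODE'] == 'USD'  # dict-style access goes through __getitem__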
| 35.775862 | 78 | 0.629398 |
bc9b38aa93978a9c5a2ff6d24ac4f1e6be8b4faa | 1,888 | py | Python | third_party_package/RDKit_2015_03_1/rdkit/ML/Descriptors/UnitTestParser.py | Ivy286/cluster_basedfps | 7fc216537f570436f008ea567c137d03ba2b6d81 | ["WTFPL"] | 9 | 2019-04-23T01:46:12.000Z | 2021-08-16T07:07:12.000Z | third_party_package/RDKit_2015_03_1/rdkit/ML/Descriptors/UnitTestParser.py | Ivy286/cluster_basedfps | 7fc216537f570436f008ea567c137d03ba2b6d81 | ["WTFPL"] | null | null | null | third_party_package/RDKit_2015_03_1/rdkit/ML/Descriptors/UnitTestParser.py | Ivy286/cluster_basedfps | 7fc216537f570436f008ea567c137d03ba2b6d81 | ["WTFPL"] | 5 | 2016-09-21T03:47:48.000Z | 2019-07-30T22:17:35.000Z |
#
# Copyright (C) 2001 greg Landrum
#
""" unit testing code for compound descriptors
"""
from __future__ import print_function
import unittest
import Parser
from rdkit.six.moves import xrange
class TestCase(unittest.TestCase):
def setUp(self):
print('\n%s: '%self.shortDescription(),end='')
self.piece1 = [['d1','d2'],['d1','d2']]
self.aDict = {'Fe':{'d1':1,'d2':2},'Pt':{'d1':10,'d2':20}}
self.pDict = {'d1':100.,'d2':200.}
self.compos = [('Fe',1),('Pt',1)]
self.cExprs = ["SUM($1)","SUM($1)+SUM($2)","MEAN($1)","DEV($2)","MAX($1)","MIN($2)","SUM($1)/$a"]
self.results = [11.,33.,5.5,9.,10.,2.,0.11]
self.tol = 0.0001
def testSingleCalcs(self):
" testing calculation of a single descriptor "
for i in xrange(len(self.cExprs)):
cExpr= self.cExprs[i]
argVect = self.piece1 + [cExpr]
res = Parser.CalcSingleCompoundDescriptor(self.compos,argVect,self.aDict,self.pDict)
self.assertAlmostEqual(res,self.results[i],2)
def testMultipleCalcs(self):
" testing calculation of multiple descriptors "
for i in xrange(len(self.cExprs)):
cExpr= self.cExprs[i]
argVect = self.piece1 + [cExpr]
res = Parser.CalcMultipleCompoundsDescriptor([self.compos,self.compos],argVect,
self.aDict,[self.pDict,self.pDict])
self.assertAlmostEqual(res[0],self.results[i],2)
self.assertAlmostEqual(res[1],self.results[i],2)
#self.assertTrue(abs(res[0]-self.results[i])<self.tol,'Expression %s failed'%(cExpr))
#self.assertTrue((res[1]-self.results[i])<self.tol,'Expression %s failed'%(cExpr))
def TestSuite():
suite = unittest.TestSuite()
suite.addTest(TestCase('testSingleCalcs'))
suite.addTest(TestCase('testMultipleCalcs'))
return suite
if __name__ == '__main__':
suite = TestSuite()
unittest.TextTestRunner().run(suite)
| 35.622642 | 101 | 0.64036 |
636b9f2d74ec64a3e0ae3ba3877d873a2e5e3f8d | 2,885 | py | Python | pyexlatex/logic/output/api/exc_handler/main.py | whoopnip/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | ["MIT"] | 4 | 2020-06-08T07:17:12.000Z | 2021-11-04T21:39:52.000Z | pyexlatex/logic/output/api/exc_handler/main.py | nickderobertis/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | ["MIT"] | 24 | 2020-02-17T17:20:44.000Z | 2021-12-20T00:10:19.000Z | pyexlatex/logic/output/api/exc_handler/main.py | nickderobertis/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | ["MIT"] | null | null | null |
from typing import List, Optional, Dict, Any
import warnings
from pyexlatex.logic.output.api.formats import OutputFormats
from pyexlatex.logic.output.errors.exc import (
LatexException
)
from pyexlatex.logic.output.api.exc_handler.prepend.main import handle_prepend_exceptions
from pyexlatex.logic.output.api.exc_handler.prepend.typing import PrependKwargsDict, PrependItemsDict
class APIExceptionHandler:
def __init__(self, exceptions: List[LatexException], orig_exception: Exception, latex_str: str,
prepend_kwargs_dict: PrependKwargsDict = None, prepend_items_dict: PrependItemsDict = None,
retries_remaining: int = 3, output_format: OutputFormats = OutputFormats.PDF, **latex_kwargs):
self.exceptions = exceptions
self.orig_exception = orig_exception
self.latex_str = latex_str
self.prepend_kwargs_dict = prepend_kwargs_dict
self.prepend_items_dict = prepend_items_dict
self.retries_remaining = retries_remaining
self.output_format = output_format
self.latex_kwargs = latex_kwargs
def handle_exceptions(self):
from pyexlatex.logic.output.api.main import latex_str_to_obj
if not self.exceptions:
# Got LatexBuildError, but could not extract any exceptions from it. Something is going wrong
# Seems like it might be some intermittent issue, try retrying
if self.retries_remaining > 0:
warnings.warn('got empty latex build error. trying to create pdf again')
return latex_str_to_obj(
self.latex_str,
output_format=self.output_format,
retries_remaining=self.retries_remaining - 1,
prepend_items_dict=self.prepend_items_dict,
prepend_kwargs_dict=self.prepend_kwargs_dict,
**self.latex_kwargs
)
raise LatexException(self.orig_exception)
prepend_items_dict, prepend_kwarg_dict, unhandled_exceptions = handle_prepend_exceptions(
self.exceptions, self.prepend_kwargs_dict, self.prepend_items_dict
)
# TODO [#10]: handle other exceptions
#
# Not actually sure what this is for looking back at the code, but upon closer review
# perhaps some additional exception handling will be needed.
if len(unhandled_exceptions) == len(self.exceptions):
# was not able to handle any exceptions, so retrying would be of no use
raise LatexException(self.exceptions)
return latex_str_to_obj(
self.latex_str,
self.output_format,
retries_remaining=self.retries_remaining,
prepend_items_dict=prepend_items_dict,
prepend_kwargs_dict=prepend_kwarg_dict,
**self.latex_kwargs
)
| 41.811594 | 111 | 0.683189 |
274dec8482448a2650a538a25659ee7a3881eb99 | 411 | py | Python | File.numpy-titanic-corrupt-data.py | eltechno/python_course | f74abac7df3f9f41864afd06479389260c29ea3a | ["MIT"] | 4 | 2019-05-04T00:33:25.000Z | 2021-05-29T20:37:59.000Z | File.numpy-titanic-corrupt-data.py | eltechno/python_course | f74abac7df3f9f41864afd06479389260c29ea3a | ["MIT"] | null | null | null | File.numpy-titanic-corrupt-data.py | eltechno/python_course | f74abac7df3f9f41864afd06479389260c29ea3a | ["MIT"] | 3 | 2020-05-05T13:14:28.000Z | 2022-02-03T16:18:37.000Z |
# Import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.pyplot as plt
# Assign filename: file
file = 'titanic_corrupt.txt'
# Import file: data
data = pd.read_csv(file, sep="\t", comment='#', na_values="Nothing")
# Print the head of the DataFrame
print(data.head())
# Plot 'Age' variable in a histogram
pd.DataFrame.hist(data[['Age']])
plt.xlabel('Age (years)')
plt.ylabel('count')
plt.show()
| 22.833333 | 68 | 0.717762 |
dcd4c38d996e579e04eccd1aabd63b16bee7b5c4 | 5,621 | py | Python | mars/tensor/linalg/qr.py | HarshCasper/mars | 4c12c968414d666c7a10f497bc22de90376b1932 | ["Apache-2.0"] | 2 | 2019-03-29T04:11:10.000Z | 2020-07-08T10:19:54.000Z | mars/tensor/linalg/qr.py | HarshCasper/mars | 4c12c968414d666c7a10f497bc22de90376b1932 | ["Apache-2.0"] | null | null | null | mars/tensor/linalg/qr.py | HarshCasper/mars | 4c12c968414d666c7a10f497bc22de90376b1932 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numpy.linalg import LinAlgError
from ... import opcodes as OperandDef
from ...serialize import KeyField, StringField
from ...core import ExecutableTuple
from ..array_utils import device, as_same_device
from ..datasource import tensor as astensor
from ..operands import TensorHasInput, TensorOperandMixin
from ..core import TensorOrder
from .core import SFQR, TSQR
class TensorQR(TensorHasInput, TensorOperandMixin):
_op_type_ = OperandDef.QR
_input = KeyField('input')
_method = StringField('method')
def __init__(self, method=None, dtype=None, **kw):
super().__init__(_method=method, _dtype=dtype, **kw)
@property
def method(self):
return self._method
@property
def output_limit(self):
return 2
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._input = self._inputs[0]
def __call__(self, a):
a = astensor(a)
if a.ndim != 2:
raise LinAlgError(f'{a.ndim}-dimensional tensor given. '
'Tensor must be two-dimensional')
tiny_q, tiny_r = np.linalg.qr(np.ones((1, 1), dtype=a.dtype))
x, y = a.shape
q_shape, r_shape = (a.shape, (y, y)) if x > y else ((x, x), a.shape)
q, r = self.new_tensors([a],
kws=[{'side': 'q', 'dtype': tiny_q.dtype,
'shape': q_shape, 'order': TensorOrder.C_ORDER},
{'side': 'r', 'dtype': tiny_r.dtype,
'shape': r_shape, 'order': TensorOrder.C_ORDER}])
return ExecutableTuple([q, r])
@classmethod
def tile(cls, op):
q, r = op.outputs
q_dtype, r_dtype = q.dtype, r.dtype
q_shape, r_shape = q.shape, r.shape
in_tensor = op.input
if in_tensor.chunk_shape == (1, 1):
in_chunk = in_tensor.chunks[0]
chunk_op = op.copy().reset_key()
qr_chunks = chunk_op.new_chunks([in_chunk], kws=[
{'side': 'q', 'shape': q_shape, 'index': in_chunk.index},
{'side': 'r', 'shape': r_shape, 'index': in_chunk.index}
])
q_chunk, r_chunk = qr_chunks
new_op = op.copy()
kws = [
{'chunks': [q_chunk], 'nsplits': ((q_shape[0],), (q_shape[1],)),
'dtype': q_dtype, 'shape': q_shape, 'order': q.order},
{'chunks': [r_chunk], 'nsplits': ((r_shape[0],), (r_shape[1],)),
'dtype': r_dtype, 'shape': r_shape, 'order': r.order}
]
return new_op.new_tensors(op.inputs, kws=kws)
elif op.method == 'tsqr':
return TSQR.tile(op)
elif op.method == 'sfqr':
return SFQR.tile(op)
else:
raise NotImplementedError('Only tsqr method supported for now')
@classmethod
def execute(cls, ctx, op):
(a,), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
with device(device_id):
q, r = xp.linalg.qr(a)
qc, rc = op.outputs
ctx[qc.key] = q
ctx[rc.key] = r
def qr(a, method='tsqr'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
method: {'tsqr', 'sfqr'}, optional
method to calculate qr factorization, tsqr as default
TSQR is presented in:
A. Benson, D. Gleich, and J. Demmel.
Direct QR factorizations for tall-and-skinny matrices in
MapReduce architectures.
IEEE International Conference on Big Data, 2013.
http://arxiv.org/abs/1301.1071
FSQR is a QR decomposition for fat and short matrix:
A = [A1, A2, A3, ...], A1 may be decomposed as A1 = Q1 * R1,
for A = Q * R, Q = Q1, R = [R1, R2, R3, ...] where A2 = Q1 * R2, A3 = Q1 * R3, ...
Returns
-------
q : Tensor of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : Tensor of float or complex, optional
The upper-triangular matrix.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Examples
--------
>>> import mars.tensor as mt
>>> a = mt.random.randn(9, 6)
>>> q, r = mt.linalg.qr(a)
>>> mt.allclose(a, mt.dot(q, r)).execute() # a does equal qr
True
"""
op = TensorQR(method=method)
return op(a)
| 33.260355 | 94 | 0.579968 |
86499a5a2d95d2448ec08232fe59d054d7c5b9c9 | 3,334 | py | Python | auth0/v3/management/client_grants.py | akmjenkins/auth0-python | 511b016ac9853c7f4ee66769be7ad315c5585735 | ["MIT"] | 340 | 2015-06-05T12:32:26.000Z | 2022-03-30T18:41:30.000Z | auth0/v3/management/client_grants.py | akmjenkins/auth0-python | 511b016ac9853c7f4ee66769be7ad315c5585735 | ["MIT"] | 179 | 2015-05-26T00:35:07.000Z | 2022-03-18T17:16:37.000Z | auth0/v3/management/client_grants.py | akmjenkins/auth0-python | 511b016ac9853c7f4ee66769be7ad315c5585735 | ["MIT"] | 151 | 2015-01-27T11:49:01.000Z | 2022-03-03T14:26:09.000Z |
from .rest import RestClient
class ClientGrants(object):
"""Auth0 client grants endpoints
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
token (str): Management API v2 Token
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
timeout (float or tuple, optional): Change the requests
connect and read timeout. Pass a tuple to specify
both values separately or a float to set both to it.
(defaults to 5.0 for both)
rest_options (RestClientOptions): Pass an instance of
RestClientOptions to configure additional RestClient
options, such as rate-limit retries.
(defaults to None)
"""
def __init__(self, domain, token, telemetry=True, timeout=5.0, protocol="https", rest_options=None):
self.domain = domain
self.protocol = protocol
self.client = RestClient(jwt=token, telemetry=telemetry, timeout=timeout, options=rest_options)
def _url(self, id=None):
url = '{}://{}/api/v2/client-grants'.format(self.protocol, self.domain)
if id is not None:
return '{}/{}'.format(url, id)
return url
def all(self, audience=None, page=None, per_page=None, include_totals=False, client_id=None):
"""Retrieves all client grants.
Args:
audience (str, optional): URL encoded audience of a Resource Server
to filter.
page (int, optional): The result's page number (zero based). When not set,
the default value is up to the server.
per_page (int, optional): The amount of entries per page. When not set,
the default value is up to the server.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise. Defaults to False.
client_id (string, optional): The id of a client to filter.
See: https://auth0.com/docs/api/management/v2#!/Client_Grants/get_client_grants
"""
params = {
'audience': audience,
'page': page,
'per_page': per_page,
'include_totals': str(include_totals).lower(),
'client_id': client_id,
}
return self.client.get(self._url(), params=params)
def create(self, body):
"""Creates a client grant.
Args:
body (dict): Attributes for the new client grant.
See: https://auth0.com/docs/api/management/v2#!/Client_Grants/post_client_grants
"""
return self.client.post(self._url(), data=body)
def delete(self, id):
"""Deletes a client grant.
Args:
id (str): Id of client grant to delete.
See: https://auth0.com/docs/api/management/v2#!/Client_Grants/delete_client_grants_by_id
"""
return self.client.delete(self._url(id))
def update(self, id, body):
"""Modifies a client grant.
Args:
id (str): The id of the client grant to modify.
body (dict): Attributes to update.
See: https://auth0.com/docs/api/management/v2#!/Client_Grants/patch_client_grants_by_id
"""
return self.client.patch(self._url(id), data=body)
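# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Shows how the endpoints above are typically driven. The domain, token, ids and
# body fields are placeholders, and every call performs a real HTTP request to the
# Auth0 Management API when executed.
def _demo_client_grants_usage():
    grants = ClientGrants('example.auth0.com', 'MGMT_API_V2_TOKEN')
    # Page through the grants issued for one API (audience).
    page = grants.all(audience='https://example.auth0.com/api/v2/',
                      page=0, per_page=25, include_totals=True)
    # Create, update and delete take plain dict bodies / string ids.
    grants.create({'client_id': 'YOUR_CLIENT_ID',
                   'audience': 'https://example.auth0.com/api/v2/',
                   'scope': ['read:users']})
    grants.update('cgr_0123456789abcdef', {'scope': ['read:users', 'update:users']})
    grants.delete('cgr_0123456789abcdef')
    return page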
| 32.686275 | 104 | 0.609178 |
3d70a678d3dcc59d925ad0c7d6c15a01f05a3442 | 13,709 | py | Python | nova/objects/virtual_interface.py | gmannos/nova-rbac-policy-new-defaults | 7c61b4bd19b97011d7581866c0262065e52d7865 | ["Apache-2.0"] | null | null | null | nova/objects/virtual_interface.py | gmannos/nova-rbac-policy-new-defaults | 7c61b4bd19b97011d7581866c0262065e52d7865 | ["Apache-2.0"] | 1 | 2021-03-31T19:29:01.000Z | 2021-03-31T19:29:01.000Z | nova/objects/virtual_interface.py | gmannos/nova-rbac-policy-new-defaults | 7c61b4bd19b97011d7581866c0262065e52d7865 | ["Apache-2.0"] | 1 | 2020-07-22T22:14:40.000Z | 2020-07-22T22:14:40.000Z |
# Copyright (C) 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import versionutils
from nova import context as nova_context
from nova.db import api as db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import models
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
VIF_OPTIONAL_FIELDS = ['network_id']
FAKE_UUID = '00000000-0000-0000-0000-000000000000'
@base.NovaObjectRegistry.register
class VirtualInterface(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Add tag field
# Version 1.2: Adding a save method
# Version 1.3: Added destroy() method
VERSION = '1.3'
fields = {
'id': fields.IntegerField(),
# This is a MAC address.
'address': fields.StringField(nullable=True),
'network_id': fields.IntegerField(),
'instance_uuid': fields.UUIDField(),
'uuid': fields.UUIDField(),
'tag': fields.StringField(nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 1) and 'tag' in primitive:
del primitive['tag']
@staticmethod
def _from_db_object(context, vif, db_vif):
for field in vif.fields:
if not db_vif[field] and field in VIF_OPTIONAL_FIELDS:
continue
else:
setattr(vif, field, db_vif[field])
# NOTE(danms): The neutronv2 module namespaces mac addresses
# with port id to avoid uniqueness constraints currently on
# our table. Strip that out here so nobody else needs to care.
if 'address' in vif and '/' in vif.address:
vif.address, _ = vif.address.split('/', 1)
vif._context = context
vif.obj_reset_changes()
return vif
@base.remotable_classmethod
def get_by_id(cls, context, vif_id):
db_vif = db.virtual_interface_get(context, vif_id)
if db_vif:
return cls._from_db_object(context, cls(), db_vif)
@base.remotable_classmethod
def get_by_uuid(cls, context, vif_uuid):
db_vif = db.virtual_interface_get_by_uuid(context, vif_uuid)
if db_vif:
return cls._from_db_object(context, cls(), db_vif)
@base.remotable_classmethod
def get_by_address(cls, context, address):
db_vif = db.virtual_interface_get_by_address(context, address)
if db_vif:
return cls._from_db_object(context, cls(), db_vif)
@base.remotable_classmethod
def get_by_instance_and_network(cls, context, instance_uuid, network_id):
db_vif = db.virtual_interface_get_by_instance_and_network(context,
instance_uuid, network_id)
if db_vif:
return cls._from_db_object(context, cls(), db_vif)
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
db_vif = db.virtual_interface_create(self._context, updates)
self._from_db_object(self._context, self, db_vif)
@base.remotable
def save(self):
updates = self.obj_get_changes()
if 'address' in updates:
raise exception.ObjectActionError(action='save',
reason='address is not mutable')
db_vif = db.virtual_interface_update(self._context, self.address,
updates)
return self._from_db_object(self._context, self, db_vif)
@base.remotable_classmethod
def delete_by_instance_uuid(cls, context, instance_uuid):
db.virtual_interface_delete_by_instance(context, instance_uuid)
@base.remotable
def destroy(self):
db.virtual_interface_delete(self._context, self.id)
@base.NovaObjectRegistry.register
class VirtualInterfaceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('VirtualInterface'),
}
@base.remotable_classmethod
def get_all(cls, context):
db_vifs = db.virtual_interface_get_all(context)
return base.obj_make_list(context, cls(context),
objects.VirtualInterface, db_vifs)
@staticmethod
@db.select_db_reader_mode
def _db_virtual_interface_get_by_instance(context, instance_uuid,
use_subordinate=False):
return db.virtual_interface_get_by_instance(context, instance_uuid)
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid, use_subordinate=False):
db_vifs = cls._db_virtual_interface_get_by_instance(
context, instance_uuid, use_subordinate=use_subordinate)
return base.obj_make_list(context, cls(context),
objects.VirtualInterface, db_vifs)
@db_api.api_context_manager.writer
def fill_virtual_interface_list(context, max_count):
"""This fills missing VirtualInterface Objects in Nova DB"""
count_hit = 0
count_all = 0
def _regenerate_vif_list_base_on_cache(context,
instance,
old_vif_list,
nw_info):
# Set old VirtualInterfaces as deleted.
for vif in old_vif_list:
vif.destroy()
# Generate list based on current cache:
for vif in nw_info:
vif_obj = objects.VirtualInterface(context)
vif_obj.uuid = vif['id']
vif_obj.address = "%s/%s" % (vif['address'], vif['id'])
vif_obj.instance_uuid = instance['uuid']
# Find tag from previous VirtualInterface object if exist.
old_vif = [x for x in old_vif_list if x.uuid == vif['id']]
vif_obj.tag = old_vif[0].tag if len(old_vif) > 0 else None
vif_obj.create()
cells = objects.CellMappingList.get_all(context)
for cell in cells:
if count_all == max_count:
# We reached the limit of checked instances per
# this function run.
# Stop, do not go to other cell.
break
with nova_context.target_cell(context, cell) as cctxt:
marker = _get_marker_for_migrate_instances(cctxt)
filters = {'deleted': False}
# Adjust the limit of migrated instances.
# If user wants to process a total of 100 instances
# and we did a 75 in cell1, then we only need to
# verify 25 more in cell2, no more.
adjusted_limit = max_count - count_all
instances = objects.InstanceList.get_by_filters(
cctxt,
filters=filters,
sort_key='created_at',
sort_dir='asc',
marker=marker,
limit=adjusted_limit)
for instance in instances:
# We don't want to fill vif for FAKE instance.
if instance.uuid == FAKE_UUID:
continue
try:
info_cache = objects.InstanceInfoCache.\
get_by_instance_uuid(cctxt, instance.get('uuid'))
if not info_cache.network_info:
LOG.info('InstanceInfoCache object has not set '
'NetworkInfo field. '
'Skipping build of VirtualInterfaceList.')
continue
except exception.InstanceInfoCacheNotFound:
LOG.info('Instance has no InstanceInfoCache object. '
'Skipping build of VirtualInterfaceList for it.')
continue
# It by design filters out deleted vifs.
vif_list = VirtualInterfaceList.\
get_by_instance_uuid(cctxt, instance.get('uuid'))
nw_info = info_cache.network_info
# This should be list with proper order of vifs,
# but we're not sure about that.
cached_vif_ids = [vif['id'] for vif in nw_info]
# This is ordered list of vifs taken from db.
db_vif_ids = [vif.uuid for vif in vif_list]
count_all += 1
if cached_vif_ids == db_vif_ids:
# The list of vifs and its order in cache and in
# virtual_interfaces is the same. So we could end here.
continue
elif len(db_vif_ids) < len(cached_vif_ids):
# Seems to be an instance from release older than
# Newton and we don't have full VirtualInterfaceList for
# it. Rewrite whole VirtualInterfaceList using interface
# order from InstanceInfoCache.
count_hit += 1
LOG.info('Got an instance %s with less VIFs defined in DB '
'than in cache. Could be Pre-Newton instance. '
'Building new VirtualInterfaceList for it.',
instance.uuid)
_regenerate_vif_list_base_on_cache(cctxt,
instance,
vif_list,
nw_info)
elif len(db_vif_ids) > len(cached_vif_ids):
# Seems vif list is inconsistent with cache.
# it could be a broken cache or interface
# during attach. Do nothing.
LOG.info('Got an unexpected number of VIF records in the '
'database compared to what was stored in the '
'instance_info_caches table for instance %s. '
'Perhaps it is an instance during interface '
'attach. Do nothing.', instance.uuid)
continue
else:
# The order is different between lists.
# We need a source of truth, so rebuild order
# from cache.
count_hit += 1
LOG.info('Got an instance %s with different order of '
'VIFs between DB and cache. '
'We need a source of truth, so rebuild order '
'from cache.', instance.uuid)
_regenerate_vif_list_base_on_cache(cctxt,
instance,
vif_list,
nw_info)
# Set marker to point last checked instance.
if instances:
marker = instances[-1].uuid
_set_or_delete_marker_for_migrate_instances(cctxt, marker)
return count_all, count_hit
# NOTE(mjozefcz): This is similiar to marker mechanism made for
# RequestSpecs object creation.
# Since we have a lot of instances to be check this
# will add a FAKE row that points to last instance
# we checked.
# Please notice that because of virtual_interfaces_instance_uuid_fkey
# we need to have FAKE_UUID instance object, even deleted one.
@db_api.pick_context_manager_writer
def _set_or_delete_marker_for_migrate_instances(context, marker=None):
context.session.query(models.VirtualInterface).filter_by(
instance_uuid=FAKE_UUID).delete()
# Create FAKE_UUID instance objects, only for marker, if doesn't exist.
# It is needed due constraint: virtual_interfaces_instance_uuid_fkey
instance = context.session.query(models.Instance).filter_by(
uuid=FAKE_UUID).first()
if not instance:
instance = objects.Instance(context)
instance.uuid = FAKE_UUID
instance.project_id = FAKE_UUID
instance.user_id = FAKE_UUID
instance.create()
# Thats fake instance, lets destroy it.
# We need only its row to solve constraint issue.
instance.destroy()
if marker is not None:
# ... but there can be a new marker to set
db_mapping = objects.VirtualInterface(context)
db_mapping.instance_uuid = FAKE_UUID
db_mapping.uuid = FAKE_UUID
db_mapping.tag = marker
db_mapping.address = 'ff:ff:ff:ff:ff:ff/%s' % FAKE_UUID
db_mapping.create()
@db_api.pick_context_manager_reader
def _get_marker_for_migrate_instances(context):
vif = (context.session.query(models.VirtualInterface).filter_by(
instance_uuid=FAKE_UUID)).first()
marker = vif['tag'] if vif else None
return marker
| 42.052147 | 81 | 0.598366 |
53140d26a1a0fe8bdc8745cfc8633379c01e96c9 | 7,768 | py | Python | discord/ext/commands/parameters.py | Alpha62579/discord.py | 366c65465a4c3d8b9d3b0b93ef8f07974fbd7a8d | ["MIT"] | null | null | null | discord/ext/commands/parameters.py | Alpha62579/discord.py | 366c65465a4c3d8b9d3b0b93ef8f07974fbd7a8d | ["MIT"] | null | null | null | discord/ext/commands/parameters.py | Alpha62579/discord.py | 366c65465a4c3d8b9d3b0b93ef8f07974fbd7a8d | ["MIT"] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import inspect
from operator import attrgetter
from typing import TYPE_CHECKING, Any, Literal, Optional, OrderedDict, Union, Protocol
from discord.utils import MISSING, maybe_coroutine
from .errors import NoPrivateMessage
from .converter import GuildConverter
from discord import (
Member,
User,
TextChannel,
VoiceChannel,
DMChannel,
Thread,
)
if TYPE_CHECKING:
from typing_extensions import Self
from discord import Guild
from .context import Context
__all__ = (
'Parameter',
'parameter',
'param',
'Author',
'CurrentChannel',
'CurrentGuild',
)
ParamKinds = Union[
Literal[inspect.Parameter.POSITIONAL_ONLY],
Literal[inspect.Parameter.POSITIONAL_OR_KEYWORD],
Literal[inspect.Parameter.VAR_POSITIONAL],
Literal[inspect.Parameter.KEYWORD_ONLY],
Literal[inspect.Parameter.VAR_KEYWORD],
]
empty: Any = inspect.Parameter.empty
def _gen_property(name: str) -> property:
attr = f'_{name}'
return property(
attrgetter(attr),
lambda self, value: setattr(self, attr, value),
doc=f"The parameter's {name}.",
)
class Parameter(inspect.Parameter):
r"""A class that stores information on a :class:`Command`\'s parameter.
This is a subclass of :class:`inspect.Parameter`.
.. versionadded:: 2.0
"""
__slots__ = ('_displayed_default', '_fallback')
def __init__(
self,
name: str,
kind: ParamKinds,
default: Any = empty,
annotation: Any = empty,
displayed_default: str = empty,
) -> None:
super().__init__(name=name, kind=kind, default=default, annotation=annotation)
self._name = name
self._kind = kind
self._default = default
self._annotation = annotation
self._displayed_default = displayed_default
self._fallback = False
def replace(
self,
*,
name: str = MISSING, # MISSING here cause empty is valid
kind: ParamKinds = MISSING,
default: Any = MISSING,
annotation: Any = MISSING,
displayed_default: Any = MISSING,
) -> Self:
if name is MISSING:
name = self._name
if kind is MISSING:
kind = self._kind # type: ignore # this assignment is actually safe
if default is MISSING:
default = self._default
if annotation is MISSING:
annotation = self._annotation
if displayed_default is MISSING:
displayed_default = self._displayed_default
return self.__class__(
name=name,
kind=kind,
default=default,
annotation=annotation,
displayed_default=displayed_default,
)
if not TYPE_CHECKING: # this is to prevent anything breaking if inspect internals change
name = _gen_property('name')
kind = _gen_property('kind')
default = _gen_property('default')
annotation = _gen_property('annotation')
@property
def required(self) -> bool:
""":class:`bool`: Whether this parameter is required."""
return self.default is empty
@property
def converter(self) -> Any:
"""The converter that should be used for this parameter."""
if self.annotation is empty:
return type(self.default) if self.default not in (empty, None) else str
return self.annotation[0] if isinstance(self.annotation, tuple) else self.annotation
@property
def displayed_default(self) -> Optional[str]:
"""Optional[:class:`str`]: The displayed default in :class:`Command.signature`."""
if self._displayed_default is not empty:
return self._displayed_default
return None if self.required else str(self.default)
async def get_default(self, ctx: Context[Any]) -> Any:
"""|coro|
Gets this parameter's default value.
Parameters
----------
ctx: :class:`Context`
The invocation context that is used to get the default argument.
"""
# pre-condition: required is False
if callable(self.default):
return await maybe_coroutine(self.default, ctx) # type: ignore
return self.default
def parameter(
*,
converter: Any = empty,
default: Any = empty,
displayed_default: str = empty,
) -> Any:
r"""parameter(\*, converter=..., default=..., displayed_default=...)
A way to assign custom metadata for a :class:`Command`\'s parameter.
.. versionadded:: 2.0
Examples
--------
A custom default can be used to have late binding behaviour.
.. code-block:: python3
@bot.command()
async def wave(ctx, to: discord.User = commands.parameter(default=lambda ctx: ctx.author)):
await ctx.send(f'Hello {to.mention} :wave:')
Parameters
----------
converter: Any
The converter to use for this parameter, this replaces the annotation at runtime which is transparent to type checkers.
default: Any
The default value for the parameter, if this is a :term:`callable` or a |coroutine_link|_ it is called with a
positional :class:`Context` argument.
displayed_default: :class:`str`
The displayed default in :attr:`Command.signature`.
"""
return Parameter(
name='empty',
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=converter,
default=default,
displayed_default=displayed_default,
)
class ParameterAlias(Protocol):
def __call__(
self,
*,
converter: Any = empty,
default: Any = empty,
displayed_default: str = empty,
) -> Any:
...
param: ParameterAlias = parameter
r"""param(\*, converter=..., default=..., displayed_default=...)
An alias for :func:`parameter`.
.. versionadded:: 2.0
"""
# some handy defaults
Author = parameter(
default=attrgetter('author'),
displayed_default='<you>',
converter=Union[Member, User],
)
Author._fallback = True
CurrentChannel = parameter(
default=attrgetter('channel'),
displayed_default='<this channel>',
converter=Union[TextChannel, DMChannel, Thread, VoiceChannel],
)
CurrentChannel._fallback = True
def default_guild(ctx: Context[Any]) -> Guild:
if ctx.guild is not None:
return ctx.guild
raise NoPrivateMessage()
CurrentGuild = parameter(
default=default_guild,
displayed_default='<this server>',
converter=GuildConverter,
)
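# --- Editor's illustrative sketch (not part of the original module). ---
# The handy defaults above are meant to be used directly as parameter defaults
# in user commands; ``bot`` and the reply text are assumptions for the demo.
def _example_guildinfo_command(bot):
    @bot.command()
    async def guildinfo(ctx, guild=CurrentGuild):
        # Outside a guild, CurrentGuild's default raises NoPrivateMessage instead.
        await ctx.send(f'{guild.name} has {guild.member_count} members')
    return guildinfo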
class Signature(inspect.Signature):
_parameter_cls = Parameter
parameters: OrderedDict[str, Parameter]
| 28.664207 | 127 | 0.663491 |
0ee3ef2c0a6bfb19a3e07da1f1aa521987b595ee | 1,857 | py | Python | sdk/dashboard/azure-mgmt-dashboard/azure/mgmt/dashboard/models/__init__.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2022-01-24T08:54:57.000Z | 2022-01-24T08:54:57.000Z |
sdk/dashboard/azure-mgmt-dashboard/azure/mgmt/dashboard/models/__init__.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null |
sdk/dashboard/azure-mgmt-dashboard/azure/mgmt/dashboard/models/__init__.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null |

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import ErrorAdditionalInfo
from ._models_py3 import ErrorDetail
from ._models_py3 import ErrorResponse
from ._models_py3 import ManagedGrafana
from ._models_py3 import ManagedGrafanaListResponse
from ._models_py3 import ManagedGrafanaProperties
from ._models_py3 import ManagedGrafanaUpdateParameters
from ._models_py3 import ManagedIdentity
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import OperationResult
from ._models_py3 import ResourceSku
from ._models_py3 import SystemData
from ._models_py3 import UserAssignedIdentity
from ._dashboard_management_client_enums import (
ActionType,
AutoGeneratedDomainNameLabelScope,
CreatedByType,
IdentityType,
LastModifiedByType,
Origin,
ProvisioningState,
ZoneRedundancy,
)
__all__ = [
'ErrorAdditionalInfo',
'ErrorDetail',
'ErrorResponse',
'ManagedGrafana',
'ManagedGrafanaListResponse',
'ManagedGrafanaProperties',
'ManagedGrafanaUpdateParameters',
'ManagedIdentity',
'OperationDisplay',
'OperationListResult',
'OperationResult',
'ResourceSku',
'SystemData',
'UserAssignedIdentity',
'ActionType',
'AutoGeneratedDomainNameLabelScope',
'CreatedByType',
'IdentityType',
'LastModifiedByType',
'Origin',
'ProvisioningState',
'ZoneRedundancy',
]
| 30.95 | 94 | 0.712439 |
91ba1b0ad11f14095e103a1cc7944658c9f8ea72 | 3,907 | py | Python | lagom/integrations/fast_api.py | meadsteve/lagom | 025993f42fa23547e333d81373523ced1baf4854 | ["MIT"] | 109 | 2019-06-02T13:40:38.000Z | 2022-03-08T18:35:17.000Z |
lagom/integrations/fast_api.py | meadsteve/lagom | 025993f42fa23547e333d81373523ced1baf4854 | ["MIT"] | 152 | 2019-06-03T11:54:13.000Z | 2022-03-30T11:31:03.000Z |
lagom/integrations/fast_api.py | meadsteve/lagom | 025993f42fa23547e333d81373523ced1baf4854 | ["MIT"] | 5 | 2020-02-05T09:44:12.000Z | 2022-01-31T08:41:38.000Z |

"""
FastAPI (https://fastapi.tiangolo.com/)
"""
from contextlib import contextmanager
from typing import TypeVar, Optional, Type, List, Iterator
from fastapi import Depends
from starlette.requests import Request
from ..context_based import ContextContainer
from ..definitions import PlainInstance
from ..interfaces import ExtendableContainer, ReadableContainer, WriteableContainer
from ..updaters import update_container_singletons
T = TypeVar("T")
class FastApiIntegration:
"""
Integration between a container and the FastAPI framework.
Provides a method `Depends` which functions in the same way as
FastApi `Depends`
"""
_container: ExtendableContainer
def __init__(
self,
container: ExtendableContainer,
request_singletons: Optional[List[Type]] = None,
request_context_singletons: Optional[List[Type]] = None,
):
self._container = container
self._request_singletons = request_singletons or []
self._request_context_singletons = request_context_singletons or []
def depends(self, dep_type: Type[T]) -> T:
"""Returns a Depends object which FastAPI understands
:param dep_type:
:return:
"""
def _container_from_request(request: Request) -> Iterator[ReadableContainer]:
"""
We use the state of the request object to store a single instance of the
container. Request level singletons can then be defined on this container.
We only need to construct it once per request. This container is also
wrapped in a ContextContainer which is yielded to fastapi and can call
the __exit__ methods of any context managers used constructing objects
during the requests lifetime.
"""
if (
not hasattr(request.state, "lagom_request_container")
or not request.state.lagom_request_container
):
request.state.lagom_request_container = self._build_container(request)
with request.state.lagom_request_container:
yield request.state.lagom_request_container
else:
# No need to "with" as it's already been done once and this
# will handle the exit
yield request.state.lagom_request_container
def _resolver(
container: ExtendableContainer = Depends(_container_from_request),
):
return container.resolve(dep_type)
return Depends(_resolver)
@contextmanager
def override_for_test(self) -> Iterator[WriteableContainer]:
"""
Returns a ContextManager that returns an editable container
that will temporarily alter the dependency injection resolution
of all dependencies bound to this container.
client = TestClient(app)
with deps.override_for_test() as test_container:
# FooService is an external API so mock it during test
test_container[FooService] = Mock(FooService)
response = client.get("/")
assert response.status_code == 200
:return:
"""
original = self._container
new_container_for_test = self._container.clone()
self._container = new_container_for_test # type: ignore
try:
yield new_container_for_test
finally:
self._container = original
def _build_container(self, request: Request) -> ContextContainer:
request_container = update_container_singletons(
self._container, self._request_singletons
)
request_container.define(Request, PlainInstance(request))
return ContextContainer(
request_container,
context_types=[],
context_singletons=self._request_context_singletons,
)
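# --- Editor's illustrative wiring sketch (not part of the original module). ---
# Shows one way the integration above is typically attached to a FastAPI app;
# ``Greeter`` is a made-up service and lagom's ``Container`` is assumed to be
# able to auto-wire it (it has a no-argument constructor).
def _example_app():
    from fastapi import FastAPI
    from lagom import Container

    class Greeter:
        def greet(self) -> str:
            return "hello"

    deps = FastApiIntegration(Container())
    app = FastAPI()

    @app.get("/greet")
    async def greet(greeter: Greeter = deps.depends(Greeter)):
        return {"message": greeter.greet()}

    return app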
| 36.514019 | 86 | 0.658818 |
e4ef43e7a61eac9dd2441e1637c4090adf27b0b0 | 529 | py | Python | tests/http_head/fake_slow_http_server.py | dantangfan/greenify | 794d12f718785607afaeb08f64d11604c8f33bdb | ["BSD-3-Clause"] | 5 | 2017-06-22T05:29:57.000Z | 2021-06-22T08:56:54.000Z |
tests/http_head/fake_slow_http_server.py | dantangfan/greenify | 794d12f718785607afaeb08f64d11604c8f33bdb | ["BSD-3-Clause"] | null | null | null |
tests/http_head/fake_slow_http_server.py | dantangfan/greenify | 794d12f718785607afaeb08f64d11604c8f33bdb | ["BSD-3-Clause"] | 1 | 2018-04-18T03:41:51.000Z | 2018-04-18T03:41:51.000Z |

# coding: utf-8
import time
from SimpleHTTPServer import SimpleHTTPRequestHandler
import SocketServer
PORT = 0x2304
BLOCKING_SECONDS = 10 # seconds
class Server(SocketServer.TCPServer):
allow_reuse_address = True
class Handler(SimpleHTTPRequestHandler):
def do_HEAD(self):
time.sleep(BLOCKING_SECONDS)
return SimpleHTTPRequestHandler.do_HEAD(self)
if __name__ == '__main__':
httpd = Server(("", PORT), Handler)
try:
httpd.serve_forever()
except:
httpd.server_close()
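# --- Editor's illustrative client sketch (not part of the original script). ---
# Demonstrates what the slow server above is for: a HEAD request with a short
# timeout should fail before the BLOCKING_SECONDS sleep finishes. Python 2
# style is kept to match the server; the 2-second timeout is an assumption.
def _example_client():
    import httplib
    import socket
    conn = httplib.HTTPConnection('127.0.0.1', PORT, timeout=2)
    try:
        conn.request('HEAD', '/')
        conn.getresponse()
    except socket.timeout:
        return 'timed out as expected'
    finally:
        conn.close()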
| 18.892857 | 53 | 0.714556 |
ef6036678a32909f8fb9a890f733d3c069c78932 | 653 | py | Python | python/oneflow/nn/graph/__init__.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | ["Apache-2.0"] | null | null | null |
python/oneflow/nn/graph/__init__.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | ["Apache-2.0"] | null | null | null |
python/oneflow/nn/graph/__init__.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | ["Apache-2.0"] | null | null | null |

"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .graph import Graph
from .block import Block, BlockConfig
| 36.277778 | 72 | 0.787136 |
8624d73e27b8ec71c0b7211d904f23f882295612 | 1,553 | py | Python | music_player/cmd.py | pontoniento/terplayer | 4d095db1d5e6943ab975acc07930066a3cc6589b | ["MIT"] | null | null | null |
music_player/cmd.py | pontoniento/terplayer | 4d095db1d5e6943ab975acc07930066a3cc6589b | ["MIT"] | null | null | null |
music_player/cmd.py | pontoniento/terplayer | 4d095db1d5e6943ab975acc07930066a3cc6589b | ["MIT"] | null | null | null |

import sys
from pathlib import Path
from music_player.player import MusicController
class CmdApp:
BASIC_PROMPT: str = 'Music Player'
def __init__(self):
self.music_controller: MusicController = MusicController()
self.prompt: str = CmdApp.BASIC_PROMPT
def cmd_loop(self) -> None:
while True:
self.clear_screen()
if self.music_controller.currently_playing and self.music_controller.currently_playing.is_playing():
self.prompt = self.music_controller.get_song_name()
sys.stdout.write(self.prompt + '>')
tokens = input().split()
if not tokens:
continue
command = tokens[0]
if command in ['quit', 'exit', 'break', 'q', 'e']:
break
elif command in ['play', 'playsong', 'pl']:
song_name = ' '.join(tokens[1:])
self.music_controller.download_song(song_name)
elif command in ['playlist']:
playlist = Path(' '.join(tokens[1:]))
self.music_controller.download_playlist(playlist)
elif command in ['stop', 's']:
self.prompt = CmdApp.BASIC_PROMPT
self.music_controller.stop_song()
else:
sys.stdout.write('Unrecognized Command - type help for list of commands.\n')
@classmethod
def clear_screen(cls) -> None:
sys.stdout.write(chr(27) + "[2J")
if __name__ == '__main__':
app = CmdApp()
app.cmd_loop()
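# --- Editor's illustrative sketch (not part of the original module). ---
# The parsing rule used by cmd_loop above: the first token picks the action and
# the remaining tokens are re-joined into the song or playlist name.
def _example_tokenizing():
    tokens = 'play never gonna give you up'.split()
    command, argument = tokens[0], ' '.join(tokens[1:])
    assert command == 'play'
    assert argument == 'never gonna give you up'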
| 28.759259 | 112 | 0.57566 |
fc45757b8664498a2e23a9794d77e7cba53e6ed5 | 571 | py | Python | pytest_docs/formatters/restuctured.py | liiight/pytest_docs | bdbd7b3e83f16582cadb66e54aea8070becf3e99 | ["MIT"] | 11 | 2018-11-06T14:36:58.000Z | 2022-02-02T13:56:19.000Z |
pytest_docs/formatters/restuctured.py | liiight/pytest_docs | bdbd7b3e83f16582cadb66e54aea8070becf3e99 | ["MIT"] | 3 | 2018-11-06T10:09:38.000Z | 2020-11-11T07:10:08.000Z |
pytest_docs/formatters/restuctured.py | liiight/pytest_docs | bdbd7b3e83f16582cadb66e54aea8070becf3e99 | ["MIT"] | 2 | 2019-04-05T12:48:08.000Z | 2020-11-05T07:10:10.000Z |

from pytest_docs.formatter import Formatter
class RSTFormatter(Formatter):
name = "rst"
marker_prefix = "\n**Markers:**"
@staticmethod
def module_name_format(element):
return "\n{}\n{}".format(element, "*" * len(element))
@staticmethod
def class_name_format(element):
return "\n{}\n{}".format(element, "-" * len(element))
@staticmethod
def func_name_format(element):
return "\n{}\n{}".format(element, "=" * len(element))
@staticmethod
def marker_format(marker):
return "\n- {}".format(marker)
| 24.826087 | 61 | 0.623468 |
c0ad1f2b3f61d4ce4e41ad80cb57a40219c270c6 | 1,040 | py | Python | training/urls.py | Atwinenickson/lendsuphumanresourcemanagement | b46df164d59a4e94300376d679e07bd9a60d6343 | ["MIT", "Unlicense"] | 36 | 2019-11-26T11:46:32.000Z | 2022-02-17T13:18:18.000Z |
training/urls.py | Atwinenickson/lendsuphumanresourcemanagement | b46df164d59a4e94300376d679e07bd9a60d6343 | ["MIT", "Unlicense"] | 13 | 2020-02-14T09:30:16.000Z | 2022-03-12T00:58:09.000Z |
training/urls.py | Atwinenickson/lendsuphumanresourcemanagement | b46df164d59a4e94300376d679e07bd9a60d6343 | ["MIT", "Unlicense"] | 16 | 2019-06-14T12:11:29.000Z | 2022-02-14T15:16:07.000Z |

from django.urls import path
from training import views
urlpatterns = [
path('user_training_page', views.user_training_page, name="user_training_page"),
path('schedule_training', views.schedule_training_page, name="schedule_training_page"),
path('edit_training_schedule/<int:training_schedule_id>/', views.edit_training_schedule,
name="edit_training_schedule"),
path('delete_training_schedule/<int:training_schedule_id>/', views.delete_training_schedule,
name="delete_training_schedule"),
path('training_schedules_page', views.training_schedules_page, name="training_schedules_page"),
path('approve_training_page', views.approve_training_page, name="approve_training_page"),
# Process
path('reject_training_application/<int:training_application_id>', views.reject_training_application,
name="reject_training_application"),
path('approve_training_application/<int:training_application_id>', views.approve_training_application,
name="approve_training_application"),
]
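# --- Editor's note (illustrative, not part of the original urlconf). ---
# With these patterns mounted at the project root, the named routes reverse to
# the expected paths, e.g.:
#
#     from django.urls import reverse
#     reverse("edit_training_schedule", args=[3])   # -> "/edit_training_schedule/3/"
#
# The leading "/" assumes this urlconf is included at the site root.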
| 52 | 106 | 0.785577 |
214e528e2793af83b13472d08137576fa5aef6b5 | 1,033 | py | Python | ratings/migrations/0011_auto_20211023_1743.py | CommanderStorm/rallyetool-v2 | 721413d6df8afc9347dac7ee83deb3a0ad4c01bc | ["MIT"] | 1 | 2021-10-03T17:49:53.000Z | 2021-10-03T17:49:53.000Z |
ratings/migrations/0011_auto_20211023_1743.py | FSTUM/rallyetool-v2 | 2f3e2b5cb8655abe023ed1215b7182430b75bb23 | ["MIT"] | 9 | 2021-11-23T10:13:43.000Z | 2022-03-01T15:04:15.000Z |
ratings/migrations/0011_auto_20211023_1743.py | CommanderStorm/rallyetool-v2 | 721413d6df8afc9347dac7ee83deb3a0ad4c01bc | ["MIT"] | 1 | 2021-10-16T09:07:47.000Z | 2021-10-16T09:07:47.000Z |

# Generated by Django 3.2.7 on 2021-10-23 15:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("ratings", "0010_auto_20211023_0238"),
]
operations = [
migrations.AlterField(
model_name="station",
name="latitude",
field=models.FloatField(
default=48.265,
help_text="Visible on the map",
verbose_name="Latitude of the station",
),
),
migrations.AlterField(
model_name="station",
name="longitude",
field=models.FloatField(
default=11.671,
help_text="Visible on the map",
verbose_name="Longitude of the station",
),
),
migrations.AlterField(
model_name="station",
name="tutor_amount",
field=models.PositiveSmallIntegerField(default=2, verbose_name="Amount of tutors needed"),
),
]
| 27.918919 | 102 | 0.54211 |
fc917ad203501288c5914a1f32fcac87439ac416 | 2,224 | py | Python | gwosc/catalog.py | rpfisher/gwosc | 76fa92d731c839adc17f6e80c7d27a847e47b874 | ["MIT"] | null | null | null |
gwosc/catalog.py | rpfisher/gwosc | 76fa92d731c839adc17f6e80c7d27a847e47b874 | ["MIT"] | null | null | null |
gwosc/catalog.py | rpfisher/gwosc | 76fa92d731c839adc17f6e80c7d27a847e47b874 | ["MIT"] | 4 | 2019-06-06T21:09:00.000Z | 2019-06-06T21:10:05.000Z |

# -*- coding: utf-8 -*-
# Copyright Duncan Macleod 2019
#
# This file is part of GWOSC.
#
# GWOSC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWOSC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWOSC. If not, see <http://www.gnu.org/licenses/>.
"""Catalog-parsing functions
"""
from . import (api, utils)
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
CACHE = {}
def clear_cache():
global CACHE
CACHE = {}
def download(catalog, host=api.DEFAULT_URL):
try:
return CACHE[catalog]
except KeyError:
return CACHE.setdefault(
catalog,
api.fetch_catalog_json(catalog, host=host)
)
def _nested_values(data):
if isinstance(data, dict):
for key in data:
for item in _nested_values(data[key]):
yield item
else:
yield data
def datasets(catalog, detector=None, segment=None, host=api.DEFAULT_URL):
data = download(catalog, host=host)["data"]
datasets = []
for event, edata in data.items():
files = edata["files"]
revision = files["DataRevisionNum"]
detectors = set(files["OperatingIFOs"].split())
if detector not in detectors | {None}:
continue
urls = [url for det in detectors for url in _nested_values(files[det])]
if segment and not (
urls and
utils.segments_overlap(segment, utils.urllist_extent(urls))
):
continue
datasets.append("{0}_{1}".format(event, revision))
return datasets
def events(catalog, detector=None, segment=None, host=api.DEFAULT_URL):
return [e.rsplit("_", 1)[0] for e in datasets(
catalog,
detector=detector,
segment=segment,
host=host,
)]
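# --- Editor's illustrative sketch (not part of the original module). ---
# Typical use of the two helpers above; "GWTC-1-confident" is a catalog name
# taken from the public GWOSC archive and the call needs network access.
def _example_query():
    names = datasets("GWTC-1-confident", detector="L1")
    event_names = events("GWTC-1-confident", detector="L1")
    return names, event_names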
| 28.151899 | 79 | 0.646133 |
d8b00c5f2f9ec45c3f6765f3989f0b6167a9be32 | 10,111 | py | Python | tests/beastrun_tests.py | SimonGreenhill/BEASTling | b3da97240e632ac720f210cce4477fb29d49de84 | ["BSD-2-Clause"] | 12 | 2015-10-30T09:25:09.000Z | 2021-12-09T17:06:50.000Z |
tests/beastrun_tests.py | SimonGreenhill/BEASTling | b3da97240e632ac720f210cce4477fb29d49de84 | ["BSD-2-Clause"] | 254 | 2015-11-03T10:37:05.000Z | 2021-07-23T19:57:35.000Z |
tests/beastrun_tests.py | SimonGreenhill/BEASTling | b3da97240e632ac720f210cce4477fb29d49de84 | ["BSD-2-Clause"] | 9 | 2015-11-02T09:57:31.000Z | 2021-05-12T00:48:03.000Z |

import os
import subprocess
from xml.etree import ElementTree as et
import shutil
import pathlib
import warnings
import pytest
import beastling.configuration
import beastling.beastxml
skip = [
("admin", "mk", "cldf_data_with_comma", "rate_var"),
# Beast interprets commas as separating alternative IDs (we
# think), so identical text before the comma -- as happens for the
# rate parameters in this test, because of the features' IDs --
# leads to beast finding duplicate IDs and dying. This behaviour
# is documented in the guidelines for IDs, but it would be nice to
# get rid of it, either by not creating objects with commas in IDs
# or by fixing beast not to split IDs.
]
@pytest.mark.beast
@pytest.mark.slow
@pytest.mark.parametrize(
'configs,assertion',
[
(("admin", "mk", "subsample"), None),
(("admin", "mk", "cldf_data_with_nonstandard_value_column"), None),
(("admin", "mk"), None),
(("admin", "mk", "birthdeath"), None),
(("admin", "mk", "uniform_treeprior"), None),
(("admin", "mk_as_if_addon"), None),
(("admin", "cldf_data"), None),
(("admin", "cldf1_wordlist"), None),
(("admin", "cldf1_wordlist_with_lang_table"), None),
(("admin", "cldf1_wordlist_external_codes"), None),
(("admin", "cldf1_structure"), None),
(("admin", "nonnumeric"), None),
(("admin", "noncode"), None),
(("admin", "bsvs"), None),
(("admin", "mk", "strictclockwithprior"), None),
(("admin", "binaryctmc"), None),
(("admin", "binaryctmc", "gamma_categories"), None),
(("admin", "binaryctmc", "estimated_freqs"), None),
(("admin", "binaryctmc", "rate_var"), None),
(("admin", "binaryctmc", "estimated_freqs", "rate_var"), None),
(("admin", "covarion_multistate"), None),
(("admin", "covarion_multistate", "covarion_per_feature_params"), None),
(("admin", "covarion_multistate", "ascertainment_true"), None),
(("admin", "covarion_multistate", "rate_var"), None),
(("admin", "covarion_multistate", "estimated_freqs"), None),
(("admin", "covarion_multistate", "do_not_share_params"), None),
(("admin", "covarion_multistate", "estimated_freqs", "rate_var"), None),
(("admin", "covarion_true_binary"), None),
(("admin", "covarion_binarised"), None),
(("admin", "bsvs", "robust_eigen"), None),
(("admin", "covarion_multistate", "robust_eigen"), None),
(("admin", "mk", "families"), None),
(("admin", "mk", "features"), None),
(("admin", "mk", "estimated_freqs"), None),
(("admin", "mk", "approx_freqs"), None),
(("admin", "mk", "uniform_freqs"), None),
(("admin", "bsvs", "estimated_freqs"), None),
(("admin", "covarion_multistate", "estimated_freqs"), None),
(("admin", "mk", "rate_var"), None),
(("admin", "mk", "rate_var", "rate_var_user_rates"), None),
(("admin", "mk", "rate_var", "rate_partition"), None),
(("admin", "mk", "rate_var", "rate_partition", "rate_partition_user_rates"), None),
(("admin", "mk", "rate_partition", "rate_partition_user_rates"), None),
(("admin", "mk", "monophyletic"), None),
(("admin", "mk", "monophyletic-bottom-up"), None),
(("admin", "mk", "monophyletic-partial"), None),
(("admin", "mk", "no_screen_logging"), None),
(("admin", "mk", "no_file_logging"), None),
(("admin", "mk", "starting_tree"), None),
(("admin", "mk", "starting_tree_with_internal_names"), None),
(("admin", "mk", "monophyly_tree"), None),
(("admin", "mk", "monophyly_tree_with_internal_names"), None),
(("admin", "mk", "sample_prior"), None),
(("admin", "mk", "union"), None),
(("admin", "mk", "intersection"), None),
(("admin", "mk", "relaxed"), None),
(("admin", "mk", "relaxed_params"), None),
(("admin", "mk", "relaxed_expon"), None),
(("admin", "mk", "relaxed_gamma"), None),
(("admin", "mk", "random"), None),
(("admin", "mk", "feature_with_comma"), None),
(("admin", "mk", "cldf_data_with_comma"), None),
(("admin", "mk", "cldf_data_with_comma", "rate_var"), None),
(("admin", "mk", "calibration"), None),
(("admin", "mk", "calibration_by_iso"), None),
(("admin", "mk", "calibration_nested"), None),
(("admin", "mk", "calibration_disjoint"), None),
(("admin", "mk", "calibration_nested_root"), None),
# Test below has calibration on Austronesian, but macroareas=Africa,
# resulting in an emtpy calibration, which is the point of the test
(("admin", "mk", "calibration", "macroareas"), None),
(("admin", "mk", "calibration_originate"), None),
(("admin", "mk", "calibration_uniform_params"), None),
(("admin", "mk", "calibration_normal_params"), None),
(("admin", "mk", "calibration_lognormal_params"), None),
(("admin", "mk", "calibration_upper_bound"), None),
(("admin", "mk", "calibration_lower_bound"), None),
(("admin", "mk", "calibration", "relaxed"), None),
(("admin", "mk", "calibration", "random"), None),
(("admin", "mk", "calibration", "monophyletic"), None),
(("admin", "mk", "calibration_tip"), None),
(("admin", "mk", "calibration_tip_multiple"), None),
(("admin", "mk", "calibration_tip_originate_explicit"), None),
(("admin", "mk", "calibration_tip_fixed"), None),
(("admin", "mk", "calibration_tip_before"), None),
(("admin", "mk", "calibration_tip_after"), None),
(("admin", "mk", "calibration_tip_uniform"), None),
(("admin", "mk", "pruned"), None),
(("admin", "mk", "pruned", "relaxed"), None),
(("admin", "mk", "geo"), None),
(("admin", "mk", "geo", "geo_user_loc"), None),
(("admin", "mk", "geo", "geo_sampled_tip"), None),
(("admin", "mk", "geo", "geo_tip_prior"), None),
(("admin", "mk", "geo_own_clock"), None),
(("admin", "mk", "monophyletic", "geo", "geo_sampled"), None),
(("admin", "mk", "monophyletic", "geo", "geo_prior"), None),
(("admin", "covarion_multistate", "pseudodollocovarion"), None),
(("admin", "covarion_multistate", "log_fine_probs", "pseudodollocovarion"), None),
(("admin", "covarion_multistate", "covarion_per_feature_params", "pseudodollocovarion"), None),
pytest.param(
("admin", "covarion_multistate", "robust_eigen", "pseudodollocovarion"), None,
# Currently, Beast's pseudodollocovarion model does not support the
# robust eigensystem implementation.
marks=pytest.mark.xfail),
(("admin", "covarion_multistate", "pseudodollocovarion_fix_freq"), None),
# Test that for 'log_fine_probs=True', probabilities are logged:
(
("admin", "covarion_multistate", "log_fine_probs"),
lambda dir: dir.joinpath("beastling_test.log").exists()),
# Test the root ASR output.
(
("admin", "mk", "ancestral_state_reconstruction", "ascertainment_false"),
lambda dir: dir.joinpath("beastling_test_reconstructed.log").exists()),
# Test the root ASR output under a binary (covarion) model.
(
("admin", "covarion_multistate", "ancestral_state_reconstruction", "ascertainment_false"),
lambda dir: dir.joinpath("beastling_test.log").exists()),
# Test the root ASR output under a Mk model with ascertainment correction.
(
("admin", "mk", "ancestral_state_reconstruction", "ascertainment_true"),
lambda dir: dir.joinpath("beastling_test.log").exists()),
# Test the root ASR output under a binary model with ascertainment correction.
(
("admin", "covarion_multistate", "ancestral_state_reconstruction", "ascertainment_true"),
lambda dir: dir.joinpath("beastling_test.log").exists()),
# Test the full-tree ASR output.
(
("admin", "mk", "ancestral_state_reconstruction", "taxa", "reconstruct_all"),
lambda dir: dir.joinpath("beastling_test.log").exists()),
# Test the clade ASR output.
(
("admin", "mk", "ancestral_state_reconstruction", "taxa", "reconstruct_one"),
lambda dir: dir.joinpath("beastling_test.log").exists()),
]
)
def test_beastrun(configs, assertion, config_factory, tmppath):
"""Turn each BEASTling config file in tests/configs into a
BEAST.xml, and feed it to BEAST, testing for a zero return
value, which suggests no deeply mangled XML."""
if configs in skip:
return
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
temp_filename = tmppath / 'test'
xml = beastling.beastxml.BeastXml(config_factory(*configs))
xml.write_file(str(temp_filename))
debug_copy = pathlib.Path('_test.xml')
shutil.copy(str(temp_filename), str(debug_copy))
xml.validate_ids()
if os.environ.get('CI'):
et.parse(str(temp_filename))
else:
## Data files etc. are all referenced by paths relative to the repos root.
#shutil.copytree(str(pathlib.Path(__file__).parent), str(tmppath / 'tests'))
try:
subprocess.check_call(
['beast', '-java', '-overwrite', str(temp_filename)],
cwd=str(tmppath),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
raise AssertionError(
"Beast run on {:} returned non-zero exit status "
"{:d}".format(configs, e.returncode))
if assertion:
assert assertion(tmppath)
if debug_copy.exists():
debug_copy.unlink()
| 49.807882 | 106 | 0.578973 |
e711744eec61923ce6d4de0ff7a2dd1da9fc2de9 | 5,954 | py | Python | dvc/command/run.py | zb0th/dvc | 5fdbc1882f73162419d5b84ed47a33e9e321f151 | ["Apache-2.0"] | 1 | 2020-07-25T08:23:32.000Z | 2020-07-25T08:23:32.000Z |
dvc/command/run.py | aliseramirez/dvc | 92cc9f7e6f19f3b92f43e28131fe50c20b297214 | ["Apache-2.0"] | null | null | null |
dvc/command/run.py | aliseramirez/dvc | 92cc9f7e6f19f3b92f43e28131fe50c20b297214 | ["Apache-2.0"] | null | null | null |

from __future__ import unicode_literals
import argparse
import logging
from dvc.command.base import CmdBase, append_doc_link
from dvc.exceptions import DvcException
logger = logging.getLogger(__name__)
class CmdRun(CmdBase):
def run(self):
overwrite = self.args.yes or self.args.overwrite_dvcfile
if not any(
[
self.args.deps,
self.args.outs,
self.args.outs_no_cache,
self.args.metrics,
self.args.metrics_no_cache,
self.args.outs_persist,
self.args.outs_persist_no_cache,
self.args.command,
]
): # pragma: no cover
logger.error(
"too few arguments. Specify at least one: '-d', '-o', '-O',"
" '-m', '-M', '--outs-persist', '--outs-persist-no-cache',"
" 'command'."
)
return 1
try:
self.repo.run(
cmd=self._parsed_cmd(),
outs=self.args.outs,
outs_no_cache=self.args.outs_no_cache,
metrics=self.args.metrics,
metrics_no_cache=self.args.metrics_no_cache,
deps=self.args.deps,
fname=self.args.file,
cwd=self.args.cwd,
wdir=self.args.wdir,
no_exec=self.args.no_exec,
overwrite=overwrite,
ignore_build_cache=self.args.ignore_build_cache,
remove_outs=self.args.remove_outs,
no_commit=self.args.no_commit,
outs_persist=self.args.outs_persist,
outs_persist_no_cache=self.args.outs_persist_no_cache,
)
except DvcException:
logger.exception("failed to run command")
return 1
return 0
def _parsed_cmd(self):
"""
We need to take into account two cases:
- ['python code.py foo bar']: Used mainly with dvc as a library
- ['echo', 'foo bar']: List of arguments received from the CLI
The second case would need quoting, as it was passed through:
dvc run echo "foo bar"
"""
if len(self.args.command) < 2:
return " ".join(self.args.command)
return " ".join(self._quote_argument(arg) for arg in self.args.command)
def _quote_argument(self, argument):
if " " not in argument or '"' in argument:
return argument
return '"{}"'.format(argument)
def add_parser(subparsers, parent_parser):
RUN_HELP = "Generate a stage file from a command and execute the command."
run_parser = subparsers.add_parser(
"run",
parents=[parent_parser],
description=append_doc_link(RUN_HELP, "run"),
help=RUN_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
run_parser.add_argument(
"-d",
"--deps",
action="append",
default=[],
help="Declare dependencies for reproducible cmd.",
)
run_parser.add_argument(
"-o",
"--outs",
action="append",
default=[],
help="Declare output file or directory.",
)
run_parser.add_argument(
"-O",
"--outs-no-cache",
action="append",
default=[],
help="Declare output file or directory "
"(do not put into DVC cache).",
)
run_parser.add_argument(
"-m",
"--metrics",
action="append",
default=[],
help="Declare output metric file or directory.",
)
run_parser.add_argument(
"-M",
"--metrics-no-cache",
action="append",
default=[],
help="Declare output metric file or directory "
"(do not put into DVC cache).",
)
run_parser.add_argument(
"-f", "--file", help="Specify name of the DVC file it generates."
)
run_parser.add_argument(
"-c", "--cwd", default=None, help="Deprecated, use -w and -f instead."
)
run_parser.add_argument(
"-w",
"--wdir",
default=None,
help="Directory within your repo to run your command in.",
)
run_parser.add_argument(
"--no-exec",
action="store_true",
default=False,
help="Only create stage file without actually running it.",
)
run_parser.add_argument(
"-y",
"--yes",
action="store_true",
default=False,
help="Deprecated, use --overwrite-dvcfile instead",
)
run_parser.add_argument(
"--overwrite-dvcfile",
action="store_true",
default=False,
help="Overwrite existing dvc file without asking for confirmation.",
)
run_parser.add_argument(
"--ignore-build-cache",
action="store_true",
default=False,
help="Run this stage even if it has been already ran with the same "
"command/dependencies/outputs/etc before.",
)
run_parser.add_argument(
"--remove-outs",
action="store_true",
default=False,
help="Deprecated, this is now the default behavior",
)
run_parser.add_argument(
"--no-commit",
action="store_true",
default=False,
help="Don't put files/directories into cache.",
)
run_parser.add_argument(
"--outs-persist",
action="append",
default=[],
help="Declare output file or directory that will not be "
"removed upon repro.",
)
run_parser.add_argument(
"--outs-persist-no-cache",
action="append",
default=[],
help="Declare output file or directory that will not be "
"removed upon repro (do not put into DVC cache).",
)
run_parser.add_argument(
"command", nargs=argparse.REMAINDER, help="Command to execute."
)
run_parser.set_defaults(func=CmdRun)
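# --- Editor's illustrative sketch (not part of the original module). ---
# Demonstrates the two quoting cases handled by CmdRun._parsed_cmd above; the
# stripped-down Namespace stands in for fully parsed CLI arguments.
def _example_quoting():
    cmd = CmdRun.__new__(CmdRun)  # skip CmdBase.__init__ purely for the demo
    cmd.args = argparse.Namespace(command=["echo", "foo bar"])
    assert cmd._parsed_cmd() == 'echo "foo bar"'
    cmd.args = argparse.Namespace(command=["python code.py foo bar"])
    assert cmd._parsed_cmd() == "python code.py foo bar"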
| 30.22335 | 79 | 0.562815 |
ac74f30ea8769f2fe0c5baa7ebf91a3d326b5975 | 268 | py | Python | hooks/pre-push.py | maltanar/dataset_loading | 75ba02f16735c78dfbd3e3962b484b9b57189dcd | ["MIT"] | 10 | 2018-03-08T11:15:17.000Z | 2021-09-12T12:35:26.000Z |
hooks/pre-push.py | maltanar/dataset_loading | 75ba02f16735c78dfbd3e3962b484b9b57189dcd | ["MIT"] | null | null | null |
hooks/pre-push.py | maltanar/dataset_loading | 75ba02f16735c78dfbd3e3962b484b9b57189dcd | ["MIT"] | 4 | 2018-07-06T15:54:35.000Z | 2021-03-04T08:10:07.000Z |

#!/usr/bin/env python
import argparse
import pytest
def parse_args():
parser = argparse.ArgumentParser()
args = parser.parse_args()
return args
def main(args=None):
pytest.main()
if __name__ == '__main__':
args = parse_args()
main(args)
| 13.4 | 38 | 0.660448 |
d13934a14961c6fe39508a55a7b38b4ed77ba16c | 1,073 | py | Python | tests/test_median.py | vbrednikov/nginx-loganalyzer | 29221c5305b3e4443a60215be0764369cdcaab11 | ["BSD-2-Clause"] | null | null | null |
tests/test_median.py | vbrednikov/nginx-loganalyzer | 29221c5305b3e4443a60215be0764369cdcaab11 | ["BSD-2-Clause"] | null | null | null |
tests/test_median.py | vbrednikov/nginx-loganalyzer | 29221c5305b3e4443a60215be0764369cdcaab11 | ["BSD-2-Clause"] | null | null | null |

import unittest
from decimal import Decimal
from nginx_loganalyzer import median
class TestMedian(unittest.TestCase):
def test_median_empty(self):
self.assertIsNone(median([]))
def test_median_1_element(self):
self.assertEqual(median([1]), 1)
def test_median_2_elements(self):
self.assertEqual(median([1, 2]), 1.5)
def test_median_wages_case_even(self):
self.assertEqual(median([Decimal(x)
for x in
'1, 2, 1.5, 2, 3, 2.2, 2.1, 10000'.split(',')
]
), Decimal('2.05'))
def test_median_wages_case_odd(self):
self.assertEqual(median([1, 1.9, 2, 1.5, 4, 3, 2.2, 2.1, 10000]), 2.1)
def test_median_decimal(self):
data = [Decimal(x)
for x in
'0.390 0.133 0.199 0.704 0.146 0.628 0.067 0.138 0.003 0.157'.split()
]
self.assertEqual(median(data), Decimal('0.1515'))
if __name__ == '__main__':
unittest.main()
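# --- Editor's illustrative sketch (not part of the original test module). ---
# The behaviour the tests above exercise, shown on plain unsorted lists: the
# middle element for odd lengths, the mean of the two middle ones for even.
def _example_median():
    assert median([3, 1, 2]) == 2
    assert median([4, 1, 2, 3]) == 2.5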
| 29 | 85 | 0.541473 |
f27be28dbe93fc551bbfe50247a88db5d4d2922e | 312 | py | Python | app/app/asgi.py | Diaga/knctU-Server | a3fb9ac00662fcdf6066ee838fa8c40bb8c208f3 | ["MIT"] | 1 | 2021-08-02T18:32:08.000Z | 2021-08-02T18:32:08.000Z |
app/app/asgi.py | Diaga/knctU-Server | a3fb9ac00662fcdf6066ee838fa8c40bb8c208f3 | ["MIT"] | 5 | 2021-03-09T17:02:32.000Z | 2022-02-26T17:28:38.000Z |
app/app/asgi.py | Diaga/knctU-Server | a3fb9ac00662fcdf6066ee838fa8c40bb8c208f3 | ["MIT"] | 1 | 2021-08-02T18:32:09.000Z | 2021-08-02T18:32:09.000Z |

"""
ASGI entrypoint. Configures Django and then runs the application
defined in the ASGI_APPLICATION setting.
"""
import os
import django
from channels.routing import get_default_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
django.setup()
application = get_default_application()
| 24 | 64 | 0.817308 |
7da629a6f353ffff0069d3472f4c2b616d412ca3 | 8,187 | py | Python | adafruit_pyportal/network.py | makermelissa/Adafruit_CircuitPython_PyPortal | d04c2a4d8dbb5ded2ab84459c4d5eab1c097ef1d | ["Unlicense", "MIT-0", "MIT"] | 49 | 2019-02-24T12:15:06.000Z | 2022-03-30T15:59:01.000Z |
adafruit_pyportal/network.py | makermelissa/Adafruit_CircuitPython_PyPortal | d04c2a4d8dbb5ded2ab84459c4d5eab1c097ef1d | ["Unlicense", "MIT-0", "MIT"] | 74 | 2019-02-23T17:26:06.000Z | 2022-01-14T16:11:58.000Z |
adafruit_pyportal/network.py | makermelissa/Adafruit_CircuitPython_PyPortal | d04c2a4d8dbb5ded2ab84459c4d5eab1c097ef1d | ["Unlicense", "MIT-0", "MIT"] | 63 | 2019-02-22T19:25:29.000Z | 2022-01-13T18:04:49.000Z |

# SPDX-FileCopyrightText: 2020 Melissa LeBlanc-Williams, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
"""
`adafruit_pyportal.network`
================================================================================
CircuitPython driver for Adafruit PyPortal.
* Author(s): Limor Fried, Kevin J. Walters, Melissa LeBlanc-Williams
Implementation Notes
--------------------
**Hardware:**
* `Adafruit PyPortal <https://www.adafruit.com/product/4116>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import gc
import neopixel
from adafruit_portalbase.wifi_coprocessor import WiFi
# pylint: disable=unused-import
from adafruit_portalbase.network import (
NetworkBase,
CONTENT_JSON,
CONTENT_TEXT,
)
# pylint: enable=unused-import
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_PyPortal.git"
# you'll need to pass in an io username, width, height, format (bit depth), io key, and then url!
IMAGE_CONVERTER_SERVICE = (
"https://io.adafruit.com/api/v2/%s/integrations/image-formatter?"
"x-aio-key=%s&width=%d&height=%d&output=BMP%d&url=%s"
)
class Network(NetworkBase):
"""Class representing the Adafruit PyPortal.
:param status_neopixel: The pin for the status NeoPixel. Use ``board.NEOPIXEL`` for the on-board
NeoPixel. Defaults to ``None``, not the status LED
:param esp: A passed ESP32 object. Can be used in cases where the ESP32 chip needs to be used
before calling the pyportal class. Defaults to ``None``.
:param busio.SPI external_spi: A previously declared spi object. Defaults to ``None``.
:param bool extract_values: If true, single-length fetched values are automatically extracted
from lists and tuples. Defaults to ``True``.
:param debug: Turn on debug print outs. Defaults to False.
:param convert_image: Determine whether or not to use the AdafruitIO image converter service.
Set as False if your image is already resized. Defaults to True.
:param image_url_path: The HTTP traversal path for a background image to display.
Defaults to ``None``.
:param image_json_path: The JSON traversal path for a background image to display. Defaults to
``None``.
:param image_resize: What size to resize the image we got from the json_path, make this a tuple
of the width and height you want. Defaults to ``None``.
:param image_position: The position of the image on the display as an (x, y) tuple. Defaults to
``None``.
:param image_dim_json_path: The JSON traversal path for the original dimensions of image tuple.
Used with fetch(). Defaults to ``None``.
"""
def __init__(
self,
*,
status_neopixel=None,
esp=None,
external_spi=None,
extract_values=True,
debug=False,
convert_image=True,
image_url_path=None,
image_json_path=None,
image_resize=None,
image_position=None,
image_dim_json_path=None,
secrets_data=None,
):
if status_neopixel:
status_led = neopixel.NeoPixel(status_neopixel, 1, brightness=0.2)
else:
status_led = None
wifi = WiFi(status_led=status_led, esp=esp, external_spi=external_spi)
super().__init__(
wifi,
extract_values=extract_values,
debug=debug,
secrets_data=secrets_data,
)
self._convert_image = convert_image
self._image_json_path = image_json_path
self._image_url_path = image_url_path
self._image_resize = image_resize
self._image_position = image_position
self._image_dim_json_path = image_dim_json_path
gc.collect()
@property
def ip_address(self):
"""Return the IP Address nicely formatted"""
return self._wifi.esp.pretty_ip(self._wifi.esp.ip_address)
def image_converter_url(self, image_url, width, height, color_depth=16):
"""Generate a converted image url from the url passed in,
with the given width and height. aio_username and aio_key must be
set in secrets."""
try:
aio_username = self._secrets["aio_username"]
aio_key = self._secrets["aio_key"]
except KeyError as error:
raise KeyError(
"\n\nOur image converter service require a login/password to rate-limit. Please register for a free adafruit.io account and place the user/key in your secrets file under 'aio_username' and 'aio_key'" # pylint: disable=line-too-long
) from error
return IMAGE_CONVERTER_SERVICE % (
aio_username,
aio_key,
width,
height,
color_depth,
image_url,
)
# pylint: disable=too-many-branches, too-many-statements
def process_image(self, json_data, sd_card=False):
"""
Process image content
:param json_data: The JSON data that we can pluck values from
:param bool sd_card: Whether or not we have an SD card inserted
"""
filename = None
position = None
image_url = None
if self._image_url_path:
image_url = self._image_url_path
if self._image_json_path:
image_url = self.json_traverse(json_data, self._image_json_path)
iwidth = 0
iheight = 0
if self._image_dim_json_path:
iwidth = int(self.json_traverse(json_data, self._image_dim_json_path[0]))
iheight = int(self.json_traverse(json_data, self._image_dim_json_path[1]))
print("image dim:", iwidth, iheight)
if image_url:
print("original URL:", image_url)
if self._convert_image:
if iwidth < iheight:
image_url = self.image_converter_url(
image_url,
int(
self._image_resize[1]
* self._image_resize[1]
/ self._image_resize[0]
),
self._image_resize[1],
)
else:
image_url = self.image_converter_url(
image_url, self._image_resize[0], self._image_resize[1]
)
print("convert URL:", image_url)
# convert image to bitmap and cache
# print("**not actually wgetting**")
filename = "/cache.bmp"
chunk_size = 4096 # default chunk size is 12K (for QSPI)
if sd_card:
filename = "/sd" + filename
chunk_size = 512 # current bug in big SD writes -> stick to 1 block
try:
self.wget(image_url, filename, chunk_size=chunk_size)
except OSError as error:
raise OSError(
"""\n\nNo writable filesystem found for saving datastream. Insert an SD card or set internal filesystem to be unsafe by setting 'disable_concurrent_write_protection' in the mount options in boot.py""" # pylint: disable=line-too-long
) from error
except RuntimeError as error:
raise RuntimeError("wget didn't write a complete file") from error
if iwidth < iheight:
pwidth = int(
self._image_resize[1]
* self._image_resize[1]
/ self._image_resize[0]
)
position = (
self._image_position[0] + int((self._image_resize[0] - pwidth) / 2),
self._image_position[1],
)
else:
position = self._image_position
image_url = None
gc.collect()
return filename, position
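# --- Editor's illustrative sketch (not part of the original module). ---
# Shows the URL shape produced by the converter-service template defined near
# the top of this file; the username, key, sizes and image URL are placeholders.
def _example_converter_url():
    return IMAGE_CONVERTER_SERVICE % (
        "example_user",
        "example_key",
        320,
        240,
        16,
        "https://example.com/photo.jpg",
    )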
| 38.07907 | 253 | 0.595212 |
19659d0f90e82fc943fa1f962817b4189ac83c3c | 596 | py | Python | app.py | LleeMcD/Mission-to-Mars | b27130208029e3ca13e66c11e8bdd74e79509a30 | ["ADSL"] | 1 | 2022-01-02T13:23:42.000Z | 2022-01-02T13:23:42.000Z |
app.py | LleeMcD/Mission-to-Mars | b27130208029e3ca13e66c11e8bdd74e79509a30 | ["ADSL"] | null | null | null |
app.py | LleeMcD/Mission-to-Mars | b27130208029e3ca13e66c11e8bdd74e79509a30 | ["ADSL"] | null | null | null |

from flask import Flask, render_template, redirect, url_for
from flask_pymongo import PyMongo
import scraping
app = Flask(__name__)
# Use flask_pymongo to set up mongo connection
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
@app.route("/")
def index():
mars = mongo.db.mars.find_one()
return render_template("index.html", mars=mars)
@app.route("/scrape")
def scrape():
mars = mongo.db.mars
mars_data = scraping.scrape_all()
mars.update({}, mars_data, upsert=True)
return redirect('/', code=302)
if __name__ == "__main__":
app.run() | 24.833333 | 62 | 0.714765 |
861e7297c13f03eecaf9d3ea4cd74288cc0a02e9 | 2,388 | py | Python | data/p4VQE/R1/benchmark/startQiskit_noisy66.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null |
data/p4VQE/R1/benchmark/startQiskit_noisy66.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null |
data/p4VQE/R1/benchmark/startQiskit_noisy66.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null |

# qubit number=3
# total number=10
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.h(input_qubit[0]) # number=7
prog.cz(input_qubit[1],input_qubit[0]) # number=8
prog.h(input_qubit[0]) # number=9
prog.cx(input_qubit[1],input_qubit[0]) # number=6
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5200
writefile = open("../data/startQiskit_noisy66.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 26.831461 | 118 | 0.631072 |
ad3705a7df059e60b3a873854622e54dfb7ae51a | 7,047 | py | Python | virtual/lib/python3.6/site-packages/astroid/exceptions.py | edithamadi/pitch_one | 40c8d1c67c77e483b29bd326721dde7f4a20120d | ["Unlicense"] | 3 | 2018-10-21T14:01:01.000Z | 2018-10-22T14:42:22.000Z |
virtual/lib/python3.6/site-packages/astroid/exceptions.py | edithamadi/pitch_one | 40c8d1c67c77e483b29bd326721dde7f4a20120d | ["Unlicense"] | 12 | 2018-10-03T19:45:36.000Z | 2022-03-11T23:54:25.000Z |
virtual/lib/python3.6/site-packages/astroid/exceptions.py | edithamadi/pitch_one | 40c8d1c67c77e483b29bd326721dde7f4a20120d | ["Unlicense"] | 3 | 2020-01-19T21:26:14.000Z | 2020-11-04T08:37:38.000Z |

# Copyright (c) 2007, 2009-2010, 2013 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015-2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015-2016 Ceridwen <ceridwenv@gmail.com>
# Copyright (c) 2016 Derek Gustafson <degustaf@gmail.com>
# Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""this module contains exceptions used in the astroid library
"""
from astroid import util
class AstroidError(Exception):
"""base exception class for all astroid related exceptions
AstroidError and its subclasses are structured, intended to hold
objects representing state when the exception is thrown. Field
values are passed to the constructor as keyword-only arguments.
Each subclass has its own set of standard fields, but use your
best judgment to decide whether a specific exception instance
needs more or fewer fields for debugging. Field values may be
used to lazily generate the error message: self.message.format()
will be called with the field names and values supplied as keyword
arguments.
"""
def __init__(self, message='', **kws):
super(AstroidError, self).__init__(message)
self.message = message
for key, value in kws.items():
setattr(self, key, value)
def __str__(self):
return self.message.format(**vars(self))
class AstroidBuildingError(AstroidError):
"""exception class when we are unable to build an astroid representation
Standard attributes:
modname: Name of the module that AST construction failed for.
error: Exception raised during construction.
"""
def __init__(self, message='Failed to import module {modname}.', **kws):
super(AstroidBuildingError, self).__init__(message, **kws)
class AstroidImportError(AstroidBuildingError):
"""Exception class used when a module can't be imported by astroid."""
class TooManyLevelsError(AstroidImportError):
"""Exception class which is raised when a relative import was beyond the top-level.
Standard attributes:
level: The level which was attempted.
name: the name of the module on which the relative import was attempted.
"""
level = None
name = None
def __init__(self, message='Relative import with too many levels '
'({level}) for module {name!r}', **kws):
super(TooManyLevelsError, self).__init__(message, **kws)
class AstroidSyntaxError(AstroidBuildingError):
"""Exception class used when a module can't be parsed."""
class NoDefault(AstroidError):
"""raised by function's `default_value` method when an argument has
no default value
Standard attributes:
func: Function node.
name: Name of argument without a default.
"""
func = None
name = None
def __init__(self, message='{func!r} has no default for {name!r}.', **kws):
super(NoDefault, self).__init__(message, **kws)
class ResolveError(AstroidError):
"""Base class of astroid resolution/inference error.
ResolveError is not intended to be raised.
Standard attributes:
context: InferenceContext object.
"""
context = None
class MroError(ResolveError):
"""Error raised when there is a problem with method resolution of a class.
Standard attributes:
mros: A sequence of sequences containing ClassDef nodes.
cls: ClassDef node whose MRO resolution failed.
context: InferenceContext object.
"""
mros = ()
cls = None
def __str__(self):
mro_names = ", ".join("({})".format(", ".join(b.name for b in m))
for m in self.mros)
return self.message.format(mros=mro_names, cls=self.cls)
class DuplicateBasesError(MroError):
"""Error raised when there are duplicate bases in the same class bases."""
class InconsistentMroError(MroError):
"""Error raised when a class's MRO is inconsistent."""
class SuperError(ResolveError):
"""Error raised when there is a problem with a *super* call.
Standard attributes:
*super_*: The Super instance that raised the exception.
context: InferenceContext object.
"""
super_ = None
def __str__(self):
return self.message.format(**vars(self.super_))
class InferenceError(ResolveError):
"""raised when we are unable to infer a node
Standard attributes:
node: The node inference was called on.
context: InferenceContext object.
"""
node = None
context = None
def __init__(self, message='Inference failed for {node!r}.', **kws):
super(InferenceError, self).__init__(message, **kws)
# Why does this inherit from InferenceError rather than ResolveError?
# Changing it causes some inference tests to fail.
class NameInferenceError(InferenceError):
"""Raised when a name lookup fails, corresponds to NameError.
Standard attributes:
name: The name for which lookup failed, as a string.
scope: The node representing the scope in which the lookup occurred.
context: InferenceContext object.
"""
name = None
scope = None
def __init__(self, message='{name!r} not found in {scope!r}.', **kws):
super(NameInferenceError, self).__init__(message, **kws)
class AttributeInferenceError(ResolveError):
"""Raised when an attribute lookup fails, corresponds to AttributeError.
Standard attributes:
target: The node for which lookup failed.
attribute: The attribute for which lookup failed, as a string.
context: InferenceContext object.
"""
target = None
attribute = None
def __init__(self, message='{attribute!r} not found on {target!r}.', **kws):
super(AttributeInferenceError, self).__init__(message, **kws)
class UseInferenceDefault(Exception):
"""exception to be raised in custom inference function to indicate that it
should go back to the default behaviour
"""
class _NonDeducibleTypeHierarchy(Exception):
"""Raised when is_subtype / is_supertype can't deduce the relation between two types."""
class AstroidIndexError(AstroidError):
"""Raised when an Indexable / Mapping does not have an index / key."""
class AstroidTypeError(AstroidError):
"""Raised when a TypeError would be expected in Python code."""
class InferenceOverwriteError(AstroidError):
"""Raised when an inference tip is overwritten
Currently only used for debugging.
"""
# Backwards-compatibility aliases
OperationError = util.BadOperationMessage
UnaryOperationError = util.BadUnaryOperationMessage
BinaryOperationError = util.BadBinaryOperationMessage
SuperArgumentTypeError = SuperError
UnresolvableName = NameInferenceError
NotFoundError = AttributeInferenceError
AstroidBuildingException = AstroidBuildingError
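# --- Editor's illustrative sketch (not part of the original module). ---
# Shows how the keyword fields feed the lazily formatted message implemented by
# AstroidError.__str__ above; the names below are made up for the demo.
def _example_structured_message():
    err = NameInferenceError(name="missing_var", scope="module scope")
    return str(err)  # "'missing_var' not found in 'module scope'."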
| 32.325688 | 92 | 0.703278 |
da9c61c88b0b014e1d64705ccc9b212c0987d738 | 608 | py | Python | src/users/migrations/0021_alter_contact_user.py | Rey092/SwipeApp | 8451912aeb8b1a8fcd52bd9c58afb09b3e49768d | ["MIT"] | null | null | null |
src/users/migrations/0021_alter_contact_user.py | Rey092/SwipeApp | 8451912aeb8b1a8fcd52bd9c58afb09b3e49768d | ["MIT"] | null | null | null |
src/users/migrations/0021_alter_contact_user.py | Rey092/SwipeApp | 8451912aeb8b1a8fcd52bd9c58afb09b3e49768d | ["MIT"] | null | null | null |

# Generated by Django 3.2.4 on 2021-08-15 07:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("users", "0020_alter_file_file"),
]
operations = [
migrations.AlterField(
model_name="contact",
name="user",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
]
| 23.384615 | 60 | 0.575658 |
af26cd84a1a1e9cd8c6ecdd0a0e0ef801002872c | 5,184 | py | Python | src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_vm_parameters.py | v-Ajnava/azure-cli | febec631d79bfca151e84267b5b409594bad598e | ["MIT"] | null | null | null |
src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_vm_parameters.py | v-Ajnava/azure-cli | febec631d79bfca151e84267b5b409594bad598e | ["MIT"] | 3 | 2021-03-26T00:48:20.000Z | 2022-03-29T22:05:39.000Z |
src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/test_vm_parameters.py | v-Ajnava/azure-cli | febec631d79bfca151e84267b5b409594bad598e | ["MIT"] | 1 | 2017-12-28T04:51:44.000Z | 2017-12-28T04:51:44.000Z |

# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from azure.cli.core.application import APPLICATION, Configuration
def mock_echo_args(command_name, parameters):
try:
argv = ' '.join((command_name, parameters)).split()
APPLICATION.initialize(Configuration())
command_table = APPLICATION.configuration.get_command_table(argv)
prefunc = command_table[command_name].handler
command_table[command_name].handler = lambda args: args
parsed_namespace = APPLICATION.execute(argv)
return parsed_namespace
finally:
command_table[command_name].handler = prefunc
class TestVMValidators(unittest.TestCase):
def _mock_get_subscription_id():
return '00000000-0000-0000-0000-000000000000'
@mock.patch('azure.cli.core.commands.client_factory.get_subscription_id', _mock_get_subscription_id)
def test_vm_nics(self):
from argparse import Namespace
from azure.cli.command_modules.vm._validators import _validate_vm_create_nics
for i in range(0, 100):
ns = Namespace()
ns.resource_group_name = 'rg'
ns.nics = ['nic1', 'nic2']
_validate_vm_create_nics(ns)
nic1_expected = {
"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic1",
"properties": {
"primary": True
}
}
nic2_expected = {
"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic2",
"properties": {
"primary": False
}
}
self.assertEqual(ns.nics[0], nic1_expected)
self.assertEqual(ns.nics[1], nic2_expected)
class Test_ArgumentParser(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def test_parse_vm_show(self):
# If we use an ID as the positional parameter, we should
# extract the resource group and name from it...
args = mock_echo_args('vm show',
'--id /subscriptions/00000000-0000-0000-0000-0123456789abc/resourceGroups/thisisaresourcegroup/providers/Microsoft.Compute/virtualMachines/thisisavmname') # pylint: disable=line-too-long
self.assertDictEqual({
'resource_group_name': 'thisisaresourcegroup',
'vm_name': 'thisisavmname',
'show_details': False
}, args.result)
# Invalid resource ID should trigger the missing resource group
# parameter failure
with self.assertRaises(SystemExit):
mock_echo_args('vm show', '--id /broken')
# Got to provide a resource group if you are using a simple name and
# not an ID as a parameter
with self.assertRaises(SystemExit):
mock_echo_args('vm show', '--id missing-resource-group')
def test_parse_vm_list(self):
# Resource group name is optional for vm list, so
# we should see a successfully parsed namespace
args = mock_echo_args('vm list', '')
self.assertDictEqual({
'resource_group_name': None,
'show_details': False
}, args.result)
# if resource group name is specified, however,
# it should get passed through...
args = mock_echo_args('vm list', '-g hullo')
self.assertDictEqual({
'resource_group_name': 'hullo',
'show_details': False
}, args.result)
consistent_arguments = {
'resource_group_name': ('--resource-group', '-g'),
'virtual_machine_name': ('--vm-name',),
}
def test_command_consistency(self):
argv = ['vm']
APPLICATION.initialize(Configuration())
command_table = APPLICATION.configuration.get_command_table(argv)
vm_commands = ((vm_command, metadata) for vm_command, metadata
in command_table.items() if vm_command.startswith('vm'))
for command_name, command_metadata in vm_commands:
for argument_name, expected_options_list in self.consistent_arguments.items():
try:
actual_options_list = command_metadata.arguments[argument_name].options_list
self.assertEqual(actual_options_list, expected_options_list,
'Argument {} of command {} has inconsistent flags'.format(
argument_name,
command_name
))
except KeyError:
pass
if __name__ == '__main__':
unittest.main()
| 38.977444 | 217 | 0.599344 |
a5490da55fb9c37fea0b2e8d083f629a2db3f889 | 4,593 | py | Python | src/m3.py | 4n0Wd/05a-Debugging | dd5084987d638c16df81f4dd89e446a9edd0c06e | [
"MIT"
] | null | null | null | src/m3.py | 4n0Wd/05a-Debugging | dd5084987d638c16df81f4dd89e446a9edd0c06e | [
"MIT"
] | null | null | null | src/m3.py | 4n0Wd/05a-Debugging | dd5084987d638c16df81f4dd89e446a9edd0c06e | [
"MIT"
] | null | null | null | """
This module lets you practice DEBUGGING when LOGIC ERRORS occur.
That is, no run-time exception occurs, but the function simply
does not do the right thing.
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Henry.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
########################################################################
#
# DONE: 2. READ these instructions, ASKING QUESTIONS as needed.
#
# This module contains "broken" functions, as in m1 and m2.
# FOLLOW THE SAME STEPS as in the instructions of m1.py
# to find and correct the mistakes in these functions.
#
# The broken functions in here have LOGIC errors.
# The code does NOT break when you run it,
# but it does not produce the correct output.
#
# In THIS module, the mistakes may be ANYWHERE in the module
# EXCEPT:
# -- The is_prime function below is correct.
# -- The tests themselves are correct.
#
# *** IMPORTANT: ***
# Resist the urge to "fiddle" with the code until you stumble
# upon something that works. This exercise will be helpful
# to you ONLY if you use it as an opportunity to learn
# what the error messages mean and how to react to them.
#
# *** ASK QUESTIONS AS NEEDED! ***
#
# When you believe you understand these instructions,
# change the above TO DO to DONE.
#
########################################################################
def main():
""" Calls the TEST functions in this module. """
run_test_broken_1()
########################################################################
# Students:
# Do NOT touch the following is_prime function - it has no TO DO.
# Do NOT copy code from the is_prime function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# There are NO errors in this is_prime function.
########################################################################
def is_prime(n):
"""
What comes in: An integer n >= 2.
What goes out:
-- Returns True if the given integer is prime,
else returns False.
Side effects: None.
Examples:
-- is_prime(11) returns True
-- is_prime(12) returns False
-- is_prime(2) returns True
Note: The algorithm used here is simple and clear but slow.
"""
for k in range(2, (n // 2) + 1):
if n % k == 0:
return False
return True
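########################################################################
# Editorial note (hedged, NOT part of the original exercise): is_prime
# above deliberately trades speed for clarity. A common alternative,
# shown only as a commented illustration and not used by the tests,
# stops trial division at sqrt(n):
#
#     def is_prime_sqrt(n):
#         k = 2
#         while k * k <= n:
#             if n % k == 0:
#                 return False
#             k = k + 1
#         return n >= 2
#
########################################################################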
########################################################################
# Students: Do NOT change any of the TEST functions.
# There are NO errors in the TESTS.
########################################################################
def run_test_broken_1():
""" Tests the broken_1 function. """
print()
print('--------------------------------------------------')
print('Testing the broken_1 function:')
print('--------------------------------------------------')
expected = 3
actual = broken_1(3) # Test 1 of broken_1
print('Expected:', expected)
print('Actual: ', actual)
expected = 4
actual = broken_1(10) # Test 2 of broken_1
print('Expected:', expected)
print('Actual: ', actual)
expected = 135 # Yes, this is the correct answer
actual = broken_1(1000) # Test 3 of broken_1
print('Expected:', expected)
print('Actual: ', actual)
# ----------------------------------------------------------------------
# DONE: 3. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_1(m):
"""
What comes in: a positive integer m that is at least 2.
What goes out: Returns the number of prime numbers
between m and (2m + 1) inclusive.
Side effects: None.
Examples:
If m is 3, this function returns 3 since there
are 3 primes between 3 and 7 (namely: 3, 5, and 7).
If m is 10, then this function returns 4 since there
are 4 primes between 10 and 21 (namely: 11, 13, 17 and 19).
Type hints:
:type m: int
"""
# ** For full credit you must appropriately
# ** use (call) the is_prime function that is DEFINED ABOVE.
count = 0
for k in range(m + 2):
if is_prime(k + m):
count = count + 1
return count
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 34.276119 | 72 | 0.521881 |
5d1cc2d544e08adacc9bdaac0cca6cb53e53bb65 | 27,146 | py | Python | kairon/utils.py | ash-pramila/chiron | ed207d52766fcce48ebc884ac97185b2901161d4 | [
"Apache-2.0"
] | null | null | null | kairon/utils.py | ash-pramila/chiron | ed207d52766fcce48ebc884ac97185b2901161d4 | [
"Apache-2.0"
] | 1 | 2021-01-29T22:20:59.000Z | 2021-01-29T22:20:59.000Z | kairon/utils.py | ash-pramila/chiron | ed207d52766fcce48ebc884ac97185b2901161d4 | [
"Apache-2.0"
] | null | null | null | import os
import re
import shutil
import string
import tempfile
from datetime import datetime, timedelta
from glob import glob, iglob
from html import escape
from pathlib import Path
from io import BytesIO
from secrets import choice
from smtplib import SMTP
from typing import Text, List, Dict
from rasa.utils.endpoints import EndpointConfig
import requests
import yaml
from fastapi.security import OAuth2PasswordBearer
from jwt import encode, decode
from mongoengine import StringField, ListField
from mongoengine.document import BaseDocument, Document
from mongoengine.errors import ValidationError
from passlib.context import CryptContext
from password_strength import PasswordPolicy
from password_strength.tests import Special, Uppercase, Numbers, Length
from pymongo.errors import InvalidURI
from pymongo.uri_parser import (
SRV_SCHEME_LEN,
SCHEME,
SCHEME_LEN,
SRV_SCHEME,
parse_userinfo,
)
from rasa.shared.constants import DEFAULT_CONFIG_PATH, DEFAULT_DATA_PATH, DEFAULT_DOMAIN_PATH
from rasa.shared.constants import DEFAULT_MODELS_PATH
from rasa.shared.nlu.constants import TEXT
from rasa.core import config as configuration
from rasa.core.tracker_store import MongoTrackerStore
from rasa.shared.core.training_data.structures import StoryGraph
from rasa.shared.importers.rasa import Domain
from rasa.nlu.components import ComponentBuilder
from rasa.nlu.config import RasaNLUModelConfig
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.formats.markdown import MarkdownReader
from rasa.shared.nlu.training_data import entities_parser
from smart_config import ConfigLoader
from validators import ValidationFailure
from validators import email as mail_check
from .action_server.data_objects import HttpActionConfig
from .api.models import HttpActionParametersResponse, HttpActionConfigResponse
from .data_processor.constant import TRAINING_DATA_GENERATOR_STATUS
from .exceptions import AppException
from kairon.data_processor.cache import InMemoryAgentCache
from loguru import logger
class Utility:
"""Class contains logic for various utilities"""
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/auth/login")
environment = {}
password_policy = PasswordPolicy.from_names(
length=8, # min length: 8
uppercase=1, # need min. 1 uppercase letters
numbers=1, # need min. 1 digits
special=1, # need min. 1 special characters
)
markdown_reader = MarkdownReader()
email_conf = {}
@staticmethod
def check_empty_string(value: str):
"""
checks for empty string
:param value: string value
:return: boolean
"""
if not value:
return True
if not value.strip():
return True
else:
return False
@staticmethod
def prepare_nlu_text(example: Text, entities: List[Dict]):
"""
combines plain text and entities into training example format
:param example: training example plain text
:param entities: list of entities
        :return: training example combined with entities
"""
if not Utility.check_empty_string(example):
if entities:
for entity in entities:
example = example.replace(
entity["value"],
"[" + entity["value"] + "](" + entity["entity"] + ")",
)
return example
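    # Illustrative (hedged) example of what prepare_nlu_text produces, given
    # the markdown entity convention used above; values are made up:
    #   Utility.prepare_nlu_text("book a flight to paris",
    #                            [{"value": "paris", "entity": "city"}])
    #   -> "book a flight to [paris](city)"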
@staticmethod
def validate_document_list(documents: List[BaseDocument]):
"""
validates list of documents
:param documents: list of documents
:return: None
"""
if documents:
for document in documents:
document.validate()
@staticmethod
def load_yaml(file: Text):
"""
loads yaml file
:param file: yaml file path
:return: dict
"""
with open(file) as fp:
return yaml.safe_load(fp)
@staticmethod
def load_evironment():
"""
Loads the environment variables and their values from the
system.yaml file for defining the working environment of the app
:return: None
"""
Utility.environment = ConfigLoader(os.getenv("system_file", "./system.yaml")).get_config()
@staticmethod
def validate_fields(fields: Dict, data: Dict):
"""
validate fields
:param fields: fields
:param data: data
:return: None
"""
error = ""
for key, value in fields.items():
if isinstance(value, StringField):
                if data.get(key) is None or not str(data[key]).strip():
error += "\n " + key + " cannot be empty or blank spaces"
elif isinstance(value, ListField):
                if value.required and not data.get(key):
error += "\n " + key + " cannot be empty"
if error:
            raise AppException(error)
@staticmethod
def is_exist(
document: Document, exp_message: Text = None, raise_error=True, *args, **kwargs
):
"""
check if document exist
:param document: document type
:param exp_message: exception message
:param raise_error: boolean to raise exception
:param kwargs: filter parameters
:return: boolean
"""
doc = document.objects(**kwargs)
if doc.__len__():
if raise_error:
if Utility.check_empty_string(exp_message):
raise AppException("Exception message cannot be empty")
raise AppException(exp_message)
else:
return True
else:
if not raise_error:
return False
@staticmethod
def verify_password(plain_password, hashed_password):
"""
        verify password in constant time
:param plain_password: user password
:param hashed_password: saved password
:return: boolean
"""
return Utility.pwd_context.verify(plain_password, hashed_password)
@staticmethod
def get_password_hash(password):
"""
convert plain password to hashed
:param password: plain password
:return: hashed password
"""
if not Utility.check_empty_string(password):
return Utility.pwd_context.hash(password)
@staticmethod
def get_latest_file(folder):
"""
fetches latest file from folder
:param folder: folder path
:return: latest file
"""
if not os.path.exists(folder):
raise AppException("Folder does not exists!")
return max(iglob(folder + "/*"), key=os.path.getctime)
@staticmethod
def check_empty_list_elements(items: List[Text]):
"""
checks if any of the input strings are empty
:param items: text list
:return: boolean
"""
for item in items:
if Utility.check_empty_string(item):
return True
return False
@staticmethod
def deploy_model(endpoint: Dict, bot: Text):
"""
deploys the model to the specified endpoint
:param endpoint: endpoint configuration
:param bot: bot id
:return: endpoint deployed response
"""
if not endpoint or not endpoint.get("bot_endpoint"):
raise AppException("Please configure the bot endpoint for deployment!")
headers = {"Content-type": "application/json", "Accept": "text/plain"}
url = endpoint["bot_endpoint"].get("url")
if endpoint["bot_endpoint"].get("token_type") and endpoint["bot_endpoint"].get(
"token"
):
headers["Authorization"] = (
endpoint["bot_endpoint"].get("token_type")
+ " "
+ endpoint["bot_endpoint"].get("token")
)
try:
model_file = Utility.get_latest_file(os.path.join(DEFAULT_MODELS_PATH, bot))
response = requests.put(
url + "/model", json={"model_file": model_file}, headers=headers,
)
if response.status_code == 204:
result = "Model was successfully replaced."
else:
json_response = response.json()
if isinstance(json_response, str):
result = escape(json_response)
elif isinstance(json_response, dict):
if "message" in json_response:
result = escape(json_response["message"])
elif "reason" in json_response:
result = escape(json_response["reason"])
else:
result = None
else:
result = None
except requests.exceptions.ConnectionError:
raise AppException("Host is not reachable")
except Exception as e:
raise AppException(e)
return result, model_file
@staticmethod
def generate_password(size=8, chars=string.ascii_letters + string.digits):
"""
generates password
:param size: size of password
:param chars: password combination
:return: generated password
"""
return "".join(choice(chars) for _ in range(size))
@staticmethod
def save_files(nlu: bytes, domain: bytes, stories: bytes, config: bytes):
"""
convert mongo data to individual files
:param nlu: nlu data
:param domain: domain data
:param stories: stories data
:param config: config data
:return: files path
"""
temp_path = tempfile.mkdtemp()
data_path = os.path.join(temp_path, DEFAULT_DATA_PATH)
os.makedirs(data_path)
nlu_path = os.path.join(data_path, "nlu.md")
domain_path = os.path.join(temp_path, DEFAULT_DOMAIN_PATH)
stories_path = os.path.join(data_path, "stories.md")
config_path = os.path.join(temp_path, DEFAULT_CONFIG_PATH)
Utility.write_to_file(nlu_path, nlu)
Utility.write_to_file(domain_path, domain)
Utility.write_to_file(stories_path, stories)
Utility.write_to_file(config_path, config)
return temp_path
@staticmethod
def write_to_file(file: Text, data: bytes):
"""
open file in binary mode
:param file: file path
:param data: data to write
:return: None
"""
with open(file, "wb") as w:
w.write(data)
w.flush()
@staticmethod
def delete_directory(path: Text):
"""
deletes directory with all files
:param path: directory path
:return: None
"""
shutil.rmtree(path)
@staticmethod
def create_zip_file(
nlu: TrainingData, domain: Domain, stories: StoryGraph, config: Dict, bot: Text
):
"""
adds training files to zip
:param nlu: nlu data
:param domain: domain data
:param stories: stories data
:param config: config data
:param bot: bot id
:return: None
"""
directory = Utility.save_files(
nlu.nlu_as_markdown().encode(),
domain.as_yaml().encode(),
stories.as_story_string().encode(),
yaml.dump(config).encode(),
)
zip_path = os.path.join(tempfile.gettempdir(), bot)
zip_file = shutil.make_archive(zip_path, format="zip", root_dir=directory)
shutil.rmtree(directory)
return zip_file
@staticmethod
def load_file_in_memory(file: Text):
"""
load file in memory
:param file: file path
:return: bytes
"""
data = BytesIO()
with open(file, "rb") as fo:
data.write(fo.read())
data.seek(0)
os.remove(file)
return data
@staticmethod
def valid_password(password: Text):
"""
validate password against password policy
:param password: password
:return: None
:exception: list of failed policies
"""
results = Utility.password_policy.test(password)
if results:
response = []
for result in results:
if isinstance(result, Length):
response.append("Password length must be " + str(result.length))
elif isinstance(result, Special):
response.append("Missing " + str(result.count) + " special letter")
elif isinstance(result, Uppercase):
response.append(
"Missing " + str(result.count) + " uppercase letter"
)
elif isinstance(result, Numbers):
response.append("Missing " + str(result.count) + "number")
if response:
raise AppException("\n".join(response))
@staticmethod
def delete_document(documents: List[Document], bot: Text, user: Text, **kwargs):
"""
perform soft delete on list of mongo collections
:param documents: list of mongo collections
:param bot: bot id
:param user: user id
:return: NONE
"""
for document in documents:
kwargs['bot'] = bot
update = {'set__user': user, 'set__timestamp': datetime.utcnow()}
if "status" in document._db_field_map:
kwargs['status'] = True
update['set__status'] = False
fetched_documents = document.objects(**kwargs)
if fetched_documents.count() > 0:
fetched_documents.update(**update)
@staticmethod
def extract_user_password(uri: str):
"""
extract username, password and host with port from mongo uri
:param uri: mongo uri
:return: username, password, scheme, hosts
"""
if uri.startswith(SCHEME):
scheme_free = uri[SCHEME_LEN:]
scheme = uri[:SCHEME_LEN]
elif uri.startswith(SRV_SCHEME):
scheme_free = uri[SRV_SCHEME_LEN:]
scheme = uri[:SRV_SCHEME_LEN]
else:
raise InvalidURI(
"Invalid URI scheme: URI must "
"begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME)
)
if not scheme_free:
raise InvalidURI("Must provide at least one hostname or IP.")
host_part, _, _ = scheme_free.partition("/")
if "@" in host_part:
userinfo, _, hosts = host_part.rpartition("@")
user, passwd = parse_userinfo(userinfo)
return user, passwd, scheme + hosts
else:
return None, None, scheme + host_part
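    # Illustrative (hedged) usage of extract_user_password, assuming a
    # standard mongodb URI; the credentials here are made up for the example:
    #   Utility.extract_user_password("mongodb://user:pass@localhost:27017/db")
    #   -> ("user", "pass", "mongodb://localhost:27017")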
@staticmethod
def get_local_mongo_store(bot: Text, domain: Domain):
"""
create local mongo tracker
:param bot: bot id
:param domain: domain data
:return: mongo tracker
"""
username, password, url, db_name = Utility.get_local_db()
return MongoTrackerStore(
domain=domain,
host=url,
db=db_name,
collection=bot,
username=username,
password=password,
)
@staticmethod
def special_match(strg, search=re.compile(r"[^a-zA-Z0-9_]").search):
"""
check if string contains special character other than allowed ones
:param strg: text value
:param search: search pattern
        :return: boolean
"""
return bool(search(strg))
@staticmethod
def extract_text_and_entities(text: Text):
"""
extract entities and plain text from markdown intent example
:param text: markdown intent example
:return: plain intent, list of extracted entities
"""
example = entities_parser.parse_training_example(text)
return example.get(TEXT), example.get('entities',[])
@staticmethod
def __extract_response_button(buttons: Dict):
"""
used to prepare ResponseButton by extracting buttons configuration from bot utterance
:param buttons: button configuration in bot response
:return: yields ResponseButton
"""
from .data_processor.data_objects import ResponseButton
for button in buttons:
yield ResponseButton._from_son(button)
@staticmethod
def prepare_response(value: Dict):
"""
used to prepare bot utterance either Text or Custom for saving in Mongo
:param value: utterance value
:return: response type, response object
"""
from .data_processor.constant import RESPONSE
from .data_processor.data_objects import ResponseText, ResponseCustom
if RESPONSE.Text.value in value:
response_text = ResponseText()
response_text.text = str(value[RESPONSE.Text.value]).strip()
if RESPONSE.IMAGE.value in value:
response_text.image = value[RESPONSE.IMAGE.value]
if RESPONSE.CHANNEL.value in value:
response_text.channel = value["channel"]
if RESPONSE.BUTTONS.value in value:
response_text.buttons = list(
Utility.__extract_response_button(value[RESPONSE.BUTTONS.value])
)
data = response_text
response_type = "text"
elif RESPONSE.CUSTOM.value in value:
data = ResponseCustom._from_son(
{RESPONSE.CUSTOM.value: value[RESPONSE.CUSTOM.value]}
)
response_type = "custom"
else:
response_type = None
            data = None
return response_type, data
@staticmethod
def list_directories(path: Text):
"""
list all the directories in given path
:param path: directory path
:return: list of directories
"""
return list(os.listdir(path))
@staticmethod
def list_files(path: Text, extensions=None):
"""
list all the files in directory
:param path: directory path
:param extensions: extension to search
:return: file list
"""
if extensions is None:
extensions = ["yml", "yaml"]
files = [glob(os.path.join(path, "*." + extension)) for extension in extensions]
return sum(files, [])
@staticmethod
def validate_rasa_config(config: Dict):
"""
validates bot config.yml content for invalid entries
:param config: configuration
:return: None
"""
rasa_config = RasaNLUModelConfig(config)
component_builder = ComponentBuilder()
for i in range(len(rasa_config.pipeline)):
component_cfg = rasa_config.for_component(i)
component_builder.create_component(component_cfg, rasa_config)
configuration.load(config)
@staticmethod
def load_email_configuration():
"""
Loads the variables from the
email.yaml file
"""
Utility.email_conf = ConfigLoader(os.getenv("EMAIL_CONF", "./email.yaml")).get_config()
@staticmethod
async def validate_and_send_mail(email: str, subject: str, body: str):
"""
Used to validate the parameters of the mail to be sent
:param email: email id of the recipient
:param subject: subject of the mail
:param body: body or message of the mail
:return: None
"""
if isinstance(mail_check(email), ValidationFailure):
raise AppException("Please check if email is valid")
if (
Utility.check_empty_string(subject)
or Utility.check_empty_string(body)
):
raise ValidationError(
"Subject and body of the mail cannot be empty or blank space"
)
await Utility.trigger_smtp(email, subject, body)
@staticmethod
async def trigger_smtp(email: str, subject: str, body: str):
"""
Sends an email to the mail id of the recipient
:param email: the mail id of the recipient
:param subject: the subject of the mail
:param body: the body of the mail
:return: None
"""
smtp = SMTP(Utility.email_conf["email"]["sender"]["service"],
port=Utility.email_conf["email"]["sender"]["port"])
smtp.connect(Utility.email_conf["email"]["sender"]["service"], Utility.email_conf["email"]["sender"]["port"])
if Utility.email_conf["email"]["sender"]["tls"]:
smtp.starttls()
smtp.login(Utility.email_conf["email"]["sender"]["userid"] if
Utility.email_conf["email"]["sender"]["userid"] else
Utility.email_conf["email"]["sender"]["email"],
Utility.email_conf["email"]["sender"]["password"])
from_addr = Utility.email_conf["email"]["sender"]["email"]
msg = "From: %s\nTo: %s\nSubject: %s\n\n\n%s" % (from_addr, email, subject, body)
msg = msg.encode('utf-8')
smtp.sendmail(from_addr, email, msg)
smtp.quit()
@staticmethod
def generate_token(email: str, minutes_to_expire=1440):
"""
Used to encode the mail id into a token
:param email: mail id of the recipient
:param minutes_to_expire: time in minutes until the token expires
:return: the token with encoded mail id
"""
data = {"mail_id": email}
expire = datetime.utcnow() + timedelta(minutes=minutes_to_expire)
data.update({"exp": expire})
encoded_jwt = encode(
data,
Utility.environment['security']["secret_key"],
algorithm=Utility.environment['security']["algorithm"],
).decode("utf-8")
return encoded_jwt
@staticmethod
def verify_token(token: str):
"""
Used to check if token is valid
:param token: the token from the confirmation link
:return: mail id
"""
try:
decoded_jwt = decode(
token,
Utility.environment['security']["secret_key"],
algorithm=Utility.environment['security']["algorithm"],
)
mail = decoded_jwt["mail_id"]
return mail
except Exception:
raise AppException("Invalid token")
@staticmethod
def get_local_db():
db_url = Utility.environment['database']["url"]
db_name = Utility.environment['database']["test_db"]
username, password, url = Utility.extract_user_password(db_url)
return username, password, url, db_name
@staticmethod
def get_timestamp_previous_month(month: int):
start_time = datetime.now() - timedelta(month * 30, seconds=0, minutes=0, hours=0)
return start_time.timestamp()
@staticmethod
def build_http_response_object(http_action_config: HttpActionConfig, user: str, bot: str):
"""
Builds a new HttpActionConfigResponse object from HttpActionConfig object.
:param http_action_config: HttpActionConfig object containing configuration for the Http action
:param user: user id
:param bot: bot id
:return: HttpActionConfigResponse containing configuration for Http action
"""
http_params = [
HttpActionParametersResponse(key=param.key, value=param.value, parameter_type=param.parameter_type)
for param in
http_action_config.params_list]
response = HttpActionConfigResponse(
auth_token=http_action_config.auth_token,
action_name=http_action_config.action_name,
response=http_action_config.response,
http_url=http_action_config.http_url,
request_method=http_action_config.request_method,
params_list=http_params,
user=user,
bot=bot
)
return response
@staticmethod
def create_cache():
return InMemoryAgentCache()
@staticmethod
def train_model_event(bot: str, user: str, token: str = None):
event_url = Utility.environment['model']['train']['event_url']
logger.info("model training event started")
response = requests.post(event_url, headers={'content-type': 'application/json'}, json={'bot': bot, 'user': user, 'token': token})
logger.info("model training event completed"+response.content.decode('utf8'))
@staticmethod
def trigger_data_generation_event(bot: str, user: str, token: str):
try:
event_url = Utility.environment['data_generation']['event_url']
kairon_url = Utility.environment['data_generation']['kairon_url']
logger.info("Training data generator event started")
response = requests.post(event_url, headers={'content-type': 'application/json'},
json={'user': user, 'kairon_url': kairon_url, 'token': token})
logger.info("Training data generator event completed" + response.content.decode('utf8'))
except Exception as e:
logger.error(str(e))
from .data_processor.processor import TrainingDataGenerationProcessor
TrainingDataGenerationProcessor.set_status(bot=bot,
user=user,
status=TRAINING_DATA_GENERATOR_STATUS.FAIL.value,
exception=str(e))
@staticmethod
def http_request(method: str, url: str, token: str, user: str, json: Dict = None):
logger.info("agent event started "+url)
headers = {'content-type': 'application/json', 'X-USER': user}
if token:
headers['Authorization'] = 'Bearer '+token
response = requests.request(method, url, headers=headers, json=json)
logger.info("agent event completed" + response.content.decode('utf8'))
return response.content.decode('utf8')
@staticmethod
def get_action_url(endpoint):
if endpoint and endpoint.get("action_endpoint"):
return EndpointConfig(url=endpoint["action_endpoint"]["url"])
elif Utility.environment['action'].get('url'):
return EndpointConfig(url=Utility.environment['action'].get('url'))
else:
return None
@staticmethod
async def upload_document(doc):
if not (doc.filename.lower().endswith('.pdf') or doc.filename.lower().endswith('.docx')):
raise AppException("Invalid File Format")
folder_path = 'data_generator'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
destination = os.path.join(folder_path, doc.filename)
with Path(destination).open("wb") as buffer:
shutil.copyfileobj(doc.file, buffer)
return destination
| 35.072351 | 138 | 0.605761 |
7b301c4007e743040747f7e65e13bce765dcfb92 | 3,022 | py | Python | parser/team08/Tytus_SQLPARSER_G8/Instrucciones/Expresiones/Logica.py | 18SebastianVC/tytus | 2b22f4339356b6cf46e3235a5219f68e5ba5573b | [
"MIT"
] | 1 | 2021-01-09T05:32:35.000Z | 2021-01-09T05:32:35.000Z | parser/team08/Tytus_SQLPARSER_G8/Instrucciones/Expresiones/Logica.py | XiomRB/tytus | 0873e4bdce5c110bee6ef2aa98240be6a93ae024 | [
"MIT"
] | null | null | null | parser/team08/Tytus_SQLPARSER_G8/Instrucciones/Expresiones/Logica.py | XiomRB/tytus | 0873e4bdce5c110bee6ef2aa98240be6a93ae024 | [
"MIT"
] | null | null | null | from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Excepcion import Excepcion
class Logica(Instruccion):
def __init__(self, opIzq, opDer, operador, linea, columna):
Instruccion.__init__(self,Tipo(Tipo_Dato.BOOLEAN),linea,columna)
self.opIzq = opIzq
self.opDer = opDer
self.operador = operador
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
        # Operation with two operands
if(self.opDer != None):
            # If there is an error in the left operand, return it.
resultadoIzq = self.opIzq.ejecutar(tabla, arbol)
if isinstance(resultadoIzq, Excepcion):
return resultadoIzq
            # If there is an error in the right operand, return it.
resultadoDer = self.opDer.ejecutar(tabla, arbol)
if isinstance(resultadoDer, Excepcion):
return resultadoDer
            # Check which operator this is
if self.operador == 'OR':
if self.opIzq.tipo.tipo == Tipo_Dato.BOOLEAN and self.opDer.tipo.tipo == Tipo_Dato.BOOLEAN:
return resultadoIzq or resultadoDer
else:
error = Excepcion('42804',"Semántico","El argumento de OR debe ser de tipo boolean",self.linea,self.columna)
arbol.excepciones.append(error)
return error
elif self.operador == 'AND':
if self.opIzq.tipo.tipo == Tipo_Dato.BOOLEAN and self.opDer.tipo.tipo == Tipo_Dato.BOOLEAN:
return resultadoIzq and resultadoDer
else:
error = Excepcion('42804',"Semántico","El argumento de AND debe ser de tipo boolean",self.linea,self.columna)
arbol.excepciones.append(error)
return error
else:
error = Excepcion('42804',"Semántico","Operador desconocido.",self.linea,self.columna)
arbol.excepciones.append(error)
return error
        # Unary operation
else:
            # If there is an error in the left operand, return it.
resultadoIzq = self.opIzq.ejecutar(tabla, arbol)
if isinstance(resultadoIzq, Excepcion):
return resultadoIzq
if self.operador == 'NOT':
if self.opIzq.tipo.tipo == Tipo_Dato.BOOLEAN:
                    return not resultadoIzq
else:
error = Excepcion('42804',"Semántico","Tipo de datos incorrectos en la operación lógica not",self.linea,self.columna)
arbol.excepciones.append(error)
return error
else:
error = Excepcion('42804',"Semántico","Operador desconocido.",self.linea,self.columna)
arbol.excepciones.append(error)
return error | 51.220339 | 137 | 0.599934 |
4cf74c557b39faf531999a2ea1bdbf93e992e00a | 7,882 | py | Python | raiden/utils/upgrades.py | marcosmartinez7/lumino | 2a5a74589aaf26172cee6ec23fde5f4fc1938a43 | [
"MIT"
] | 8 | 2019-06-12T14:50:06.000Z | 2022-02-15T16:20:07.000Z | raiden/utils/upgrades.py | marcosmartinez7/lumino | 2a5a74589aaf26172cee6ec23fde5f4fc1938a43 | [
"MIT"
] | 141 | 2019-06-18T13:04:08.000Z | 2021-11-23T22:00:32.000Z | raiden/utils/upgrades.py | marcosmartinez7/lumino | 2a5a74589aaf26172cee6ec23fde5f4fc1938a43 | [
"MIT"
] | 17 | 2019-05-21T18:09:05.000Z | 2020-10-29T13:01:01.000Z | import os
import sqlite3
from contextlib import closing
from glob import escape, glob
from pathlib import Path
import filelock
import structlog
from raiden.constants import RAIDEN_DB_VERSION
from raiden.storage.migrations.v16_to_v17 import upgrade_v16_to_v17
from raiden.storage.migrations.v17_to_v18 import upgrade_v17_to_v18
from raiden.storage.migrations.v18_to_v19 import upgrade_v18_to_v19
from raiden.storage.migrations.v19_to_v20 import upgrade_v19_to_v20
from raiden.storage.migrations.v20_to_v21 import upgrade_v20_to_v21
from raiden.storage.migrations.v21_to_v22 import upgrade_v21_to_v22
from raiden.storage.sqlite import SQLiteStorage
from raiden.storage.versions import VERSION_RE, filter_db_names, latest_db_file
from raiden.utils.typing import Callable, List, NamedTuple
class UpgradeRecord(NamedTuple):
from_version: int
function: Callable
UPGRADES_LIST = [
UpgradeRecord(from_version=16, function=upgrade_v16_to_v17),
UpgradeRecord(from_version=17, function=upgrade_v17_to_v18),
UpgradeRecord(from_version=18, function=upgrade_v18_to_v19),
UpgradeRecord(from_version=19, function=upgrade_v19_to_v20),
UpgradeRecord(from_version=20, function=upgrade_v20_to_v21),
UpgradeRecord(from_version=21, function=upgrade_v21_to_v22),
]
log = structlog.get_logger(__name__)
def get_file_lock(db_filename: Path):
lock_file_name = f"{db_filename}.lock"
return filelock.FileLock(lock_file_name)
def update_version(storage: SQLiteStorage, version: int):
cursor = storage.conn.cursor()
cursor.execute(
'INSERT OR REPLACE INTO settings(name, value) VALUES("version", ?)', (str(version),)
)
def get_file_version(db_path: Path) -> int:
match = VERSION_RE.match(os.path.basename(db_path))
assert match, f'Database name "{db_path}" does not match our format'
file_version = int(match.group(1))
return file_version
def get_db_version(db_filename: Path) -> int:
"""Return the version value stored in the db"""
assert os.path.exists(db_filename)
# Perform a query directly through SQL rather than using
# storage.get_version()
# as get_version will return the latest version if it doesn't
# find a record in the database.
conn = sqlite3.connect(str(db_filename), detect_types=sqlite3.PARSE_DECLTYPES)
cursor = conn.cursor()
try:
cursor.execute('SELECT value FROM settings WHERE name="version";')
result = cursor.fetchone()
except sqlite3.OperationalError:
raise RuntimeError("Corrupted database. Database does not the settings table.")
if not result:
raise RuntimeError(
"Corrupted database. Settings table does not contain an entry the db version."
)
return int(result[0])
def _copy(old_db_filename, current_db_filename):
old_conn = sqlite3.connect(old_db_filename, detect_types=sqlite3.PARSE_DECLTYPES)
current_conn = sqlite3.connect(current_db_filename, detect_types=sqlite3.PARSE_DECLTYPES)
with closing(old_conn), closing(current_conn):
old_conn.backup(current_conn)
def delete_dbs_with_failed_migrations(valid_db_names: List[Path]) -> None:
for db_path in valid_db_names:
file_version = get_file_version(db_path)
with get_file_lock(db_path):
db_version = get_db_version(db_path)
# The version matches, nothing to do.
if db_version == file_version:
continue
elif db_version > file_version:
raise RuntimeError(
f"Impossible database version. "
f"The database {db_path} has too high a version ({db_version}), "
f"this should never happen."
)
# The version number in the database is smaller then the current
# target, this means that a migration failed to execute and the db
# is partially upgraded.
else:
os.remove(db_path)
class UpgradeManager:
""" Run migrations when a database upgrade is necesary.
Skip the upgrade if either:
- There is no previous DB
- There is a current DB file and the version in settings matches.
Upgrade procedure:
- Delete corrupted databases.
- Copy the old file to the latest version (e.g. copy version v16 as v18).
- In a transaction: Run every migration. Each migration must decide whether
to proceed or not.
"""
def __init__(self, db_filename: str, **kwargs):
base_name = os.path.basename(db_filename)
match = VERSION_RE.match(base_name)
assert match, f'Database name "{base_name}" does not match our format'
self._current_db_filename = Path(db_filename)
self._current_version = get_file_version(self._current_db_filename)
self._kwargs = kwargs
def run(self):
# First clear up any partially upgraded databases.
#
# A database will be partially upgraded if the process receives a
# SIGKILL/SIGINT while executing migrations. NOTE: It's very probable
# the content of the database remains consistent, because the upgrades
# are executed inside a migration, however making a second copy of the
# database does no harm.
escaped_path = escape(str(self._current_db_filename.parent))
paths = glob(f"{escaped_path}/v*_log.db")
valid_db_names = filter_db_names(paths)
delete_dbs_with_failed_migrations(valid_db_names)
# At this point we know every file version and db version match
# (assuming there are no concurrent runs).
paths = glob(f"{escaped_path}/v*_log.db")
valid_db_names = filter_db_names(paths)
latest_db_path = latest_db_file(valid_db_names)
# First run, there is no database file available
if latest_db_path is None:
return
file_version = get_file_version(latest_db_path)
# The latest version matches our target version, nothing to do.
if file_version == RAIDEN_DB_VERSION:
return
if file_version > RAIDEN_DB_VERSION:
raise RuntimeError(
f"Conflicting database versions detected, latest db version is v{file_version}, "
f"Raiden client version is v{RAIDEN_DB_VERSION}."
f"\n\n"
f"Running a downgraded version of Raiden after an upgrade is not supported, "
f"because the transfers done with the new client are not understandable by the "
f"older."
)
self._upgrade(
target_file=str(self._current_db_filename),
from_file=latest_db_path,
from_version=file_version,
)
def _upgrade(self, target_file: Path, from_file: Path, from_version: int):
with get_file_lock(from_file), get_file_lock(target_file):
_copy(from_file, target_file)
storage = SQLiteStorage(target_file)
log.debug(f"Upgrading database from v{from_version} to v{RAIDEN_DB_VERSION}")
try:
version_iteration = from_version
with storage.transaction():
for upgrade_record in UPGRADES_LIST:
if upgrade_record.from_version < from_version:
continue
version_iteration = upgrade_record.function(
storage=storage,
old_version=version_iteration,
current_version=RAIDEN_DB_VERSION,
**self._kwargs,
)
update_version(storage, RAIDEN_DB_VERSION)
except BaseException as e:
log.error(f"Failed to upgrade database: {e}")
raise
storage.conn.close()
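# Illustrative (hedged) usage sketch -- not part of the original module. The
# path below is made up; the db filename must follow the "v<N>_log.db" naming
# that VERSION_RE and filter_db_names expect:
#
#     manager = UpgradeManager(db_filename="/path/to/node/netid_1/network/v22_log.db")
#     manager.run()
#
# Any extra keyword arguments given to UpgradeManager are stored in
# self._kwargs and forwarded to each migration function during _upgrade.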
| 36.155963 | 97 | 0.672164 |
03d18c85c23be1038ffa5e0acd7a787025ff8a46 | 4,619 | py | Python | testproj/snippets/serializers.py | Zeeshan138063/Documentaion-drf-yasg | 4ee4f4824e653259ae45c9a443c93a01d2e13df0 | [
"BSD-3-Clause"
] | null | null | null | testproj/snippets/serializers.py | Zeeshan138063/Documentaion-drf-yasg | 4ee4f4824e653259ae45c9a443c93a01d2e13df0 | [
"BSD-3-Clause"
] | null | null | null | testproj/snippets/serializers.py | Zeeshan138063/Documentaion-drf-yasg | 4ee4f4824e653259ae45c9a443c93a01d2e13df0 | [
"BSD-3-Clause"
] | null | null | null | from decimal import Decimal
from django.contrib.auth import get_user_model
from rest_framework import serializers
from rest_framework.compat import MaxLengthValidator, MinValueValidator
from snippets.models import LANGUAGE_CHOICES, STYLE_CHOICES, Snippet
class LanguageSerializer(serializers.Serializer):
name = serializers.ChoiceField(
choices=LANGUAGE_CHOICES, default='python', help_text='The name of the programming language')
read_only_nullable = serializers.CharField(read_only=True, allow_null=True)
class Meta:
ref_name = None
class ExampleProjectSerializer(serializers.Serializer):
project_name = serializers.CharField(help_text='Name of the project')
github_repo = serializers.CharField(required=True, help_text='Github repository of the project')
class Meta:
ref_name = 'Project'
class UnixTimestampField(serializers.DateTimeField):
def to_representation(self, value):
""" Return epoch time for a datetime object or ``None``"""
from django.utils.dateformat import format
try:
return int(format(value, 'U'))
except (AttributeError, TypeError):
return None
def to_internal_value(self, value):
import datetime
return datetime.datetime.fromtimestamp(int(value))
class Meta:
swagger_schema_fields = {
'format': 'integer',
'title': 'Client date time suu',
'description': 'Date time in unix timestamp format',
}
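# Editorial note (hedged): drf-yasg supports a ``swagger_schema_fields`` dict on
# a field's Meta -- the keys above ('format', 'title', 'description') are merged
# into the schema generated for any serializer field of this class, overriding
# what would normally be emitted for a plain DateTimeField.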
class SnippetSerializer(serializers.Serializer):
"""SnippetSerializer classdoc
create: docstring for create from serializer classdoc
"""
id = serializers.IntegerField(read_only=True, help_text="id serializer help text")
created = UnixTimestampField(read_only=True)
owner = serializers.PrimaryKeyRelatedField(
queryset=get_user_model().objects.all(),
default=serializers.CurrentUserDefault(),
help_text="The ID of the user that created this snippet; if none is provided, "
"defaults to the currently logged in user."
)
owner_as_string = serializers.PrimaryKeyRelatedField(
help_text="The ID of the user that created this snippet.",
pk_field=serializers.CharField(help_text="this help text should not show up"),
read_only=True,
source='owner',
)
title = serializers.CharField(required=False, allow_blank=True, max_length=100)
code = serializers.CharField(style={'base_template': 'textarea.html'})
tags = serializers.ListField(child=serializers.CharField(min_length=2), min_length=3, max_length=15)
linenos = serializers.BooleanField(required=False)
language = LanguageSerializer(help_text="Sample help text for language")
styles = serializers.MultipleChoiceField(choices=STYLE_CHOICES, default=['friendly'])
lines = serializers.ListField(child=serializers.IntegerField(), allow_empty=True, allow_null=True, required=False)
example_projects = serializers.ListSerializer(child=ExampleProjectSerializer(), read_only=True,
validators=[MaxLengthValidator(100)])
difficulty_factor = serializers.FloatField(help_text="this is here just to test FloatField",
read_only=True, default=lambda: 6.9)
rate_as_string = serializers.DecimalField(max_digits=6, decimal_places=3, default=Decimal('0.0'),
validators=[MinValueValidator(Decimal('0.0'))])
rate = serializers.DecimalField(max_digits=6, decimal_places=3, default=Decimal('0.0'), coerce_to_string=False,
validators=[MinValueValidator(Decimal('0.0'))])
def create(self, validated_data):
"""
Create and return a new `Snippet` instance, given the validated data.
"""
del validated_data['styles']
del validated_data['lines']
del validated_data['difficulty_factor']
return Snippet.objects.create(**validated_data)
def update(self, instance, validated_data):
"""
Update and return an existing `Snippet` instance, given the validated data.
"""
instance.title = validated_data.get('title', instance.title)
instance.code = validated_data.get('code', instance.code)
instance.linenos = validated_data.get('linenos', instance.linenos)
instance.language = validated_data.get('language', instance.language)
instance.style = validated_data.get('style', instance.style)
instance.save()
return instance
| 44.84466 | 118 | 0.688894 |
159b2d7b1c5b2ba047f9b3e361eb6c5694dd34bd | 3,924 | py | Python | splatsync.py | robla/pinsplat | 1976c5f10ae669f243ff506b01fac62a59314baa | [
"MIT"
] | null | null | null | splatsync.py | robla/pinsplat | 1976c5f10ae669f243ff506b01fac62a59314baa | [
"MIT"
] | null | null | null | splatsync.py | robla/pinsplat | 1976c5f10ae669f243ff506b01fac62a59314baa | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import configparser
import git
import os
import re
import shutil
import subprocess
import sys
import time
import urllib.request
from base10x60timestamp.b1060time import get_b1060_timestamp_from_epoch
def parse_arguments():
""" see http://docs.python.org/library/argparse """
parser = argparse.ArgumentParser(
description='Sync Pinboard with local data')
parser.add_argument('dest', help='optional download destination',
nargs='*', default=None)
parser.add_argument('--nocommit', help='no commits to git repos',
action="store_true")
parser.add_argument('--nofetch', help='no retrieving export from pinboard.in',
action="store_true")
return parser.parse_args()
def load_config_file():
configfile = os.path.expanduser("~/.pinboardrc")
config = configparser.RawConfigParser()
with open(configfile, "r") as f:
config.read_file(f)
return config
def get_pinboard_apitoken(config):
api_token = config.get("authentication", "api_token")
return api_token
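# Illustrative (hedged) ~/.pinboardrc layout implied by the config reads in this
# script; section and key names are taken from the code, values are placeholders:
#
#   [authentication]
#   api_token = username:XXXXXXXXXXXXXXXXXXXX
#   [splatter]
#   data_dir = /home/user/pinboard/pinboard-splatter/data
#   [backup]
#   backup_dir = /home/user/pinboard/backups
#   export_stage = /home/user/pinboard/export-stage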
# TODO - turn pinboard_splatter into library that I can import easily
# TODO - port (back) to python3
def run_pinboard_splatter(exportfile, message, commitflag):
config = load_config_file()
# splatter data
splatter_data = config.get("splatter", "data_dir")
#xxx cd ~/pinboard/pinboard-splatter/data
os.chdir(splatter_data)
if(True):
out_bytes = subprocess.check_output(['pinsplat', exportfile])
if commitflag:
repo = git.Repo('.')
regexp = re.compile(r'(hash/json/|hash/mime/)')
addthese = [x for x in repo.untracked_files if regexp.match(x)]
modded = [x.a_path for x in repo.index.diff(None).iter_change_type('M')]
if (len(addthese) + len(modded)) > 0:
repo.index.add(addthese)
repo.index.add(modded)
repo.index.commit(message)
return True
def main(argv=None):
""" Sync Pinboard with local data """
# initialize all the configuration
args = parse_arguments()
commitflag = (not args.nocommit)
fetchflag = (not args.nofetch)
config = load_config_file()
pbauthkey = get_pinboard_apitoken(config)
b1060str = get_b1060_timestamp_from_epoch(time.time())
backupdir = config.get("backup", "backup_dir")
filepart = "pinboard_export-" + b1060str + ".json"
pbexportfile = os.path.join(backupdir, filepart)
baseurl = 'https://api.pinboard.in/v1/posts/all?format=json'
authpart = '&auth_token=' + pbauthkey
if args.dest:
pbexportfile = os.path.join(os.getcwd(), args.dest[0])
filepart = os.path.basename(args.dest[0])
# get the export from pinboard.in
# fetchflag is true by default, and false if --nofetch is given
# on commandline. When false, read from local pbexportfile
if fetchflag:
urllib.request.urlretrieve(baseurl + authpart, pbexportfile)
# set up the staging area
export_stage = config.get("backup", "export_stage")
os.chdir(export_stage)
export_basename = 'pinboard-export.json'
export_fullname = os.path.join(export_stage, export_basename)
# update pinboard-export
if fetchflag or args.dest:
shutil.copy(pbexportfile, 'pinboard-export.json')
message = 'automatic update from ' + filepart
# check in the result
# commitflag is true by default, and false if --nocommit is given
# on commandline
if commitflag:
repo = git.Repo('.')
index = repo.index
modded = [x.a_path for x in repo.index.diff(None).iter_change_type('M')]
if len(modded) > 0:
index.add(['pinboard-export.json'])
index.commit(message)
else:
print('no commit')
run_pinboard_splatter(export_fullname, message, commitflag)
if __name__ == '__main__':
exit_status = main(sys.argv)
sys.exit(exit_status)
| 28.852941 | 82 | 0.675331 |
af08fa51f38d47b0371b702ec4851f6c1d496d71 | 5,721 | py | Python | plantcam.py | zmcki001/SpacePeppers | 5ef356a495772ead22cca5b46bbdc3f4119f630b | [
"MIT"
] | null | null | null | plantcam.py | zmcki001/SpacePeppers | 5ef356a495772ead22cca5b46bbdc3f4119f630b | [
"MIT"
] | null | null | null | plantcam.py | zmcki001/SpacePeppers | 5ef356a495772ead22cca5b46bbdc3f4119f630b | [
"MIT"
] | null | null | null | import os
import glob
import picamera
import ftplib
import sys
import bme280
import smbus2
from subprocess import call
from datetime import datetime
from time import sleep
from requests.exceptions import ConnectionError
# Our file path
filePath = "/home/pi/Desktop/spacepeppers2020/pics/"
picTotal = 1000000
picCount = 0
#allows use of the sensors
port = 1
address = 0x77
bus = smbus2.SMBus(port)
bme280.load_calibration_params(bus,address)
bme280_data = bme280.sample(bus,address)
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
while picCount < picTotal:
# Grab the current time
currentTime = datetime.now()
# Create file name for our picture
picTime = currentTime.strftime("%Y.%m.%d-%H%M%S")
picName = picTime + '.jpg'
completeFilePath = filePath + picName
#initialize variables for counting hot readings
i1 = 0
i2 = 0
i3 = 0
i4 = 0
#insert device serial numbers here
sn1 = '' # ROOM TEMP
sn2 = '' # PLANT 1
sn3 = '' # PLANT 2
sn4 = '' # PLANT 3
#initialize all of the directories for the sensors
device_file1 = glob.glob(base_dir + sn1)[0] + '/w1_slave'
device_file2 = glob.glob(base_dir + sn2)[0] + '/w1_slave'
device_file3 = glob.glob(base_dir + sn3)[0] + '/w1_slave'
device_file4 = glob.glob(base_dir + sn4)[0] + '/w1_slave'
################################
# Routines to read each sensor #
################################
#Read Sensor 1
def read_temp_raw1():
f = open(device_file1, 'r')
lines1 = f.readlines()
f.close()
return lines1
def read_temp1():
lines1 = read_temp_raw1()
while lines1[0].strip()[-3:] != 'YES':
            sleep(0.2)
lines1 = read_temp_raw1()
equals_pos = lines1[1].find('t=')
if equals_pos != -1:
temp_string1 = lines1[1][equals_pos+2:]
temp_c1 = float(temp_string1) / 1000.0
return temp_c1
#Read Sensor 2
def read_temp_raw2():
f = open(device_file2, 'r')
lines2 = f.readlines()
f.close()
return lines2
def read_temp2():
lines2 = read_temp_raw2()
while lines2[0].strip()[-3:] != 'YES':
            sleep(0.2)
lines2 = read_temp_raw2()
equals_pos = lines2[1].find('t=')
if equals_pos != -1:
temp_string2 = lines2[1][equals_pos+2:]
temp_c2 = float(temp_string2) / 1000.0
return temp_c2
#Read Sensor 3
def read_temp_raw3():
f = open(device_file3, 'r')
lines3 = f.readlines()
f.close()
return lines3
def read_temp3():
lines3 = read_temp_raw3()
while lines3[0].strip()[-3:] != 'YES':
            sleep(0.2)
lines3 = read_temp_raw3()
equals_pos = lines3[1].find('t=')
if equals_pos != -1:
temp_string3 = lines3[1][equals_pos+2:]
temp_c3 = float(temp_string3) / 1000.0
return temp_c3
#Read Sensor 4
def read_temp_raw4():
f = open(device_file4, 'r')
lines4 = f.readlines()
f.close()
return lines4
def read_temp4():
lines4 = read_temp_raw4()
while lines4[0].strip()[-3:] != 'YES':
            sleep(0.2)
lines4 = read_temp_raw4()
equals_pos = lines4[1].find('t=')
if equals_pos != -1:
temp_string4 = lines4[1][equals_pos+2:]
temp_c4 = float(temp_string4) / 1000.0
return temp_c4
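    # Editorial note (hedged sketch, not part of the original script): the four
    # read_temp*/read_temp_raw* pairs above differ only in which device_file
    # they read, so a single parameterised helper like the commented sketch
    # below could replace them:
    #
    #     def read_temp(device_file):
    #         while True:
    #             with open(device_file) as f:
    #                 lines = f.readlines()
    #             if lines[0].strip()[-3:] == 'YES':
    #                 break
    #             sleep(0.2)
    #         equals_pos = lines[1].find('t=')
    #         if equals_pos != -1:
    #             return float(lines[1][equals_pos+2:]) / 1000.0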
# Take picture using new filepath
with picamera.PiCamera() as camera:
camera.resolution = (1280,720)
camera.capture(completeFilePath,quality=10)
print("Picture captured")
temp1 = read_temp1()
temp2 = read_temp2()
temp3 = read_temp3()
temp4 = read_temp4()
    bme280_data = bme280.sample(bus, address)  # re-sample each cycle so humidity/pressure stay current
    humidity = bme280_data.humidity
    pressure = bme280_data.pressure
# Create our stamp variable
degree = '\xB0'
tempMessage = " \nRoom: " + str(temp1) + degree + "C\nPlant 1: " + str(temp2) + degree + "C\nPlant 2: " + str(temp3) + degree + "C\nPlant 3: " + str(temp4) + degree + "C\nHumidity: " + str(humidity) + "\nPressure: " + str(pressure) + " hPa"
timestampMessage = currentTime.strftime("%Y.%m.%d - %H:%M:%S") + " UTC"
# Create time stamp command to have executed
timestampCommand = "/usr/bin/convert " + completeFilePath + " -pointsize 24 \
-undercolor Black -fill white -annotate +20+50 '" + timestampMessage + tempMessage + "' " + completeFilePath
# Actually execute the command!
call([timestampCommand], shell=True)
print("Picture Timestamped")
try:
# Upload to Website
filename= picName
filepath= "/home/pi/Desktop/spacepeppers2020/pics/"
un= "" # ENTER USERNAME FOR FTP HERE
pw= "" # ENTER PASSWORD FOR FTP HERE
remotefolder= "spacepeppers2020" # THIS CAN BE CHANGED TO YOUR DIRECTORY
ftp= ftplib.FTP('') # FTP SERVER PATH GOES HERE
ftp.login(un, pw)
ftp.cwd(remotefolder)
uploadfile= open(filepath+filename, 'rb')
ftp.storbinary('STOR ' + filename, uploadfile)
print('File uploaded')
except:
continue
finally:
# Advance our picture counter
print('Completed at ' + timestampMessage + '\n\n')
picCount += 1
sleep(1800)
| 31.960894 | 246 | 0.559692 |
2dbbce07d1e233b7a319092b636a5cef3de2576f | 13,894 | py | Python | rest_rpc/connection/runs.py | aimakerspace/synergos_rest | d7b45216e5d1854fe65213f06ae3f3bb6d99cab0 | [
"Apache-2.0"
] | null | null | null | rest_rpc/connection/runs.py | aimakerspace/synergos_rest | d7b45216e5d1854fe65213f06ae3f3bb6d99cab0 | [
"Apache-2.0"
] | null | null | null | rest_rpc/connection/runs.py | aimakerspace/synergos_rest | d7b45216e5d1854fe65213f06ae3f3bb6d99cab0 | [
"Apache-2.0"
] | 2 | 2022-01-21T01:06:01.000Z | 2022-01-26T01:11:06.000Z | #!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
import os
# Libs
import jsonschema
from flask import request
from flask_restx import Namespace, Resource, fields
# Custom
from rest_rpc import app
from rest_rpc.connection.core.utils import TopicalPayload
from rest_rpc.training.models import model_output_model
from rest_rpc.evaluation.validations import val_output_model
from rest_rpc.evaluation.predictions import pred_output_model
from synarchive.connection import RunRecords
##################
# Configurations #
##################
SOURCE_FILE = os.path.abspath(__file__)
ns_api = Namespace(
"runs",
    description='API to facilitate run management in a PySyft Grid.'
)
schemas = app.config['SCHEMAS']
db_path = app.config['DB_PATH']
run_records = RunRecords(db_path=db_path)
logging = app.config['NODE_LOGGER'].synlog
logging.debug("connection/runs.py logged", Description="No Changes")
###########################################################
# Models - Used for marshalling (i.e. moulding responses) #
###########################################################
config_model = ns_api.model(
name="configurations",
model={
"input_size": fields.Integer(),
"output_size": fields.Integer(),
"is_condensed": fields.Boolean(),
"rounds": fields.Integer(required=True),
"epochs": fields.Integer(required=True),
"batch_size": fields.Integer(),
"lr": fields.Float(),
"lr_decay": fields.Float(),
"weight_decay": fields.Float(),
"seed": fields.Integer(),
"precision_fractional": fields.Integer(),
"use_CLR": fields.Boolean(),
"mu": fields.Float(),
"reduction": fields.String(),
"l1_lambda": fields.Float(),
"l2_lambda": fields.Float(),
"dampening": fields.Float(),
"base_lr": fields.Float(),
"max_lr": fields.Float(),
"step_size_up": fields.Integer(),
"step_size_down": fields.Integer(),
"mode": fields.String(),
"gamma": fields.Float(),
"scale_mode": fields.String(),
"cycle_momentum": fields.Boolean(),
"base_momentum": fields.Float(),
"max_momentum": fields.Float(),
"last_epoch": fields.Integer(),
"patience": fields.Integer(),
"delta": fields.Float(),
"cumulative_delta": fields.Boolean()
}
)
run_input_model = ns_api.inherit(
"run_input",
config_model,
{"run_id": fields.String()}
)
run_output_model = ns_api.inherit(
"run_output",
config_model,
{
'doc_id': fields.String(),
'kind': fields.String(),
'key': fields.Nested(
ns_api.model(
name='key',
model={
'collab_id': fields.String(),
'project_id': fields.String(),
'expt_id': fields.String(),
'run_id': fields.String()
}
),
required=True
),
'relations': fields.Nested(
ns_api.model(
name='run_relations',
model={
'Model': fields.List(
fields.Nested(model_output_model, skip_none=True)
),
'Validation': fields.List(
fields.Nested(val_output_model, skip_none=True)
),
'Prediction': fields.List(
fields.Nested(pred_output_model, skip_none=True)
)
}
),
default={},
required=True
)
}
)
payload_formatter = TopicalPayload(
subject=run_records.subject,
namespace=ns_api,
model=run_output_model
)
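# Illustrative (hedged) request body for POST /runs -- field names follow
# run_input_model above; 'rounds' and 'epochs' are the only fields marked
# required in the model, though the POST handler also expects 'run_id':
#
#   {
#       "run_id": "run_001",
#       "rounds": 2,
#       "epochs": 1,
#       "batch_size": 32,
#       "lr": 0.001
#   }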
#############
# Resources #
#############
@ns_api.route('/')
@ns_api.response(500, 'Internal failure')
class Runs(Resource):
""" Handles the entire collection of runs as a catalogue """
@ns_api.doc("get_runs")
@ns_api.marshal_list_with(payload_formatter.plural_model)
def get(self, collab_id, project_id, expt_id):
""" Retrieve all run configurations queued for training """
all_relevant_runs = run_records.read_all(
filter={
'collab_id': collab_id,
'project_id': project_id,
'expt_id': expt_id
}
)
success_payload = payload_formatter.construct_success_payload(
status=200,
method="runs.get",
params=request.view_args,
data=all_relevant_runs
)
logging.info(
f"Collaboration '{collab_id}' -> Project '{project_id}' -> Experiment '{expt_id}' -> Runs: Bulk record retrieval successful!",
code=200,
description=f"Runs under experiment '{expt_id}' of project '{project_id}' were successfully retrieved!",
ID_path=SOURCE_FILE,
ID_class=Runs.__name__,
ID_function=Runs.get.__name__,
**request.view_args
)
return success_payload, 200
@ns_api.doc("register_run")
@ns_api.expect(run_input_model)
@ns_api.marshal_with(payload_formatter.singular_model)
@ns_api.response(201, "New run created!")
@ns_api.response(417, "Inappropriate run configurations passed!")
def post(self, collab_id, project_id, expt_id):
""" Takes in a set of FL training run configurations and stores it """
try:
new_run_details = request.json
run_id = new_run_details.pop('run_id')
run_records.create(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id,
run_id=run_id,
details=new_run_details
)
retrieved_run = run_records.read(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id,
run_id=run_id
)
success_payload = payload_formatter.construct_success_payload(
status=201,
method="runs.post",
params=request.view_args,
data=retrieved_run
)
logging.info(
f"Collaboration '{collab_id}' -> Project '{project_id}' -> Experiment '{expt_id}' -> Run '{run_id}': Record creation successful!",
description=f"Run '{run_id}' under experiment '{expt_id}' of project '{project_id}' was successfully created!",
code=201,
ID_path=SOURCE_FILE,
ID_class=Runs.__name__,
ID_function=Runs.post.__name__,
**request.view_args
)
return success_payload, 201
except jsonschema.exceptions.ValidationError:
logging.error(
f"Collaboration '{collab_id}' -> Project '{project_id}' -> Experiment '{expt_id}' -> Run '{run_id}': Record creation failed.",
code=417,
description="Inappropriate run configurations passed!",
ID_path=SOURCE_FILE,
ID_class=Runs.__name__,
ID_function=Runs.post.__name__,
**request.view_args
)
ns_api.abort(
code=417,
message="Inappropriate run configurations passed!"
)
@ns_api.route('/<run_id>')
@ns_api.response(404, 'Run not found')
@ns_api.response(500, 'Internal failure')
class Run(Resource):
""" Handles all TTP interactions for managing run registration """
@ns_api.doc("get_run")
@ns_api.marshal_with(payload_formatter.singular_model)
def get(self, collab_id, project_id, expt_id, run_id):
""" Retrieves all runs registered for an experiment under a project """
retrieved_run = run_records.read(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id,
run_id=run_id
)
if retrieved_run:
success_payload = payload_formatter.construct_success_payload(
status=200,
method="run.get",
params=request.view_args,
data=retrieved_run
)
logging.info(
f"Collaboration '{collab_id}' -> Project '{project_id}' -> Experiment '{expt_id}' -> Run '{run_id}': Single record retrieval successful!",
code=200,
description=f"Run '{run_id}' under experiment '{expt_id}' of project '{project_id}' was successfully retrieved!",
ID_path=SOURCE_FILE,
ID_class=Run.__name__,
ID_function=Run.get.__name__,
**request.view_args
)
return success_payload, 200
else:
logging.error(
f"Collaboration '{collab_id}' -> Project '{project_id}' -> Experiment '{expt_id}' -> Run '{run_id}': Single record retrieval failed!",
code=404,
description=f"Run '{run_id}' does not exist for Experiment {expt_id} under Project '{project_id}'!",
ID_path=SOURCE_FILE,
ID_class=Run.__name__,
ID_function=Run.get.__name__,
**request.view_args
)
ns_api.abort(
code=404,
message=f"Run '{run_id}' does not exist for Experiment {expt_id} under Project '{project_id}'!"
)
@ns_api.doc("update_run")
@ns_api.expect(config_model)
@ns_api.marshal_with(payload_formatter.singular_model)
def put(self, collab_id, project_id, expt_id, run_id):
""" Updates a run's specified configurations IF & ONLY IF the run has
yet to begin
"""
try:
run_updates = request.json
run_records.update(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id,
run_id=run_id,
updates=run_updates
)
retrieved_run = run_records.read(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id,
run_id=run_id
)
success_payload = payload_formatter.construct_success_payload(
status=200,
method="run.put",
params=request.view_args,
data=retrieved_run
)
logging.info(
f"Collaboration '{collab_id}' -> Project '{project_id}' -> Experiment '{expt_id}' -> Run '{run_id}': Record update successful!",
code=200,
description=f"Run '{run_id}' under experiment '{expt_id}' of project '{project_id}' was successfully updated!",
ID_path=SOURCE_FILE,
ID_class=Run.__name__,
ID_function=Run.put.__name__,
**request.view_args
)
return success_payload, 200
except jsonschema.exceptions.ValidationError:
logging.error(
f"Collaboration '{collab_id}' -> Project '{project_id}' -> Experiment '{expt_id}' -> Run '{run_id}': Record update failed.",
code=417,
description="Inappropriate run configurations passed!",
ID_path=SOURCE_FILE,
ID_class=Run.__name__,
ID_function=Run.put.__name__,
**request.view_args
)
ns_api.abort(
code=417,
message="Inappropriate experimental configurations passed!"
)
@ns_api.doc("delete_run")
@ns_api.marshal_with(payload_formatter.singular_model)
def delete(self, collab_id, project_id, expt_id, run_id):
""" De-registers a previously registered run and deletes it """
retrieved_run = run_records.read(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id,
run_id=run_id
)
deleted_run = run_records.delete(
collab_id=collab_id,
project_id=project_id,
expt_id=expt_id,
run_id=run_id
)
if deleted_run:
success_payload = payload_formatter.construct_success_payload(
status=200,
method="run.delete",
params=request.view_args,
data=retrieved_run
)
logging.info(
f"Collaboration '{collab_id}' -> Project '{project_id}' -> Experiment '{expt_id}' -> Run '{run_id}': Record deletion successful!",
code=200,
description=f"Run '{run_id}' under experiment '{expt_id}' of project '{project_id}' was successfully deleted!",
ID_path=SOURCE_FILE,
ID_class=Run.__name__,
ID_function=Run.delete.__name__,
**request.view_args
)
return success_payload, 200
else:
logging.error(
f"Collaboration '{collab_id}' -> Project '{project_id}' -> Experiment '{expt_id}' -> Run '{run_id}': Record deletion failed.",
code=404,
description=f"Run '{run_id}' under experiment '{expt_id}' of project '{project_id}' does not exist!",
ID_path=SOURCE_FILE,
ID_class=Run.__name__,
ID_function=Run.delete.__name__,
**request.view_args
)
ns_api.abort(
code=404,
message=f"Run '{run_id}' does not exist in for Experiment {expt_id} under Project '{project_id}'!"
)
| 35.085859 | 155 | 0.548798 |
865907c4a94122cd5b4972c346fdae37c8c3f3d7 | 966 | py | Python | src/executor/abstract_storage_executor.py | Ashwin1934/eva | 53c1172a0f8a7409cf0ef97efea957979a8290a0 | [
"Apache-2.0"
] | 34 | 2019-12-22T06:07:40.000Z | 2022-03-19T13:16:29.000Z | src/executor/abstract_storage_executor.py | Ashwin1934/eva | 53c1172a0f8a7409cf0ef97efea957979a8290a0 | [
"Apache-2.0"
] | 88 | 2020-01-29T19:25:19.000Z | 2022-03-25T05:20:52.000Z | src/executor/abstract_storage_executor.py | Ashwin1934/eva | 53c1172a0f8a7409cf0ef97efea957979a8290a0 | [
"Apache-2.0"
] | 33 | 2019-12-22T06:00:19.000Z | 2022-02-02T05:28:24.000Z | # coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from src.executor.abstract_executor import AbstractExecutor
from src.planner.storage_plan import StoragePlan
class AbstractStorageExecutor(AbstractExecutor, ABC):
"""
Abstract executor for storage. This executor returns the batch frames
from the storage layer.
"""
def __init__(self, node: StoragePlan):
super().__init__(node)
| 33.310345 | 74 | 0.756729 |
7d4dd0318be19a700c470a0d1fb824cc190725d6 | 52,746 | py | Python | yaya/common/nt.py | Tony-Wang/YaYaNLP | d75780290926877e55759fb64e1440f809d653ed | [
"Apache-2.0"
] | 63 | 2016-01-14T17:25:25.000Z | 2022-01-07T04:33:48.000Z | yaya/common/nt.py | Tony-Wang/YaYaNLP | d75780290926877e55759fb64e1440f809d653ed | [
"Apache-2.0"
] | 1 | 2016-12-13T06:39:57.000Z | 2016-12-13T06:39:57.000Z | yaya/common/nt.py | Tony-Wang/YaYaNLP | d75780290926877e55759fb64e1440f809d653ed | [
"Apache-2.0"
] | 19 | 2015-12-05T12:31:49.000Z | 2021-07-02T17:43:50.000Z | # coding=utf-8
from __future__ import unicode_literals
from yaya.common.enum import Enum
__author__ = 'tony'
NT = Enum(
    'A', # preceding context, e.g. [参与]亚太经合组织的活动
    'B', # following context, e.g. 中央电视台[报道]
    'X', # connective between names, e.g. 北京电视台[和]天津电视台
    'C', # general prefix of the feature word, e.g. 北京[电影]学院
    'F', # transliterated-name prefix of the feature word, e.g. 美国[摩托罗拉]公司
    'G', # place-name prefix of the feature word, e.g. 交通银行[北京]分行
    'H', # organization-name prefix of the feature word, e.g. [中共中央]顾问委员会
    'I', # special prefix of the feature word, e.g. [华谊]医院
    'J', # abbreviated prefix of the feature word, e.g. [巴]政府
    'K', # a complete organization name, e.g. [麦当劳]
    'L', # locative word
    'M', # numeral, e.g. 公交集团[五]分公司
    'P', # single-character fragment
    'W', # symbol / punctuation
    'D', # feature (head) word of an organization name, e.g. 国务院侨务[办公室]
    'Z', # non-organization component
    'S', # beginning of a sentence
enum_name="NT"
)
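# Illustrative note (an editorial sketch, not part of the original source):
# each string in NTPattern below is a left-to-right sequence of the role tags
# defined in NT above. For example, the pattern "GCD" plausibly describes a
# candidate organization name built from a place-name prefix (G), a general
# prefix (C) and a feature word (D), roughly matching a segmentation such as
# 北京/G 电影/C 学院/D.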
NTPattern = [
"CCCCCCCCD",
"CCCCCCCD",
"CCCCCCD",
"CCCCCCGD",
"CCCCCCICCCCD",
"CCCCCCPD",
"CCCCCD",
"CCCCCDD",
"CCCCCGCCD",
"CCCCCICCCCCD",
"CCCCCPCCD",
"CCCCCWDWD",
"CCCCD",
"CCCCDCCD",
"CCCCDCD",
"CCCCDD",
"CCCCID",
"CCCCPCD",
"CCCD",
"CCCDCCCD",
"CCCDCCD",
"CCCDCD",
"CCCDD",
"CCCDICFPD",
"CCCFCFFCD",
"CCCGD",
"CCCGID",
"CCCGJCD",
"CCCID",
"CCCJCCD",
"CCCJD",
"CCCLGCD",
"CCCMD",
"CCCPCCCD",
"CCCPCCD",
"CCCPCD",
"CCCPD",
"CCD",
"CCDCCCCCCD",
"CCDCCCCD",
"CCDCCCD",
"CCDCCCDD",
"CCDCCD",
"CCDCD",
"CCDCDD",
"CCDCGCD",
"CCDCGCDID",
"CCDCGCDPD",
"CCDCGGDD",
"CCDCID",
"CCDCJCCD",
"CCDCJCCDD",
"CCDD",
"CCDDD",
"CCDFIDGD",
"CCDGCCD",
"CCDICD",
"CCDID",
"CCDJCD",
"CCDPCD",
"CCDPJD",
"CCFCCD",
"CCFD",
"CCGCCCD",
"CCGCCD",
"CCGCD",
"CCGCDCD",
"CCGCDCMD",
"CCGD",
"CCGGCD",
"CCGID",
"CCGIDD",
"CCGJD",
"CCGWGWD",
"CCICCD",
"CCICD",
"CCICIFD",
"CCICJPD",
"CCID",
"CCIDCD",
"CCIDD",
"CCIID",
"CCJCCCD",
"CCJCCD",
"CCJCD",
"CCJCFD",
"CCJD",
"CCJID",
"CCJJMJD",
"CCKID",
"CCLD",
"CCMD",
"CCMMPDCD",
"CCPCCD",
"CCPCD",
"CCPD",
"CCPDCD",
"CCPPD",
"CCWCWD",
"CCWGWCCD",
"CCWGWD",
"CD",
"CDCCCCCCD",
"CDCCCCD",
"CDCCCD",
"CDCCD",
"CDCCDD",
"CDCCJD",
"CDCD",
"CDCDD",
"CDCGD",
"CDCGPCCD",
"CDCJD",
"CDCLD",
"CDCWIWD",
"CDD",
"CDDCCD",
"CDDCCDD",
"CDDCD",
"CDDD",
"CDFD",
"CDFPCCD",
"CDGCD",
"CDGCICD",
"CDGD",
"CDICD",
"CDID",
"CDILLCCD",
"CDJCCD",
"CDJCD",
"CDJD",
"CDJLD",
"CDLGCD",
"CDLJD",
"CDMCD",
"CDPCCCCD",
"CDPCCD",
"CDPD",
"CDPPD",
"CFCCD",
"CFCPD",
"CFD",
"CFPD",
"CGCCCD",
"CGCCD",
"CGCD",
"CGCDCD",
"CGCDD",
"CGD",
"CGDCD",
"CGDD",
"CGDDCCD",
"CGDDD",
"CGDDID",
"CGDJD",
"CGDMD",
"CGFD",
"CGGCCCD",
"CGGCCD",
"CGGCD",
"CGGD",
"CGGGD",
"CGGGDD",
"CGGICD",
"CGGJD",
"CGICD",
"CGID",
"CGIJD",
"CGJD",
"CGMD",
"CGPJD",
"CICCCCD",
"CICCD",
"CICD",
"CICDCD",
"CICDD",
"CICWGWD",
"CID",
"CIDD",
"CIGCD",
"CIGD",
"CIID",
"CILCD",
"CIMD",
"CJCCCCCD",
"CJCCCD",
"CJCCCDD",
"CJCCD",
"CJCCMD",
"CJCD",
"CJCDD",
"CJCGCCD",
"CJCGPJD",
"CJCMD",
"CJCPCCCD",
"CJCPD",
"CJD",
"CJDCCCCD",
"CJDCCJD",
"CJDCD",
"CJDD",
"CJDFD",
"CJDPD",
"CJFCD",
"CJFD",
"CJGD",
"CJGLD",
"CJGPCJD",
"CJID",
"CJJCCD",
"CJJD",
"CJJJD",
"CJJLD",
"CJKD",
"CJLCCD",
"CJMCD",
"CJMD",
"CJPD",
"CJWCCWCGJD",
"CJWD",
"CJWPMWCGD",
"CKCD",
"CKD",
"CKJCDCD",
"CKJPD",
"CLCCCD",
"CLCCD",
"CLCCGCD",
"CLCD",
"CLD",
"CLDFD",
"CLID",
"CLPCD",
"CMCD",
"CMCDD",
"CMCGD",
"CMD",
"CMDCD",
"CMDD",
"CMMD",
"CMMDCCD",
"CMPD",
"CPCCCCCCCD",
"CPCCCCD",
"CPCCCD",
"CPCCD",
"CPCD",
"CPCDD",
"CPCPD",
"CPD",
"CPDCCD",
"CPDCD",
"CPDD",
"CPDGD",
"CPDWGWD",
"CPGCD",
"CPGD",
"CPID",
"CPJCD",
"CPJD",
"CPJPD",
"CPMD",
"CPPD",
"CWCD",
"CWCGWCCD",
"CWCWD",
"CWDWDD",
"CWGWCCD",
"CWGWCD",
"CWPWD",
"DCCCCCD",
"DCCCCD",
"DCCCCDCCD",
"DCCCD",
"DCCD",
"DCD",
"DCDD",
"DCGCD",
"DCJD",
"DCPD",
"DD",
"DDCCD",
"DDCD",
"DDD",
"DDICCD",
"DFD",
"DGCCD",
"DGCD",
"DGD",
"DGDCD",
"DGDD",
"DGDPD",
"DGGD",
"DICCCD",
"DICD",
"DID",
"DIICD",
"DJCCD",
"DJCD",
"DJD",
"DLCCD",
"DLCD",
"DLD",
"DMCD",
"DMD",
"DMMCD",
"DPD",
"DPMMCCD",
"FCCCCCD",
"FCCCCD",
"FCCCD",
"FCCCPCD",
"FCCD",
"FCCGD",
"FCCID",
"FCCPD",
"FCCWGWD",
"FCD",
"FCDCD",
"FCDD",
"FCDFD",
"FCFCD",
"FCFPD",
"FCGCCD",
"FCGCD",
"FCGD",
"FCID",
"FCIJJD",
"FCJCD",
"FCJD",
"FCPD",
"FCPGCD",
"FCWGWD",
"FD",
"FDCD",
"FDD",
"FDFD",
"FDGCCD",
"FDID",
"FDLCD",
"FFCCD",
"FFCD",
"FFCKFCCD",
"FFCLLD",
"FFD",
"FFFD",
"FFGCCD",
"FFGD",
"FFJCD",
"FFJD",
"FFJPCD",
"FFPD",
"FGCCD",
"FGCD",
"FGCGCGCJCD",
"FGD",
"FGDD",
"FGFD",
"FGJCCD",
"FICCD",
"FICD",
"FICDD",
"FICGD",
"FICID",
"FID",
"FIDCD",
"FIDD",
"FIFPD",
"FIID",
"FIJCD",
"FIJD",
"FJCCD",
"FJCD",
"FJCDD",
"FJD",
"FJDCD",
"FJDD",
"FJGD",
"FJJCCD",
"FJJCD",
"FJJCLCD",
"FJJD",
"FJJJCCD",
"FJJJD",
"FJJJICCD",
"FJJLJLCD",
"FJPJD",
"FKCD",
"FKCJD",
"FLD",
"FLPCD",
"FMD",
"FPCCCD",
"FPCD",
"FPD",
"FPFD",
"FPFDD",
"FPID",
"FPJCCD",
"FPJCD",
"FPPCD",
"FPPD",
"FPPDLD",
"FWCCCWCD",
"FWCCCWD",
"FWDWD",
"FWFD",
"FWFWCCCWD",
"FWGJCD",
"FWGWCD",
"GCCCCCCCD",
"GCCCCCCD",
"GCCCCCD",
"GCCCCCDCD",
"GCCCCCDD",
"GCCCCD",
"GCCCCDCCD",
"GCCCCDD",
"GCCCCGD",
"GCCCCJD",
"GCCCCPD",
"GCCCCWDWD",
"GCCCD",
"GCCCDCCCD",
"GCCCDCCCDD",
"GCCCDCCD",
"GCCCDCD",
"GCCCDD",
"GCCCDDJD",
"GCCCDID",
"GCCCDMCD",
"GCCCDPD",
"GCCCDWGCDWD",
"GCCCFCD",
"GCCCGD",
"GCCCICD",
"GCCCID",
"GCCCJCD",
"GCCCJD",
"GCCCJGD",
"GCCCLD",
"GCCCMD",
"GCCCPCCD",
"GCCCWDWD",
"GCCD",
"GCCDCCCCD",
"GCCDCCCD",
"GCCDCCCDCD",
"GCCDCCD",
"GCCDCD",
"GCCDCID",
"GCCDCJCD",
"GCCDCPCD",
"GCCDD",
"GCCDDCCCD",
"GCCDDCCD",
"GCCDDD",
"GCCDFD",
"GCCDGCCD",
"GCCDGD",
"GCCDGGDCD",
"GCCDID",
"GCCDJCD",
"GCCDJD",
"GCCDLDD",
"GCCDLJCD",
"GCCDMJD",
"GCCDMJMMCD",
"GCCDMJMMD",
"GCCDMMD",
"GCCDPD",
"GCCFCD",
"GCCFDD",
"GCCFJPD",
"GCCFPD",
"GCCGCCCD",
"GCCGCCD",
"GCCGCD",
"GCCGCDD",
"GCCGD",
"GCCGGCGD",
"GCCGGDD",
"GCCICCDCCD",
"GCCICD",
"GCCID",
"GCCIDD",
"GCCJCCCD",
"GCCJCCCID",
"GCCJCCD",
"GCCJCD",
"GCCJCJD",
"GCCJD",
"GCCJICD",
"GCCJID",
"GCCJPCD",
"GCCJPD",
"GCCKD",
"GCCLCCD",
"GCCLCD",
"GCCLCGCD",
"GCCLD",
"GCCMCD",
"GCCMD",
"GCCMPD",
"GCCPCCCCD",
"GCCPCCCID",
"GCCPCCD",
"GCCPCD",
"GCCPD",
"GCCPDD",
"GCCPFWCJD",
"GCCPJD",
"GCCWCCWCD",
"GCCWCDWCD",
"GCCWDWCCD",
"GCCWDWD",
"GCD",
"GCDCCCCD",
"GCDCCCCPD",
"GCDCCCD",
"GCDCCD",
"GCDCCDCD",
"GCDCCDD",
"GCDCCDID",
"GCDCCJCD",
"GCDCCJD",
"GCDCD",
"GCDCDD",
"GCDCDICD",
"GCDCGCD",
"GCDCGD",
"GCDCGMCD",
"GCDCID",
"GCDCJCD",
"GCDCJD",
"GCDCLDD",
"GCDCMCD",
"GCDCMD",
"GCDCMDCD",
"GCDCMDD",
"GCDCMDID",
"GCDCPD",
"GCDD",
"GCDDCD",
"GCDDD",
"GCDDMCD",
"GCDFD",
"GCDFGCD",
"GCDFWFD",
"GCDGCCCCCD",
"GCDGCCD",
"GCDGCD",
"GCDGD",
"GCDGDD",
"GCDGGD",
"GCDGLCCD",
"GCDGLJPCD",
"GCDICCCCD",
"GCDICCD",
"GCDICD",
"GCDID",
"GCDIDD",
"GCDJCCD",
"GCDJCD",
"GCDJCDGPD",
"GCDJD",
"GCDJJD",
"GCDKCDCD",
"GCDLCCCD",
"GCDLD",
"GCDLGCCCCD",
"GCDLGCD",
"GCDLPD",
"GCDMCD",
"GCDMCDD",
"GCDMD",
"GCDMDD",
"GCDMJD",
"GCDPCD",
"GCDPD",
"GCDWFWD",
"GCDWGWCD",
"GCDWGWD",
"GCFCCD",
"GCFCCJFGDD",
"GCFCD",
"GCFD",
"GCFDD",
"GCFFD",
"GCFID",
"GCFJCCD",
"GCFPCD",
"GCFPD",
"GCFWGCCD",
"GCFWGCCDD",
"GCFWGJCD",
"GCGCCCD",
"GCGCCD",
"GCGCD",
"GCGCID",
"GCGCLD",
"GCGCPPCCD",
"GCGD",
"GCGDD",
"GCGGCD",
"GCGGCGD",
"GCGGD",
"GCGICD",
"GCGID",
"GCGJCCD",
"GCGPCCD",
"GCICCCCD",
"GCICCCD",
"GCICCD",
"GCICD",
"GCICDD",
"GCID",
"GCIDD",
"GCIDID",
"GCIFCCD",
"GCIID",
"GCIJCD",
"GCIJD",
"GCIJICD",
"GCIPCD",
"GCIPD",
"GCIWGIIWD",
"GCJCCCCD",
"GCJCCCD",
"GCJCCD",
"GCJCD",
"GCJCGD",
"GCJCID",
"GCJCIID",
"GCJCPD",
"GCJD",
"GCJDCCD",
"GCJDCD",
"GCJDD",
"GCJDID",
"GCJFD",
"GCJGD",
"GCJICD",
"GCJID",
"GCJJCCD",
"GCJJCD",
"GCJJD",
"GCJJGD",
"GCJKCD",
"GCJLCCD",
"GCJMD",
"GCJPCCGJLFD",
"GCJPD",
"GCJWCCJCD",
"GCKCCD",
"GCKD",
"GCLCCCD",
"GCLCCD",
"GCLCD",
"GCLD",
"GCLDD",
"GCLGGCD",
"GCMCCD",
"GCMCD",
"GCMD",
"GCMDD",
"GCMPCD",
"GCMPMD",
"GCPCCCCD",
"GCPCCCD",
"GCPCCD",
"GCPCCDD",
"GCPCD",
"GCPCDD",
"GCPCKCD",
"GCPD",
"GCPDCCD",
"GCPDD",
"GCPFD",
"GCPICCCD",
"GCPJCCD",
"GCPJCD",
"GCPJD",
"GCPJDCD",
"GCPJJCD",
"GCPJJDD",
"GCPJPD",
"GCPPCCD",
"GCPPD",
"GCPPPD",
"GCWCWCJD",
"GCWCWD",
"GCWDWCDD",
"GCWDWD",
"GCWGWDD",
"GD",
"GDCCCCCCD",
"GDCCCCCD",
"GDCCCCD",
"GDCCCCPD",
"GDCCCD",
"GDCCCDD",
"GDCCCGCCD",
"GDCCCJCD",
"GDCCCJD",
"GDCCCJDCD",
"GDCCD",
"GDCCDCD",
"GDCCDCDD",
"GDCCDD",
"GDCCID",
"GDCCJD",
"GDCCPCD",
"GDCD",
"GDCDCCD",
"GDCDCD",
"GDCDD",
"GDCDICD",
"GDCDPD",
"GDCFD",
"GDCGCCD",
"GDCGD",
"GDCGPPCCD",
"GDCID",
"GDCIDD",
"GDCJCCD",
"GDCJD",
"GDCLD",
"GDCMD",
"GDCPD",
"GDCPID",
"GDCPJD",
"GDD",
"GDDCCCCD",
"GDDCCCD",
"GDDCCD",
"GDDCD",
"GDDCDD",
"GDDCFD",
"GDDCFDCD",
"GDDCMD",
"GDDD",
"GDDDCD",
"GDDID",
"GDDPPD",
"GDDPPLD",
"GDFCCD",
"GDFCD",
"GDFD",
"GDFFD",
"GDFGD",
"GDGCCCD",
"GDGCCD",
"GDGCD",
"GDGD",
"GDGDCD",
"GDGDD",
"GDGDFID",
"GDGJCCD",
"GDGMD",
"GDICCD",
"GDICD",
"GDID",
"GDIDCD",
"GDIDD",
"GDIGCD",
"GDIID",
"GDIPCD",
"GDJCCCD",
"GDJCCD",
"GDJCD",
"GDJD",
"GDJICD",
"GDJJD",
"GDJJJD",
"GDJPCD",
"GDJPDD",
"GDLCCCCCD",
"GDLCID",
"GDLD",
"GDLJD",
"GDLJDD",
"GDMCD",
"GDMD",
"GDMDCD",
"GDMDD",
"GDMJD",
"GDMJMMD",
"GDMPD",
"GDPCCCCCD",
"GDPCCD",
"GDPCD",
"GDPD",
"GDPGCD",
"GDPID",
"GDPJCD",
"GDPJD",
"GDPPD",
"GDPPJD",
"GDWDWCCD",
"GDWDWCCDD",
"GDWDWD",
"GDWFWD",
"GDWGWD",
"GFCCCCCD",
"GFCCCCD",
"GFCCCCJD",
"GFCCCD",
"GFCCCID",
"GFCCD",
"GFCCDD",
"GFCCFCD",
"GFCCPD",
"GFCCPGD",
"GFCD",
"GFCDCD",
"GFCDD",
"GFCID",
"GFCJCD",
"GFCJD",
"GFCPCCD",
"GFCPCD",
"GFCPD",
"GFCPJD",
"GFCPJPD",
"GFD",
"GFDCCCD",
"GFDCD",
"GFDD",
"GFFCCD",
"GFFCD",
"GFFD",
"GFFPCGCD",
"GFGCD",
"GFGCID",
"GFGD",
"GFGJCD",
"GFICCD",
"GFICD",
"GFID",
"GFIICD",
"GFJCCCD",
"GFJCCD",
"GFJCD",
"GFJCDCD",
"GFJD",
"GFJJCCD",
"GFJJD",
"GFJJJCCD",
"GFJJLJCLCD",
"GFLD",
"GFLPD",
"GFMCD",
"GFPCD",
"GFPD",
"GFPJCD",
"GFPJD",
"GFPJPD",
"GFPPCCCD",
"GFPPD",
"GFWCJCPCCCWCCD",
"GFWGWCD",
"GGCCCCCD",
"GGCCCCD",
"GGCCCD",
"GGCCCICD",
"GGCCCID",
"GGCCCWDWD",
"GGCCD",
"GGCCDCD",
"GGCCDD",
"GGCCGCD",
"GGCCGD",
"GGCCGJD",
"GGCCJCD",
"GGCCJD",
"GGCD",
"GGCDCCCCCD",
"GGCDCCD",
"GGCDCD",
"GGCDD",
"GGCDJD",
"GGCFCCFCPD",
"GGCFD",
"GGCFJD",
"GGCGCCCD",
"GGCGCD",
"GGCGD",
"GGCGGD",
"GGCICLCD",
"GGCID",
"GGCIJCD",
"GGCJCCD",
"GGCJCD",
"GGCJD",
"GGCJDDCD",
"GGCJJCCD",
"GGCJJD",
"GGCJPCICCCD",
"GGCJPD",
"GGCLCD",
"GGCLD",
"GGCMD",
"GGCPCCD",
"GGCPCD",
"GGCPD",
"GGD",
"GGDCCCD",
"GGDCCD",
"GGDCD",
"GGDD",
"GGDDCCD",
"GGDDCD",
"GGDDD",
"GGDFCD",
"GGDFD",
"GGDGD",
"GGDID",
"GGDJCD",
"GGDJD",
"GGDJJD",
"GGDPPJD",
"GGFCCCD",
"GGFCCD",
"GGFCD",
"GGFD",
"GGFDD",
"GGFFCD",
"GGFFD",
"GGFFDCD",
"GGFFDD",
"GGFGD",
"GGFJCCD",
"GGFJD",
"GGFJDD",
"GGFJJD",
"GGFLD",
"GGFPCFPCD",
"GGGCCCCD",
"GGGCCCD",
"GGGCCD",
"GGGCD",
"GGGCDD",
"GGGCGCD",
"GGGCGD",
"GGGCID",
"GGGCJD",
"GGGD",
"GGGDCD",
"GGGDD",
"GGGFD",
"GGGGCD",
"GGGGD",
"GGGGFJD",
"GGGGICD",
"GGGGJD",
"GGGGJPD",
"GGGGLD",
"GGGGPCD",
"GGGGPPD",
"GGGICD",
"GGGID",
"GGGIDID",
"GGGIGCJD",
"GGGIJD",
"GGGJCD",
"GGGJD",
"GGGJJCJD",
"GGGJJD",
"GGGJPCCD",
"GGGLD",
"GGGMD",
"GGGPJD",
"GGGWICWD",
"GGICCCCD",
"GGICCCD",
"GGICCD",
"GGICCGD",
"GGICCLD",
"GGICCPCCD",
"GGICD",
"GGICGCCCD",
"GGICID",
"GGICJD",
"GGID",
"GGIDCD",
"GGIDD",
"GGIFD",
"GGIFJCD",
"GGIFPD",
"GGIGCCD",
"GGIGD",
"GGIICD",
"GGIID",
"GGIIPID",
"GGIJCCD",
"GGIJD",
"GGIPCD",
"GGIPD",
"GGIPDD",
"GGJCCCD",
"GGJCCD",
"GGJCCPCJCCD",
"GGJCD",
"GGJCWDWD",
"GGJD",
"GGJGCCCD",
"GGJGCCD",
"GGJGD",
"GGJJD",
"GGJJPCD",
"GGJLD",
"GGJPD",
"GGJPDD",
"GGKD",
"GGKGD",
"GGLCCCD",
"GGLCD",
"GGLCDD",
"GGLCJD",
"GGLCPD",
"GGLD",
"GGLFD",
"GGLID",
"GGLJD",
"GGLLFD",
"GGLPD",
"GGMCD",
"GGMCDD",
"GGMD",
"GGMJCD",
"GGMLD",
"GGMPCCD",
"GGPCCCD",
"GGPCCD",
"GGPCD",
"GGPCJCD",
"GGPD",
"GGPFD",
"GGPICD",
"GGPJCCCCD",
"GGPJCD",
"GGPJCDD",
"GGPJD",
"GGPLD",
"GGPPCCD",
"GGPPCD",
"GGPPD",
"GGPPJJD",
"GGPPPCD",
"GGWPCGWPJD",
"GICCCCCCD",
"GICCCCCD",
"GICCCCD",
"GICCCD",
"GICCCDD",
"GICCCJCD",
"GICCD",
"GICCDD",
"GICCJD",
"GICCLDD",
"GICCPD",
"GICD",
"GICDCCCCD",
"GICDCCD",
"GICDCD",
"GICDD",
"GICDLPD",
"GICDWCCWD",
"GICGCCCCD",
"GICGCCD",
"GICGCJICD",
"GICGD",
"GICGGD",
"GICGMMD",
"GICGPCJD",
"GICICCD",
"GICICD",
"GICID",
"GICIGD",
"GICIID",
"GICJCCD",
"GICJCD",
"GICJD",
"GICPCCCCD",
"GICPD",
"GICPICD",
"GICPJD",
"GID",
"GIDCCCJCD",
"GIDCCD",
"GIDCD",
"GIDD",
"GIDDD",
"GIDICCD",
"GIDID",
"GIDLPCD",
"GIFCCD",
"GIFD",
"GIFICD",
"GIFWFD",
"GIGCCD",
"GIGCD",
"GIGCGCD",
"GIGCJD",
"GIGCPD",
"GIGD",
"GIGGD",
"GIGICD",
"GIGID",
"GIGJPCD",
"GIICCCCD",
"GIICCD",
"GIICD",
"GIID",
"GIIGD",
"GIIID",
"GIIJCCCD",
"GIIJCD",
"GIJCCCCCD",
"GIJCCCCD",
"GIJCCCD",
"GIJCCD",
"GIJCD",
"GIJCPD",
"GIJD",
"GIJDD",
"GIJID",
"GIJJCCD",
"GIJJCD",
"GIJLD",
"GIJPD",
"GIJPDCD",
"GIKD",
"GILCCCCDD",
"GILCCD",
"GILCD",
"GILD",
"GILID",
"GILPMD",
"GIMCCD",
"GIMCD",
"GIMD",
"GIMJCD",
"GIMJD",
"GIMPCCD",
"GIPCCCCD",
"GIPCCCD",
"GIPCCD",
"GIPCD",
"GIPCMD",
"GIPD",
"GIPDCD",
"GIPDD",
"GIPICD",
"GIPJCCD",
"GIPJCD",
"GIPPCD",
"GIPPD",
"GIWDCCWCD",
"GIWDWD",
"GIWGWCD",
"GJCCCCCD",
"GJCCCCD",
"GJCCCD",
"GJCCCDCDCD",
"GJCCCDD",
"GJCCD",
"GJCCDCD",
"GJCCDD",
"GJCCFD",
"GJCCGJPD",
"GJCCICCD",
"GJCCJCD",
"GJCCJD",
"GJCD",
"GJCDCCD",
"GJCDCJCCD",
"GJCDD",
"GJCDJCD",
"GJCDPD",
"GJCGCD",
"GJCGD",
"GJCGPJCCD",
"GJCICCCD",
"GJCICD",
"GJCID",
"GJCJCCD",
"GJCJCD",
"GJCJD",
"GJCJJCCCCD",
"GJCJJCD",
"GJCJPD",
"GJCJPPCD",
"GJCLD",
"GJCLJCCCD",
"GJCMD",
"GJCPD",
"GJCPJD",
"GJCPPD",
"GJD",
"GJDCCCD",
"GJDCCD",
"GJDCD",
"GJDD",
"GJDICD",
"GJDID",
"GJDLCD",
"GJDPCD",
"GJFCCD",
"GJFCD",
"GJFD",
"GJFFD",
"GJFGD",
"GJFICD",
"GJGCD",
"GJGD",
"GJGPCD",
"GJICCCD",
"GJICCD",
"GJICD",
"GJID",
"GJIID",
"GJJCCCD",
"GJJCCD",
"GJJCCDD",
"GJJCD",
"GJJCJCCCD",
"GJJCJCCD",
"GJJCPCD",
"GJJD",
"GJJDCD",
"GJJDD",
"GJJFCCD",
"GJJFD",
"GJJGD",
"GJJJCD",
"GJJJD",
"GJJJICD",
"GJJJJCCD",
"GJJJJD",
"GJJPCCCD",
"GJJPCCD",
"GJJPCID",
"GJJPPD",
"GJLCCCCD",
"GJLCD",
"GJLCDD",
"GJLD",
"GJMCCD",
"GJMD",
"GJPCCCCD",
"GJPCCCD",
"GJPCCD",
"GJPCD",
"GJPCDD",
"GJPCJCD",
"GJPCLCD",
"GJPCMD",
"GJPD",
"GJPDD",
"GJPGCCD",
"GJPGD",
"GJPICCD",
"GJPICD",
"GJPICDD",
"GJPJCCD",
"GJPJD",
"GJPJPD",
"GJPLCD",
"GJPPJD",
"GKCCCD",
"GKCCD",
"GKCCPD",
"GKCD",
"GKCDCD",
"GKCDD",
"GKCDJCD",
"GKCJCD",
"GKCMD",
"GKD",
"GKDD",
"GKJJD",
"GLCCCCCCD",
"GLCCCCD",
"GLCCCD",
"GLCCD",
"GLCCDD",
"GLCCJCCCD",
"GLCCJCCD",
"GLCD",
"GLCDD",
"GLCDGCCD",
"GLCGCJCD",
"GLCGD",
"GLCGDD",
"GLCJD",
"GLCJJCCCCCD",
"GLCLD",
"GLCMD",
"GLCPCCD",
"GLCPD",
"GLD",
"GLDCD",
"GLDCMD",
"GLDCMDCD",
"GLDCMDD",
"GLDD",
"GLDDCKCD",
"GLFCD",
"GLFCFD",
"GLFGCD",
"GLGCD",
"GLGD",
"GLGPJD",
"GLICCD",
"GLICD",
"GLID",
"GLJCCCD",
"GLJCCD",
"GLJCD",
"GLJCICCD",
"GLJD",
"GLJFCD",
"GLJGD",
"GLJICCD",
"GLJID",
"GLJJD",
"GLJPCCD",
"GLJPCICD",
"GLJPJCCD",
"GLJWGWCD",
"GLLCCCD",
"GLLCID",
"GLPCCCD",
"GLPCCD",
"GLPCD",
"GLPCDD",
"GLPCPCCD",
"GLPD",
"GLPDD",
"GLPGCD",
"GLPJD",
"GLPLJCCCD",
"GLPLJCD",
"GLPPCCCCD",
"GLPPCCD",
"GLPPCD",
"GMCCCCD",
"GMCCCD",
"GMCCD",
"GMCCID",
"GMCD",
"GMCDCCCD",
"GMCDCCD",
"GMCDCD",
"GMCDD",
"GMCDMCD",
"GMCGD",
"GMCJCD",
"GMCMD",
"GMCMJD",
"GMD",
"GMDCD",
"GMDD",
"GMDICD",
"GMDID",
"GMGJCD",
"GMGJJD",
"GMICD",
"GMID",
"GMIPJCCD",
"GMJCCD",
"GMJCD",
"GMJD",
"GMJDD",
"GMJICCCD",
"GMJMJFCD",
"GMJPCD",
"GMJPLCCD",
"GMLD",
"GMLDCD",
"GMLGCD",
"GMLID",
"GMLLD",
"GMMCCCD",
"GMMD",
"GMMGD",
"GMMLCCD",
"GMMPCD",
"GMMPD",
"GMPCCD",
"GMPCD",
"GMPD",
"GMPDCD",
"GMPDD",
"GMPJCD",
"GPCCCCCCD",
"GPCCCCD",
"GPCCCCID",
"GPCCCD",
"GPCCD",
"GPCCDCCD",
"GPCCDD",
"GPCCDDD",
"GPCD",
"GPCDCCD",
"GPCDCD",
"GPCDD",
"GPCFDCCD",
"GPCFDD",
"GPCGD",
"GPCICCD",
"GPCID",
"GPCIJD",
"GPCJCCCD",
"GPCJCCD",
"GPCJCD",
"GPCPID",
"GPCWDWCD",
"GPD",
"GPDCCD",
"GPDCD",
"GPDD",
"GPFCCD",
"GPFCD",
"GPFD",
"GPFFCD",
"GPGCCCD",
"GPGD",
"GPGJCJCCCCD",
"GPGPJD",
"GPICCCCD",
"GPICCCD",
"GPICCD",
"GPICD",
"GPID",
"GPIDCD",
"GPIDD",
"GPJCCCCCD",
"GPJCCCD",
"GPJCCD",
"GPJCD",
"GPJCDD",
"GPJCJCCD",
"GPJD",
"GPJDCCD",
"GPJDCD",
"GPJDD",
"GPJFICD",
"GPJFID",
"GPJGD",
"GPJJCCD",
"GPJJCD",
"GPJLCD",
"GPJWDWD",
"GPLCWCWCWD",
"GPLD",
"GPLJCCD",
"GPMJCGD",
"GPMMD",
"GPMPCCD",
"GPPCCCCD",
"GPPCCCD",
"GPPCCD",
"GPPCD",
"GPPCDCCD",
"GPPCDD",
"GPPCLD",
"GPPD",
"GPPDCD",
"GPPDCDD",
"GPPDD",
"GPPGCD",
"GPPICCD",
"GPPID",
"GPPJCD",
"GPPJD",
"GPPJDD",
"GPPJJCCCCD",
"GPPLD",
"GPPPCCD",
"GPPPCKCCD",
"GPPPPCCD",
"GWCPWD",
"GWCWCCCD",
"GWCWCD",
"GWCWD",
"GWCWPJCD",
"GWD",
"GWFCD",
"GWGCCCD",
"GWGCCD",
"GWGCCWCD",
"GWGCD",
"GWGCWD",
"GWGD",
"GWGID",
"GWGWCCCCD",
"GWGWCCCD",
"GWGWCD",
"GWGWICD",
"GWGWLCD",
"GWICD",
"GWICWD",
"GWIWD",
"GWJWD",
"GWLJWCD",
"GWPD",
"GWPJD",
"ICCCCCCD",
"ICCCCCD",
"ICCCCD",
"ICCCCDD",
"ICCCD",
"ICCD",
"ICCDCCD",
"ICCDCD",
"ICCDD",
"ICCGCCD",
"ICCGCIPD",
"ICCGD",
"ICCJD",
"ICCPD",
"ICCWDWCD",
"ICD",
"ICDD",
"ICDID",
"ICFD",
"ICGCCCD",
"ICGCD",
"ICGFD",
"ICGGCD",
"ICGLCMD",
"ICICD",
"ICID",
"ICIGD",
"ICJCD",
"ICJD",
"ICJJD",
"ICLJCD",
"ICMCCCCD",
"ICMD",
"ICPCD",
"ICPD",
"ICPPD",
"ICWGWCD",
"ICWGWD",
"ICWGWDCD",
"ID",
"IDCCCCD",
"IDCCCD",
"IDCCD",
"IDCCGJID",
"IDCCICD",
"IDCCICDID",
"IDCD",
"IDCDCD",
"IDCDD",
"IDCFCD",
"IDCGD",
"IDCICD",
"IDCID",
"IDCJD",
"IDCPCCCCCCD",
"IDD",
"IDGCCCD",
"IDGCD",
"IDID",
"IDIDD",
"IDJCD",
"IDKCD",
"IDPD",
"IDWCWCCDD",
"IFD",
"IFWGWCD",
"IGCCCD",
"IGCCCDD",
"IGCCD",
"IGCD",
"IGCDCD",
"IGCDD",
"IGCGCCD",
"IGCGCD",
"IGCID",
"IGCJD",
"IGCPD",
"IGCWJWD",
"IGD",
"IGDD",
"IGFCCD",
"IGFCD",
"IGFD",
"IGGCD",
"IGID",
"IGJD",
"IGLCD",
"IGLD",
"IGPCD",
"IGPCDD",
"IICCCD",
"IICCD",
"IICD",
"IICGD",
"IID",
"IIGD",
"IIGJCJCD",
"IIIGCD",
"IIPCD",
"IJCCCCD",
"IJCCCD",
"IJCCD",
"IJCD",
"IJD",
"IJDCCD",
"IJGCD",
"IJGD",
"IJJCD",
"IJJD",
"IJJJCD",
"IJPCDD",
"IJWCFIWGD",
"IJWCFWD",
"IJWCPWGD",
"IKCCCD",
"ILCD",
"ILD",
"ILPCD",
"ILPMD",
"IMCCD",
"IMCD",
"IMD",
"IMPD",
"IPCCCD",
"IPCCD",
"IPCCID",
"IPCCJD",
"IPCD",
"IPCID",
"IPCJD",
"IPCPD",
"IPD",
"IPFCD",
"IPID",
"IPIJD",
"IPJCGD",
"IPJD",
"IPPCD",
"JCCCCCCD",
"JCCCCCD",
"JCCCCD",
"JCCCD",
"JCCCJCD",
"JCCD",
"JCCID",
"JCCJD",
"JCCMCD",
"JCD",
"JCDCCD",
"JCDCD",
"JCDD",
"JCDID",
"JCFCD",
"JCGCCCCD",
"JCGCCCD",
"JCGCCD",
"JCGCD",
"JCGD",
"JCGJGD",
"JCICCCD",
"JCID",
"JCIDD",
"JCJCCCD",
"JCJCCD",
"JCJCD",
"JCJD",
"JCJDD",
"JCJFD",
"JCJJPCD",
"JCJPID",
"JCJWGWD",
"JCLD",
"JCMD",
"JCMPD",
"JCPJCID",
"JCPJJCD",
"JCPPCCCD",
"JD",
"JDCD",
"JDCMD",
"JDD",
"JDGD",
"JDID",
"JDJD",
"JDMD",
"JFCD",
"JFD",
"JGCCCD",
"JGCD",
"JGD",
"JGDCJD",
"JGGD",
"JGPD",
"JICCCD",
"JICD",
"JID",
"JIDD",
"JIID",
"JIJD",
"JILD",
"JJCCCD",
"JJCCD",
"JJCCPGD",
"JJCD",
"JJD",
"JJDCJD",
"JJDD",
"JJGCCD",
"JJGD",
"JJICD",
"JJID",
"JJJCCCD",
"JJJCD",
"JJJCFCCCD",
"JJJD",
"JJJGD",
"JJMCID",
"JJPCD",
"JJPD",
"JJPPJLCD",
"JJWFWCCJJD",
"JJWGWCD",
"JJWGWCDD",
"JKCD",
"JKD",
"JLCCD",
"JLCCDD",
"JLCCJD",
"JLCD",
"JLCDD",
"JLCMD",
"JLCMDD",
"JLD",
"JLDD",
"JLGCJD",
"JLGJCCCJD",
"JLJD",
"JMCD",
"JMD",
"JMJD",
"JMPD",
"JPCCD",
"JPCD",
"JPCMD",
"JPCMDPD",
"JPD",
"JPDCCCD",
"JPDD",
"JPDGCD",
"JPFCCD",
"JPFD",
"JPICD",
"JPID",
"JPIID",
"JPJD",
"JPJJCCCFPCD",
"JPMD",
"JPMDCCD",
"JPMDD",
"JPPJD",
"JPPJLCD",
"KCCCCCD",
"KCCCCD",
"KCCCCDCD",
"KCCCD",
"KCCCDCD",
"KCCCDD",
"KCCCDDCCCD",
"KCCCGD",
"KCCD",
"KCCDCCD",
"KCCDCD",
"KCCJD",
"KCCJDID",
"KCCPD",
"KCD",
"KCDCCCCD",
"KCDCCD",
"KCDCD",
"KCDD",
"KCDICD",
"KCDJD",
"KCGCCCD",
"KCGCCCDD",
"KCGCCD",
"KCGCD",
"KCGD",
"KCGGGD",
"KCICD",
"KCID",
"KCIDCD",
"KCJCD",
"KCJD",
"KCKCD",
"KCMD",
"KCMDCD",
"KCPD",
"KCWGWD",
"KD",
"KDCCCD",
"KDCD",
"KDD",
"KDICD",
"KDLCCPD",
"KFCD",
"KFCDD",
"KFD",
"KFWFD",
"KGCCCD",
"KGCCD",
"KGCD",
"KGCDCCD",
"KGD",
"KGDD",
"KGGD",
"KGJPD",
"KICCD",
"KICD",
"KICDD",
"KID",
"KIDCCD",
"KIDJCD",
"KIGID",
"KIMCD",
"KIMD",
"KIWGWD",
"KJCCD",
"KJCD",
"KJD",
"KJDD",
"KJICCD",
"KJJD",
"KJJDCD",
"KJJJD",
"KJPD",
"KLCCD",
"KLD",
"KMCCJCCD",
"KMCD",
"KMCDD",
"KMD",
"KMDCD",
"KMDD",
"KMMD",
"KMMMD",
"KPCCCD",
"KPCCD",
"KPCD",
"KPD",
"KPDD",
"LCCCCD",
"LCCCD",
"LCCD",
"LCCDD",
"LCCDJCCD",
"LCCGD",
"LCCGID",
"LCCID",
"LCCPCD",
"LCCWGWD",
"LCD",
"LCDCCD",
"LCDCD",
"LCDCDD",
"LCDCDIGCD",
"LCDD",
"LCDFD",
"LCDGDD",
"LCDGID",
"LCDID",
"LCDLD",
"LCDLDCD",
"LCDLDD",
"LCDMCDD",
"LCDPD",
"LCGD",
"LCGDD",
"LCICCWGWD",
"LCID",
"LCIGD",
"LCJCD",
"LCJD",
"LCLD",
"LCMCCD",
"LCMCDD",
"LCMCID",
"LCMCMD",
"LCMD",
"LCMJCICD",
"LCMJD",
"LCPCJCD",
"LCPD",
"LCPMD",
"LCPPCD",
"LD",
"LDCCD",
"LDCD",
"LDCLCD",
"LDCLCDCD",
"LDCPD",
"LDD",
"LDDD",
"LDLCCCCD",
"LFCD",
"LFCFD",
"LFD",
"LFPPPCCD",
"LGCD",
"LGD",
"LGGCCCD",
"LGGCD",
"LGJCD",
"LGJLCD",
"LGJLD",
"LICCCD",
"LICCD",
"LICD",
"LICLD",
"LID",
"LIGD",
"LIPCCCD",
"LIWGWCCCD",
"LJCCCCD",
"LJCCCCWGWD",
"LJCCCD",
"LJCCD",
"LJCCDCCCD",
"LJCCDCCD",
"LJCCDCD",
"LJCCDID",
"LJCCDJCD",
"LJCD",
"LJCDD",
"LJCGD",
"LJCJJD",
"LJCWCWJWCWJD",
"LJD",
"LJDCCD",
"LJDCD",
"LJDD",
"LJDJPD",
"LJDJPDD",
"LJDJPDID",
"LJDJPMDD",
"LJFJJCLCD",
"LJGD",
"LJID",
"LJJCD",
"LJJD",
"LJLD",
"LJMD",
"LJPCD",
"LKCD",
"LLCD",
"LLD",
"LLPD",
"LMCCFCCD",
"LMCD",
"LMD",
"LMID",
"LPCCCCCD",
"LPCCCD",
"LPCCD",
"LPCD",
"LPCDD",
"LPCFPPD",
"LPCGCCCD",
"LPCGCCD",
"LPCGCCDCCD",
"LPCGD",
"LPCGDDPD",
"LPD",
"LPDD",
"LPDDD",
"LPICD",
"LPID",
"LPJD",
"LPMDCCD",
"LPPJD",
"MCCCD",
"MCCD",
"MCCPD",
"MCD",
"MCDCCD",
"MCDCCDCD",
"MCDCCDD",
"MCDCD",
"MCDCGD",
"MCDD",
"MCDFD",
"MCDFDD",
"MCDLCD",
"MCDPPD",
"MCGCD",
"MCICD",
"MCID",
"MCIDWGWD",
"MCJD",
"MCLD",
"MCPD",
"MD",
"MDD",
"MFD",
"MGD",
"MGJD",
"MGJJD",
"MICCD",
"MICD",
"MID",
"MIDCCD",
"MJCCD",
"MJCD",
"MJD",
"MJDD",
"MLCD",
"MLD",
"MLGD",
"MLGGD",
"MMCCD",
"MMCD",
"MMD",
"MMMD",
"MMPD",
"MPCCD",
"MPCD",
"MPD",
"MPDCD",
"MPJPD",
"MPPD",
"PCCCCCCD",
"PCCCCCD",
"PCCCCD",
"PCCCD",
"PCCCDD",
"PCCD",
"PCCDD",
"PCCGJGD",
"PCCID",
"PCCIDD",
"PCD",
"PCDCD",
"PCDCJCD",
"PCDD",
"PCDFCCCD",
"PCDID",
"PCGCCD",
"PCGCD",
"PCGD",
"PCID",
"PCJCD",
"PCJGD",
"PCPCCD",
"PCPD",
"PD",
"PDCCD",
"PDD",
"PDDD",
"PFCCD",
"PFCDD",
"PFCJCD",
"PFD",
"PFFCD",
"PFPCD",
"PGCD",
"PGCJD",
"PGD",
"PGDCICD",
"PGJD",
"PICCD",
"PICD",
"PICDD",
"PID",
"PIFD",
"PIJCCD",
"PIJD",
"PJCCCDD",
"PJCCD",
"PJCD",
"PJD",
"PJDCD",
"PJDD",
"PJFD",
"PJGD",
"PJICCCPCD",
"PJID",
"PJJD",
"PJJDD",
"PJJPD",
"PJLPCD",
"PJPCD",
"PJPD",
"PLD",
"PLPCD",
"PMJCD",
"PPCCCDCD",
"PPCD",
"PPCJCCD",
"PPD",
"PPDCD",
"PPFCCD",
"PPFCD",
"PPGCID",
"PPGD",
"PPGJCCD",
"PPICCD",
"PPIGD",
"PPJCD",
"PPJD",
"PPJJD",
"PPMD",
"PPPCPD",
"PPPD",
"PPPWGWCCD",
"CCCCDID",
"CCCDFGD",
"CCCDGCD",
"CCCDGDD",
"CCCDWD",
"CCCGCCD",
"CCCGCD",
"CCCWCWD",
"CCCWGWCCD",
"CCCWGWCCDWD",
"CCCWGWD",
"CCDDGCD",
"CCDPCCD",
"CCDWD",
"CCFGCCCCCD",
"CCFGFCCCD",
"CCFPCD",
"CCGDD",
"CCGGCCD",
"CCIDGD",
"CCKD",
"CCMIDGCD",
"CCWD",
"CCWGWCCCD",
"CCWGWCD",
"CCWGWDD",
"CDWGWDGD",
"CFCCGWD",
"CFCD",
"CFCWGWD",
"CFGFGFGFGJID",
"CFJD",
"CFWGWCCDGCD",
"CFWGWCJCD",
"CGCCCCD",
"CGCCID",
"CGCCJCCCD",
"CGCDCCD",
"CGCFCCD",
"CGCGCD",
"CGCID",
"CGFCCD",
"CGFCD",
"CGFDID",
"CGGCICD",
"CGGJPD",
"CGICDGCD",
"CGICDID",
"CGIID",
"CGJCCCD",
"CGJCCD",
"CGJCD",
"CGJCDGD",
"CGJCDWD",
"CGJCJCD",
"CGJDD",
"CGJDDCCD",
"CGJGCD",
"CGJID",
"CGLCCD",
"CGPCCD",
"CGPCD",
"CGPD",
"CGPFCCD",
"CGPICD",
"CGPID",
"CGPJCDD",
"CGPJJJCD",
"CICCDGD",
"CICFJGD",
"CICGFID",
"CIDCD",
"CIDGD",
"CIFID",
"CIGCCD",
"CIGMCD",
"CIICCD",
"CIICD",
"CIJCWGWCD",
"CIJD",
"CIJWD",
"CIPCCD",
"CJCCDFD",
"CJCGD",
"CJCID",
"CJCWCCCD",
"CJCWGWD",
"CJGCCCD",
"CJICD",
"CJIDD",
"CJJCD",
"CJWGCD",
"CJWGWID",
"CPCCDGJD",
"CPCDCCD",
"CPDFCD",
"CPGID",
"CPICD",
"CPIWGWD",
"CPJGD",
"CPPCD",
"CPWGWDGD",
"D",
"FCCCCCCCD",
"FCCCCGD",
"FCCCDGD",
"FCCCWGWD",
"FCCDD",
"FCCDFCGD",
"FCCDGD",
"FCCDIPD",
"FCCDWGWD",
"FCCPCD",
"FCCWGWDD",
"FCDGD",
"FCDWD",
"FCDWGD",
"FCFWGWD",
"FCICCD",
"FCICDGD",
"FCIWGWDD",
"FCPCD",
"FCPCPD",
"FCPDGD",
"FCPPGD",
"FCWGWCD",
"FCWGWDD",
"FDDD",
"FDGD",
"FDGJCCD",
"FDWGWD",
"FFCCWGWD",
"FFFFD",
"FFFFFWWFD",
"FFFFWWD",
"FFFWD",
"FFFWWD",
"FFFWWFD",
"FFWWD",
"FGFPCCD",
"FGJWGWD",
"FICCCD",
"FICDGD",
"FICGWD",
"FICJD",
"FIICD",
"FIWGWCDD",
"FIWGWD",
"FIWGWDD",
"FJCCDD",
"FJGPCD",
"FJID",
"FJJGD",
"FMJD",
"FPCCD",
"FPCDD",
"FPDD",
"FPIDGD",
"FPWCWD",
"FWFWFD",
"FWGCD",
"FWGWCCD",
"FWGWCDGCD",
"FWGWCDGD",
"FWGWGD",
"FWJD",
"GCCCCCCDCD",
"GCCCCCDGD",
"GCCCCCID",
"GCCCCCKFD",
"GCCCCDCD",
"GCCCCDGCD",
"GCCCCDGCIJD",
"GCCCCDGDGDDDD",
"GCCCCDWFCCD",
"GCCCCDWGD",
"GCCCCFCCCCD",
"GCCCCID",
"GCCCDCPD",
"GCCCDDGCD",
"GCCCDDGD",
"GCCCDFCD",
"GCCCDGD",
"GCCCDGID",
"GCCCDICD",
"GCCCDMD",
"GCCCDWGCDWFCCD",
"GCCCDWGD",
"GCCCDWGWD",
"GCCCDWID",
"GCCCGPD",
"GCCCIJD",
"GCCCJCCD",
"GCCCJJCD",
"GCCCMCD",
"GCCCWD",
"GCCDCCMD",
"GCCDDWD",
"GCCDFCCD",
"GCCDGCD",
"GCCDGCGD",
"GCCDGDGCD",
"GCCDGJD",
"GCCDPPCD",
"GCCDWD",
"GCCFCCD",
"GCCFID",
"GCCFJCD",
"GCCFWCWCD",
"GCCGDCD",
"GCCGFD",
"GCCGFICD",
"GCCGID",
"GCCGIID",
"GCCICCD",
"GCCICDCD",
"GCCICWDD",
"GCCIDWDCD",
"GCCIID",
"GCCIJD",
"GCCJCDD",
"GCCJCGCD",
"GCCJDD",
"GCCJIDCD",
"GCCKDGD",
"GCCMJCD",
"GCCMJJCD",
"GCCWD",
"GCDCCCDGD",
"GCDCWDWD",
"GCDDDD",
"GCDDJCD",
"GCDFCD",
"GCDFID",
"GCDFJD",
"GCDGCGD",
"GCDGGGCD",
"GCDGIID",
"GCDIID",
"GCDKD",
"GCDMDFD",
"GCDPGD",
"GCDWD",
"GCDWDWD",
"GCFCCCD",
"GCFCCCDGD",
"GCFCDICD",
"GCFCDWGD",
"GCFCIFD",
"GCFCJD",
"GCFDDCID",
"GCFFJD",
"GCFGJPCD",
"GCFICD",
"GCFIDFD",
"GCFJD",
"GCFJDD",
"GCFJPD",
"GCFPCCCD",
"GCFPDD",
"GCFPID",
"GCGCCCCD",
"GCGCCCID",
"GCGCCCIDD",
"GCGCCDD",
"GCGCCDFD",
"GCGCCID",
"GCGCCJCD",
"GCGCCPD",
"GCGCDCCCD",
"GCGCDCD",
"GCGCDCID",
"GCGCDD",
"GCGCFCCD",
"GCGCFCD",
"GCGCFGCD",
"GCGCGCCD",
"GCGCGCD",
"GCGCGCPCCD",
"GCGCGD",
"GCGCGID",
"GCGCGPD",
"GCGCICCCD",
"GCGCICDDFCCCD",
"GCGCIDD",
"GCGCIID",
"GCGCJCCD",
"GCGCJD",
"GCGCJGWD",
"GCGCJJD",
"GCGCLCCD",
"GCGCPCCD",
"GCGCPCCID",
"GCGCPCD",
"GCGCPCJCCD",
"GCGDCCICCD",
"GCGDCD",
"GCGDIMD",
"GCGFCCD",
"GCGFCD",
"GCGFCJD",
"GCGFCMJD",
"GCGFD",
"GCGFDD",
"GCGFFCD",
"GCGFFD",
"GCGFID",
"GCGFIDD",
"GCGFJD",
"GCGGCCD",
"GCGGGJCD",
"GCGGJCID",
"GCGGJCJD",
"GCGICCCD",
"GCGICCD",
"GCGICCJD",
"GCGICDMD",
"GCGICICCD",
"GCGICJCDD",
"GCGICJD",
"GCGICJJD",
"GCGIDCGD",
"GCGIDD",
"GCGIDGD",
"GCGIGCCD",
"GCGIICD",
"GCGIID",
"GCGIMCCD",
"GCGIMJD",
"GCGIPCCD",
"GCGIPD",
"GCGJCCCCDD",
"GCGJCCCD",
"GCGJCCDD",
"GCGJCD",
"GCGJCID",
"GCGJD",
"GCGJDD",
"GCGJGICD",
"GCGJICD",
"GCGJID",
"GCGJIFCD",
"GCGJJCD",
"GCGJPCCD",
"GCGJPCD",
"GCGKCD",
"GCGKD",
"GCGLCDCCD",
"GCGLCJD",
"GCGLGCCD",
"GCGLGPCCID",
"GCGLIPJD",
"GCGLJJID",
"GCGMCD",
"GCGMD",
"GCGPCCCCCD",
"GCGPCCCD",
"GCGPCD",
"GCGPCFCCD",
"GCGPCID",
"GCGPCPD",
"GCGPD",
"GCGPFCD",
"GCGPGCD",
"GCGPIID",
"GCGPJCCD",
"GCGPJCD",
"GCGPJD",
"GCGPJGCD",
"GCGPJID",
"GCGPLICD",
"GCGPLID",
"GCGPPCCD",
"GCGPPCD",
"GCGPPD",
"GCGPPID",
"GCGPPJD",
"GCGWPFCD",
"GCICCCDD",
"GCICCDFD",
"GCICCJD",
"GCICCWDWDCGD",
"GCICDFCD",
"GCICPD",
"GCIDCD",
"GCIDCGD",
"GCIDDGD",
"GCIDPCCD",
"GCIICD",
"GCIJCCD",
"GCIJCCDMD",
"GCIJCID",
"GCIKD",
"GCIPCCD",
"GCIPCPD",
"GCJCCCCCD",
"GCJCCDCD",
"GCJCCDGD",
"GCJCCDMD",
"GCJCCICD",
"GCJCDD",
"GCJCICD",
"GCJCKDD",
"GCJDCDCD",
"GCJDDCD",
"GCJGCD",
"GCJICCCD",
"GCJICGD",
"GCJIDCD",
"GCJIDD",
"GCJJCDD",
"GCJJCJCD",
"GCJJDD",
"GCJMCID",
"GCJPCCCD",
"GCJPCCD",
"GCJPCD",
"GCJPCDMD",
"GCJPID",
"GCJPJD",
"GCJWCPWD",
"GCKCCCD",
"GCKCD",
"GCKDGD",
"GCKGD",
"GCKICD",
"GCKJCCD",
"GCKPD",
"GCLCID",
"GCLGIJCD",
"GCLID",
"GCMCCDFD",
"GCMCCKGD",
"GCMCJCCD",
"GCMCPD",
"GCMDCGCD",
"GCMFCDGD",
"GCMID",
"GCMJCD",
"GCMJCDD",
"GCMJCID",
"GCMJID",
"GCMJPCCCCD",
"GCMKD",
"GCMKGD",
"GCMPCCD",
"GCMPJD",
"GCMPPCCD",
"GCPCCCMD",
"GCPCCDCD",
"GCPCCDMD",
"GCPCCDWD",
"GCPCCWGCWD",
"GCPCDCD",
"GCPCDGD",
"GCPCDWD",
"GCPCICDWGD",
"GCPCIICFD",
"GCPCJCFD",
"GCPCJD",
"GCPDGD",
"GCPGGCD",
"GCPICCCDGD",
"GCPICCD",
"GCPICD",
"GCPICID",
"GCPID",
"GCPIJCCD",
"GCPJCDD",
"GCPJPDD",
"GCPKD",
"GCPMCCD",
"GCPMJCD",
"GCPPCD",
"GCPPID",
"GCPPWCWID",
"GCPWCWCD",
"GCPWDWDCCD",
"GCWDWDCCD",
"GCWGWCCD",
"GCWGWD",
"GCWGWJD",
"GCWJCCD",
"GDCCCCFCD",
"GDCCCFCPD",
"GDCCPCCD",
"GDCDGCD",
"GDCDJD",
"GDCKGCD",
"GDDGCD",
"GDDGD",
"GDDMD",
"GDICCCD",
"GDIPD",
"GDJCICD",
"GDLCCD",
"GFCCCCCCD",
"GFCCCCFD",
"GFCCCDD",
"GFCCCDDD",
"GFCCCDFCD",
"GFCCCDFDD",
"GFCCCDGCD",
"GFCCCDGD",
"GFCCCDID",
"GFCCDCCD",
"GFCCDCD",
"GFCCDCFD",
"GFCCDDD",
"GFCCDFCD",
"GFCCDFDMD",
"GFCCDFFCD",
"GFCCDFFD",
"GFCCDFGD",
"GFCCDGCCD",
"GFCCDGCD",
"GFCCDGD",
"GFCCDGGID",
"GFCCDGICDJICD",
"GFCCDID",
"GFCCDLD",
"GFCCDMD",
"GFCCDWCD",
"GFCCDWD",
"GFCCDWFDD",
"GFCCDWGCD",
"GFCCDWGD",
"GFCCID",
"GFCCJD",
"GFCCPCD",
"GFCDDCCCD",
"GFCDFCCD",
"GFCDFCD",
"GFCDGCD",
"GFCDGD",
"GFCDGDD",
"GFCDGFCD",
"GFCDGGCD",
"GFCDGGD",
"GFCDGPD",
"GFCDID",
"GFCDMIDMD",
"GFCDWCD",
"GFCDWD",
"GFCDWGD",
"GFCFCD",
"GFCGCD",
"GFCGD",
"GFCICD",
"GFCIDCGD",
"GFCIDWD",
"GFCJCCCD",
"GFCJCCD",
"GFCJCCDD",
"GFCJCJD",
"GFCJDD",
"GFCJID",
"GFCKD",
"GFCLCD",
"GFCMCCD",
"GFCMJCDWD",
"GFCPDGD",
"GFCPPCD",
"GFCWCD",
"GFCWCWCD",
"GFCWFWFCCD",
"GFCWGWD",
"GFDCCD",
"GFDCDCDD",
"GFDCDD",
"GFDCDGD",
"GFDCID",
"GFDDCCD",
"GFDDCGD",
"GFDDD",
"GFDDGD",
"GFDDPD",
"GFDGCD",
"GFDGD",
"GFDICD",
"GFDICPCD",
"GFDID",
"GFDJPCD",
"GFDWD",
"GFDWDWD",
"GFFCCCD",
"GFFCJD",
"GFFDD",
"GFFJJDGD",
"GFFPDGD",
"GFGCCCDD",
"GFGCCD",
"GFGCCDGPD",
"GFGFICD",
"GFGMPD",
"GFICCDCD",
"GFICCDD",
"GFICDCD",
"GFICDCJD",
"GFICDD",
"GFICDGD",
"GFICJD",
"GFICKD",
"GFIDD",
"GFIDFGD",
"GFIDGCD",
"GFIDGD",
"GFIDPCPCD",
"GFIGD",
"GFIID",
"GFIIDFCD",
"GFIIGD",
"GFIJCCD",
"GFIJD",
"GFJCCCCD",
"GFJCCDD",
"GFJCDD",
"GFJCDGD",
"GFJCDWD",
"GFJCJD",
"GFJDD",
"GFJDGCD",
"GFJDGFCD",
"GFJDWD",
"GFJDWFICGD",
"GFJFD",
"GFJICD",
"GFJICDGD",
"GFJID",
"GFJJCD",
"GFJJDWGD",
"GFKD",
"GFKDGD",
"GFLCD",
"GFMJCD",
"GFPCCCD",
"GFPCCD",
"GFPCDCD",
"GFPCDD",
"GFPCJD",
"GFPDCD",
"GFPDD",
"GFPDID",
"GFPICD",
"GFPIJD",
"GFPJIDD",
"GFPKD",
"GFPPCCD",
"GFPPCD",
"GFWCWID",
"GFWDWD",
"GFWJD",
"GGCCCCCCD",
"GGCCCCJCD",
"GGCCCDD",
"GGCCCDDD",
"GGCCCDGD",
"GGCCCDWGCD",
"GGCCDCDGD",
"GGCCDFGCD",
"GGCCDGD",
"GGCCDGDCD",
"GGCCDID",
"GGCCDMCD",
"GGCCDWGD",
"GGCCFCD",
"GGCCFD",
"GGCCGCCD",
"GGCCICDD",
"GGCCID",
"GGCCJCCDD",
"GGCCLCD",
"GGCCPCD",
"GGCCPJD",
"GGCDCCDGD",
"GGCDCDD",
"GGCDCDGD",
"GGCDCGD",
"GGCDDCCD",
"GGCDGD",
"GGCDGPGCD",
"GGCDID",
"GGCDMD",
"GGCFCCD",
"GGCFCD",
"GGCFID",
"GGCGCCCCD",
"GGCGCCD",
"GGCGCGCCD",
"GGCGCGCD",
"GGCGCGFCD",
"GGCGDGCD",
"GGCGFD",
"GGCGFID",
"GGCGGCD",
"GGCGGGD",
"GGCGGJD",
"GGCGICCD",
"GGCGIICD",
"GGCGILICD",
"GGCGJID",
"GGCGJIJCD",
"GGCGPCCD",
"GGCGPCD",
"GGCGPJCCD",
"GGCGPJCD",
"GGCGPJD",
"GGCGPPD",
"GGCICCD",
"GGCICCID",
"GGCICD",
"GGCIDD",
"GGCIICD",
"GGCIIJD",
"GGCIPCICCD",
"GGCIPD",
"GGCJCDD",
"GGCJCJD",
"GGCJDD",
"GGCJID",
"GGCKLCD",
"GGCLCCD",
"GGCMCIJD",
"GGCMID",
"GGCPCCCD",
"GGCPCCJCCCWD",
"GGCPCDD",
"GGCPDD",
"GGCPGGCID",
"GGCPICD",
"GGCPICDD",
"GGCPID",
"GGCPJCD",
"GGCPPCCD",
"GGCPPD",
"GGCWDWCCDGCD",
"GGCWGD",
"GGDCDCCD",
"GGDCJD",
"GGDDFD",
"GGDGCCGCD",
"GGDGCDGD",
"GGDLGD",
"GGFCCCCD",
"GGFCDID",
"GGFCFCDD",
"GGFCID",
"GGFCJD",
"GGFCMCCD",
"GGFDCD",
"GGFDDD",
"GGFICD",
"GGFICDD",
"GGFID",
"GGFJCD",
"GGFJID",
"GGFJMD",
"GGFKID",
"GGFMJDD",
"GGFPCD",
"GGFPD",
"GGFWID",
"GGGCCCDGD",
"GGGCCCICD",
"GGGCCDGD",
"GGGCCID",
"GGGCGGD",
"GGGCJCD",
"GGGCPD",
"GGGCPFCPCD",
"GGGCPJD",
"GGGFCCD",
"GGGFCCID",
"GGGFCD",
"GGGFCJD",
"GGGFID",
"GGGGPJD",
"GGGICCD",
"GGGICJD",
"GGGIDGID",
"GGGIICD",
"GGGJCCD",
"GGGJGID",
"GGGKCD",
"GGGKDJD",
"GGGLJCD",
"GGGMCD",
"GGGPCD",
"GGGPFIDWD",
"GGGPIICD",
"GGGPIPD",
"GGGPPID",
"GGICCGCD",
"GGICCID",
"GGICDD",
"GGICFID",
"GGICJCD",
"GGICJDD",
"GGICPCCID",
"GGICPD",
"GGIDID",
"GGIDWGD",
"GGIFCCD",
"GGIFCD",
"GGIFCJD",
"GGIFICD",
"GGIFID",
"GGIFIDDD",
"GGIFJD",
"GGIFMID",
"GGIGPFD",
"GGIICCD",
"GGIJCD",
"GGIJCID",
"GGIJDD",
"GGIJICD",
"GGIPCCD",
"GGIPDCCD",
"GGIPICD",
"GGIPMICD",
"GGJCCCCCD",
"GGJCCCCD",
"GGJCCICD",
"GGJCDD",
"GGJCGCD",
"GGJCICCD",
"GGJCICD",
"GGJGCD",
"GGJGCICD",
"GGJGCLCGCD",
"GGJICCD",
"GGJICJD",
"GGJICPCCD",
"GGJID",
"GGJIID",
"GGJJCD",
"GGJJCDD",
"GGJJCKD",
"GGJJID",
"GGJMID",
"GGJPCCCCD",
"GGJPCCD",
"GGJPCD",
"GGJPCJCD",
"GGJPCJPJCD",
"GGJPID",
"GGJPJD",
"GGKCCCD",
"GGKCD",
"GGKDD",
"GGLCCD",
"GGLCCPJD",
"GGLFCCCD",
"GGLGCJD",
"GGLGFID",
"GGLGPCD",
"GGLJCCD",
"GGLJCID",
"GGMFJD",
"GGMJCDGD",
"GGMPJD",
"GGPCCDD",
"GGPCDD",
"GGPCICD",
"GGPCID",
"GGPFCCD",
"GGPFCD",
"GGPFCID",
"GGPFJD",
"GGPGCD",
"GGPGID",
"GGPICFCD",
"GGPID",
"GGPIDD",
"GGPIID",
"GGPJCCCD",
"GGPJCCD",
"GGPJCCID",
"GGPJCJMD",
"GGPJID",
"GGPJKCCD",
"GGPJPCD",
"GGPPCID",
"GGPPDD",
"GGPPFCCD",
"GGPPICD",
"GGPPJCD",
"GGWCJD",
"GGWGWID",
"GGWIWCCD",
"GICCCCCCCD",
"GICCCDGD",
"GICCDDD",
"GICCDGCD",
"GICCDGD",
"GICCDWD",
"GICCDWGD",
"GICCFCCD",
"GICCICCD",
"GICCICD",
"GICCID",
"GICCJCCD",
"GICDCCCD",
"GICDDWGD",
"GICDGD",
"GICDGJCD",
"GICDID",
"GICDWD",
"GICFD",
"GICFID",
"GICGCD",
"GICICCCCCCCCPD",
"GICICDDGD",
"GICICDFD",
"GICIDGD",
"GICIFD",
"GICIIFID",
"GICJDD",
"GICJDGD",
"GICJJD",
"GICKD",
"GICPCD",
"GICPID",
"GICPIDD",
"GICWCWCWD",
"GIDCDD",
"GIDDCD",
"GIDDGD",
"GIDDWGD",
"GIDFCD",
"GIDGDCD",
"GIDJJD",
"GIFCCCD",
"GIFCD",
"GIFCJD",
"GIFFFWFWD",
"GIFGD",
"GIFICCCD",
"GIFID",
"GIFIDCD",
"GIFJD",
"GIFPD",
"GIFPDCD",
"GIGCCDMD",
"GIGGCD",
"GIGJD",
"GIGMD",
"GIICCCD",
"GIICCDD",
"GIICCDGD",
"GIICCDMCD",
"GIICDD",
"GIICID",
"GIIDFCD",
"GIIDGD",
"GIIDJCD",
"GIIFICD",
"GIIICCD",
"GIIJD",
"GIIPCD",
"GIIPD",
"GIJCCDD",
"GIJCCICD",
"GIJCCJD",
"GIJCDCD",
"GIJCDGD",
"GIJCDWCFD",
"GIJDCD",
"GIJICCD",
"GIJICD",
"GIJICDGD",
"GIJIDD",
"GIJJD",
"GIJJICJD",
"GIJPCD",
"GIJPID",
"GILGCD",
"GIMCID",
"GIMCPD",
"GIPCCCCCD",
"GIPCCCDGD",
"GIPCCDD",
"GIPDCCCCD",
"GIPDGD",
"GIPDWCCD",
"GIPFD",
"GIPID",
"GIPJCDD",
"GIPJD",
"GIWGFWDGD",
"GIWGWDD",
"GJCCCCCDCD",
"GJCCCCCDD",
"GJCCCCDD",
"GJCCCDCD",
"GJCCCDGCD",
"GJCCCDGD",
"GJCCCDLD",
"GJCCCDWD",
"GJCCCPD",
"GJCCDFDD",
"GJCCID",
"GJCCPD",
"GJCCWKWCD",
"GJCDMCCD",
"GJCICCD",
"GJCIWGWCCD",
"GJCJCDID",
"GJCKD",
"GJCLCCCD",
"GJCMIGD",
"GJCMWD",
"GJCPDCCD",
"GJDCDCD",
"GJFCCCD",
"GJFCCDWGCGD",
"GJFCDCD",
"GJFCDD",
"GJFCDWFCD",
"GJFID",
"GJGCCCCD",
"GJGCCCD",
"GJGCCD",
"GJGCCDD",
"GJGCCDID",
"GJGCMJD",
"GJGFJCD",
"GJGJCD",
"GJGJCJCD",
"GJGJCKDGD",
"GJGMCCD",
"GJGPCCCD",
"GJGPD",
"GJICCCCD",
"GJICCDCD",
"GJICDD",
"GJICJD",
"GJIDD",
"GJIDWCCCWD",
"GJIICD",
"GJIIID",
"GJIJCCD",
"GJIJCD",
"GJIJD",
"GJIPD",
"GJJCCDGD",
"GJJCDCCD",
"GJJCDCD",
"GJJCDD",
"GJJCID",
"GJJCPD",
"GJJDDCCD",
"GJJGCCCCD",
"GJJICCCCD",
"GJJICCDCD",
"GJJICD",
"GJJID",
"GJJIPD",
"GJJPD",
"GJKJCD",
"GJKPD",
"GJMCCCCD",
"GJMCCCD",
"GJMCD",
"GJMICD",
"GJMJCCCCD",
"GJMJCD",
"GJMPCDD",
"GJPCCCDGD",
"GJPCCDD",
"GJPCCDGD",
"GJPCCDID",
"GJPCID",
"GJPDCCD",
"GJPDDGD",
"GJPDGD",
"GJPID",
"GJPIDID",
"GJPJCD",
"GJPJMJCD",
"GJPPCCD",
"GJWGWCD",
"GKCCCCD",
"GKCCDCD",
"GKCCDD",
"GKCCDWD",
"GKCDDICD",
"GKCID",
"GKCJDDWD",
"GKCWWD",
"GKDCCD",
"GKDID",
"GKGDGCDD",
"GKGPDD",
"GKICD",
"GKJCCDGCD",
"GKJCCID",
"GKJCD",
"GKJD",
"GKJID",
"GKJIPCD",
"GKPCDFJD",
"GKPCJD",
"GKWD",
"GLCCCCCID",
"GLCCCDD",
"GLCDCGCD",
"GLCICD",
"GLCID",
"GLCJID",
"GLCMCICD",
"GLCPCCDGD",
"GLCWGWDGCD",
"GLFCCD",
"GLFD",
"GLFJJCD",
"GLFKCCCD",
"GLGCCD",
"GLGCDDFCD",
"GLGJCCD",
"GLICCCCD",
"GLICCCD",
"GLICCDD",
"GLIFD",
"GLIID",
"GLIJD",
"GLIWDWD",
"GLJCCPCJD",
"GLJCDD",
"GLJCID",
"GLJDCD",
"GLJGCCD",
"GLJGCDGD",
"GLKCIID",
"GLMCCDD",
"GLMCD",
"GLPCCCCD",
"GLPCCDD",
"GLPCCDDFD",
"GLPICD",
"GLPID",
"GLPJCD",
"GLPPCCDD",
"GLPWIWD",
"GMCCCDD",
"GMCCDCCCD",
"GMCCDCMD",
"GMCCDMD",
"GMCCDWD",
"GMCID",
"GMCIID",
"GMCKCD",
"GMDPDID",
"GMFCCD",
"GMFCD",
"GMFCDCDGD",
"GMFICD",
"GMGCD",
"GMGCDCD",
"GMICID",
"GMIID",
"GMJCCCD",
"GMMCDGDD",
"GMMICCD",
"GMPCCCD",
"GMPDGCD",
"GMPFD",
"GMPICD",
"GMPID",
"GMPMCCCD",
"GPCCCCCD",
"GPCCCCDGD",
"GPCCCDGD",
"GPCCCDLD",
"GPCCCID",
"GPCCCPD",
"GPCCDGD",
"GPCCDWD",
"GPCCGD",
"GPCCICD",
"GPCCID",
"GPCCJD",
"GPCCWCWCD",
"GPCDCJD",
"GPCDDCD",
"GPCDGFCD",
"GPCDIID",
"GPCFCCCDGD",
"GPCFD",
"GPCGCD",
"GPCICD",
"GPCIDD",
"GPCIDWCWD",
"GPCIID",
"GPCIJCD",
"GPCIPD",
"GPCJD",
"GPCKD",
"GPCPCD",
"GPCPCDD",
"GPCPD",
"GPCWWD",
"GPFCCDWD",
"GPFCCWD",
"GPFCDD",
"GPFCWFWD",
"GPFDD",
"GPFICD",
"GPFIGGPCD",
"GPFJCD",
"GPFJD",
"GPFJDCD",
"GPFPCDCD",
"GPFPD",
"GPGCCCCD",
"GPGCCD",
"GPGCD",
"GPGCDWGD",
"GPGCGCD",
"GPGCIICD",
"GPGCPCCD",
"GPGFFCD",
"GPGICCD",
"GPGICD",
"GPGID",
"GPGJCD",
"GPGJD",
"GPGPCGD",
"GPGPDD",
"GPGPGJCCD",
"GPGPICD",
"GPICCCCDGD",
"GPICDD",
"GPICDGD",
"GPICICD",
"GPIDFGD",
"GPIICD",
"GPIICDGD",
"GPIID",
"GPIJCCD",
"GPIJCD",
"GPIJCDD",
"GPIPCCCD",
"GPIPCCD",
"GPJCCCCD",
"GPJCCDD",
"GPJCDCPD",
"GPJCDFD",
"GPJCDGCD",
"GPJCDGD",
"GPJCID",
"GPJCIDD",
"GPJCJCD",
"GPJCPCCD",
"GPJFCCD",
"GPJFIDD",
"GPJICD",
"GPJID",
"GPJPCD",
"GPJPD",
"GPKD",
"GPLCCCCD",
"GPLCCD",
"GPLCD",
"GPLICD",
"GPLID",
"GPMCCCD",
"GPMCCD",
"GPMCD",
"GPMD",
"GPMDJCD",
"GPPCCDD",
"GPPCCDWD",
"GPPCDWCCCD",
"GPPDDD",
"GPPFD",
"GPPGCCCD",
"GPPGD",
"GPPGDGD",
"GPPICD",
"GPPIID",
"GPPIJD",
"GPPJCCD",
"GPPJCDFCD",
"GPPJDCD",
"GPPMJCD",
"GPPPCD",
"GPPPICD",
"GPWCCD",
"GPWGWCDGD",
"GWCCCCD",
"GWCWJD",
"GWGPPD",
"GWGWCCD",
"GWICCD",
"GWJD",
"GWPCPWD",
"ICCCDFPCCFGCCD",
"ICCCDGD",
"ICCCDGJD",
"ICCCID",
"ICCCWGWD",
"ICCCWGWDGD",
"ICCFJCWGFJCCD",
"ICCICCD",
"ICCWGWD",
"ICDCCCCD",
"ICDCDCD",
"ICDGD",
"ICDWGD",
"ICFCD",
"ICFCJCD",
"ICFDID",
"ICGD",
"ICGGD",
"ICICCD",
"ICIDWID",
"ICIWD",
"ICJCCD",
"ICJWGWCCD",
"ICWGWDD",
"IDDGCD",
"IDGD",
"IFCD",
"IFCICCDGD",
"IFGD",
"IFICCD",
"IFIDWGD",
"IFKD",
"IGCCCCDCCD",
"IGFGJCGDD",
"IGGDFCD",
"IGGPCGCD",
"IGICCD",
"IIGCD",
"IIICD",
"IIID",
"IIJD",
"IIWGWCD",
"IIWGWD",
"IJCCDGD",
"IJCCWGWD",
"IJCDCCD",
"IJCDD",
"IJDD",
"IJGCCD",
"IJPCCD",
"IJPCD",
"IKCD",
"IMCCCCD",
"IPCCCCD",
"IPCGD",
"IPFD",
"IPJCD",
"IPPD",
"IPPDCD",
"IPPJCD",
"IPWGWCCD",
"IPWGWCD",
"IWGMFCCDGD",
"IWGWCCD",
"IWGWCD",
"IWGWD",
"IWGWID",
"JCCCWFWD",
"JCCGD",
"JCCPD",
"JCCWGWD",
"JCDCWMWMWCCCD",
"JCDDWD",
"JCGDWCPWD",
"JCIDGD",
"JCIWGWD",
"JCJGCD",
"JCPCD",
"JCWGWCDWD",
"JDGCCD",
"JDWGWCD",
"JFCCD",
"JFCCWGWDD",
"JFID",
"JFJCCD",
"JGCCD",
"JGCCGCD",
"JGCDFCD",
"JGCFCCCD",
"JGFD",
"JGICD",
"JGID",
"JGJDCD",
"JGMCCD",
"JGPCCD",
"JICCD",
"JIGPCD",
"JIICD",
"JIPCD",
"JJCCCDGD",
"JJCCJD",
"JJCGD",
"JJCID",
"JJCWGWCCDFGCD",
"JJCWGWD",
"JJLWGWCD",
"JJPJJD",
"JJPPCD",
"JPCCCCWGWD",
"JPCCCD",
"JPIWGWID",
"JPJJD",
"JPWWD",
"JWWFWCD",
"KCCCCDD",
"KCCDGD",
"KCDGCD",
"KCDGD",
"KCFCD",
"KCICCD",
"KCJCCCDGD",
"KFCCD",
"KGCDCCCD",
"KGCDD",
"KGCDGD",
"KGCGCD",
"KGGCDID",
"KIDJCCD",
"KIICD",
"KJGCD",
"KLPCCD",
"KPFCCD",
"KPGCD",
"KPICD",
"KPKICD",
"LCCCDGCD",
"LCGFCD",
"LCWGWCFDD",
"LCWGWICD",
"LFCCD",
"LFCWGWCD",
"LFJD",
"LGCCCD",
"LGCCD",
"LGCICD",
"LGCPJCD",
"LGFCCD",
"LGFCD",
"LGFD",
"LGFICD",
"LGFID",
"LGFJD",
"LGFPD",
"LGICCDGCD",
"LGICD",
"LGID",
"LGIICD",
"LGJCCD",
"LGJCID",
"LGLIDD",
"LGPCCD",
"LGPCD",
"LGPDDWD",
"LGPJPD",
"LIJD",
"LIWGWD",
"LJFCCD",
"LJFGCD",
"LKGPD",
"LKPJCCD",
"LMPJIPCCD",
"LPCGDGD",
"LPPWCCD",
"MCCID",
"MCCWGWD",
"MCJCCD",
"MCWD",
"MJCWGWD",
"MPCFCD",
"MPCGD",
"PCCGGCCCCD",
"PCCKD",
"PCDDDCCD",
"PCJD",
"PCJJDD",
"PCLGCCD",
"PCWGWCD",
"PCWGWID",
"PCWGWPDWD",
"PFCCCWGWD",
"PFCCJICCD",
"PFCD",
"PFCPCDCD",
"PFCWGWCD",
"PFID",
"PFPID",
"PGCCD",
"PGCCDGD",
"PGCID",
"PGCJCJD",
"PGFCD",
"PGJCCD",
"PGJICD",
"PGPCCCJD",
"PGPCCD",
"PGPD",
"PGPID",
"PIGFCD",
"PIWGWCCD",
"PJCCCD",
"PJCDD",
"PJCWGWDID",
"PJDFFD",
"PLDWGWCDCGD",
"PPCCCDD",
"PPCCD",
"PPCCDGD",
"PPCCGWD",
"PPCCWGWD",
"PPCID",
"PPCPWCCCWCDD",
"PPCWGWD",
"PPDD",
"PPFWGWCD",
"PPICD",
"PPIDD",
"PPLWGWCCD",
"PPWGWCD",
"PPWGWD",
"PWCCCD",
"PWCWCD",
"PWFWCD",
"PWWJWGWD",
"WCCCD",
"WCJWD",
"WFFPCWJD",
"WFPWPWCCD",
"WFWD",
"WGCJDWFCCCD",
"WJCCD",
"WPWJD",
"WWIWWCWD",
]
| 14.286566 | 39 | 0.439029 |
4fea8a472c077e52cfe80bb80684a1229bd92a2d | 842 | py | Python | scripts/script2_someprocessors.py | SMV818VMS/crispr_detection | abecc9289afe0802f641b3703b24838ea6e0c796 | [
"MIT"
] | 1 | 2017-11-27T04:53:38.000Z | 2017-11-27T04:53:38.000Z | scripts/script2_someprocessors.py | SMV818VMS/crispr_detection | abecc9289afe0802f641b3703b24838ea6e0c796 | [
"MIT"
] | null | null | null | scripts/script2_someprocessors.py | SMV818VMS/crispr_detection | abecc9289afe0802f641b3703b24838ea6e0c796 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import script1_functions as func
# # Process insertion files:
# func.insertion_processor("../ref_data/NC_000912transposoninsertions.gbk", "InsTh41", "../ref_data/insTh41_positions.txt")
# func.insertion_processor("../ref_data/NC_000912transposoninsertions.gbk", "InsTh7", "../ref_data/insTh7_positions.txt")
# # Process coding fasta file:
# func.coding_coordinates("../ref_data/coding_mpnM129.txt", "../ref_data/gene_coordinates.txt")
# # Order the total file (after concatenation!)
# func.order_total_file("../results/total.txt")
# # Convert the gene names file to other having the same coordinates but with the MPN nomenclature:
# func.genename_associator("../ref_data/gene_mpn_relation.gbk", "../ref_data/gene_coordinates.txt")
# Order and process the file of oligo killers:
func.process_total_killer_file()
| 40.095238 | 123 | 0.769596 |
7fbb8a83cbd6fb3589757f41bf455968c2866c36 | 3,221 | py | Python | tests/wallet/test_backup.py | grayfallstown/greendoge-blockchain | 31e325913374d694dc0859140d006a642e7f95ac | [
"Apache-2.0"
] | 44 | 2021-07-06T10:09:06.000Z | 2022-02-09T04:30:14.000Z | tests/wallet/test_backup.py | grayfallstown/greendoge-blockchain | 31e325913374d694dc0859140d006a642e7f95ac | [
"Apache-2.0"
] | 67 | 2021-07-06T11:57:18.000Z | 2022-02-02T16:14:15.000Z | tests/wallet/test_backup.py | grayfallstown/greendoge-blockchain | 31e325913374d694dc0859140d006a642e7f95ac | [
"Apache-2.0"
] | 16 | 2021-07-06T10:36:37.000Z | 2022-03-15T08:35:16.000Z | # import asyncio
# from pathlib import Path
# from secrets import token_bytes
#
# import pytest
#
# from greendoge.consensus.block_rewards import calculate_pool_reward, calculate_base_farmer_reward
# from greendoge.simulator.simulator_protocol import FarmNewBlockProtocol
# from greendoge.types.peer_info import PeerInfo
# from greendoge.util.ints import uint16, uint32, uint64
# from tests.setup_nodes import setup_simulators_and_wallets
# from greendoge.wallet.cc_wallet.cc_wallet import CCWallet
# from tests.time_out_assert import time_out_assert
#
#
# @pytest.fixture(scope="module")
# def event_loop():
# loop = asyncio.get_event_loop()
# yield loop
#
#
# class TestCCWalletBackup:
# @pytest.fixture(scope="function")
# async def two_wallet_nodes(self):
# async for _ in setup_simulators_and_wallets(1, 1, {}):
# yield _
#
# @pytest.mark.asyncio
# async def test_coin_backup(self, two_wallet_nodes):
# num_blocks = 3
# full_nodes, wallets = two_wallet_nodes
# full_node_api = full_nodes[0]
# full_node_server = full_node_api.full_node.server
# wallet_node, server_2 = wallets[0]
# wallet = wallet_node.wallet_state_manager.main_wallet
#
# ph = await wallet.get_new_puzzlehash()
#
# await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
# for i in range(1, num_blocks):
# await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
#
# funds = sum(
# [
# calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
# for i in range(1, num_blocks - 1)
# ]
# )
#
# await time_out_assert(15, wallet.get_confirmed_balance, funds)
#
# cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))
#
# for i in range(1, num_blocks):
# await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
#
# await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
# await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)
#
# # Write backup to file
# filename = f"test-backup-{token_bytes(16).hex()}"
# file_path = Path(filename)
# await wallet_node.wallet_state_manager.create_wallet_backup(file_path)
#
# # Close wallet and restart
# db_path = wallet_node.wallet_state_manager.db_path
# wallet_node._close()
# await wallet_node._await_closed()
#
# db_path.unlink()
#
# started = await wallet_node._start()
# assert started is False
#
# await wallet_node._start(backup_file=file_path)
#
# await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), wallet_node.on_connect)
#
# all_wallets = wallet_node.wallet_state_manager.wallets
# assert len(all_wallets) == 2
#
# cc_wallet_from_backup = wallet_node.wallet_state_manager.wallets[2]
#
# await time_out_assert(15, cc_wallet_from_backup.get_confirmed_balance, 100)
# if file_path.exists():
# file_path.unlink()
| 37.022989 | 116 | 0.686433 |
d9a6e728e26743eeecbb2e1be56a675e64b24c16 | 296 | py | Python | pydoge/connection_dbcursor.py | ezeportela/pydoge | 89ba8c4537cf22470caa51a698b9462f5522c079 | [
"MIT"
] | null | null | null | pydoge/connection_dbcursor.py | ezeportela/pydoge | 89ba8c4537cf22470caa51a698b9462f5522c079 | [
"MIT"
] | null | null | null | pydoge/connection_dbcursor.py | ezeportela/pydoge | 89ba8c4537cf22470caa51a698b9462f5522c079 | [
"MIT"
] | null | null | null | class ConnectionDbCursor:
def __init__(self, connection):
self.connection = connection
def __enter__(self):
self.cursor = self.connection.cursor()
return self.cursor
def __exit__(self, exception_type, exception_value, traceback):
self.cursor.close()
| 26.909091 | 67 | 0.682432 |
fb6ed0bc3f254ee331838ce04842961dcf87bb30 | 5,334 | py | Python | tensorflow/tensorboard/plugins/scalars/scalars_plugin_test.py | salonirk11/tensorflow | 7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7 | [
"Apache-2.0"
] | 4 | 2019-04-14T13:33:17.000Z | 2021-09-22T02:33:29.000Z | tensorflow/tensorboard/plugins/scalars/scalars_plugin_test.py | salonirk11/tensorflow | 7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7 | [
"Apache-2.0"
] | 1 | 2021-02-02T23:00:07.000Z | 2021-02-02T23:00:07.000Z | tensorflow/tensorboard/plugins/scalars/scalars_plugin_test.py | salonirk11/tensorflow | 7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7 | [
"Apache-2.0"
] | 2 | 2017-07-16T13:54:08.000Z | 2018-05-21T09:02:34.000Z | # -*- coding: utf-8 -*-
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for the Scalars Plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os.path
from six import StringIO
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
from tensorflow.tensorboard.plugins.scalars import scalars_plugin
class ScalarsPluginTest(tf.test.TestCase):
_STEPS = 99
_SCALAR_TAG = 'simple-values'
_HISTOGRAM_TAG = 'complicated-values'
_RUN_WITH_SCALARS = '_RUN_WITH_SCALARS'
_RUN_WITH_HISTOGRAM = '_RUN_WITH_HISTOGRAM'
def set_up_with_runs(self, run_names):
self.logdir = self.get_temp_dir()
for run_name in run_names:
self.generate_run(run_name)
multiplexer = event_multiplexer.EventMultiplexer()
multiplexer.AddRunsFromDirectory(self.logdir)
multiplexer.Reload()
self.plugin = scalars_plugin.ScalarsPlugin()
self.apps = self.plugin.get_plugin_apps(multiplexer, None)
def generate_run(self, run_name):
if run_name == self._RUN_WITH_SCALARS:
(use_scalars, use_histogram) = (True, False)
elif run_name == self._RUN_WITH_HISTOGRAM:
(use_scalars, use_histogram) = (False, True)
else:
assert False, 'Invalid run name: %r' % run_name
tf.reset_default_graph()
sess = tf.Session()
if use_scalars:
scalar_placeholder = tf.placeholder(tf.int64)
tf.summary.scalar(self._SCALAR_TAG, scalar_placeholder)
if use_histogram:
histogram_placeholder = tf.placeholder(tf.float32, shape=[3])
tf.summary.histogram(self._HISTOGRAM_TAG, histogram_placeholder)
summ = tf.summary.merge_all()
subdir = os.path.join(self.logdir, run_name)
writer = tf.summary.FileWriter(subdir)
writer.add_graph(sess.graph)
for step in xrange(self._STEPS):
feed_dict = {}
if use_scalars:
feed_dict[scalar_placeholder] = int((43**step) % 47)
if use_histogram:
feed_dict[histogram_placeholder] = [1 + step, 2 + step, 3 + step]
s = sess.run(summ, feed_dict=feed_dict)
writer.add_summary(s, global_step=step)
writer.close()
def test_index(self):
self.set_up_with_runs([self._RUN_WITH_SCALARS, self._RUN_WITH_HISTOGRAM])
self.assertEqual({
self._RUN_WITH_SCALARS: [self._SCALAR_TAG],
self._RUN_WITH_HISTOGRAM: [],
}, self.plugin.index_impl())
def _test_scalars_json(self, run_name, should_have_scalars):
self.set_up_with_runs([self._RUN_WITH_SCALARS, self._RUN_WITH_HISTOGRAM])
if should_have_scalars:
(data, mime_type) = self.plugin.scalars_impl(
self._SCALAR_TAG, run_name, scalars_plugin.OutputFormat.JSON)
self.assertEqual('application/json', mime_type)
self.assertEqual(len(data), self._STEPS)
else:
with self.assertRaises(KeyError):
self.plugin.scalars_impl(self._SCALAR_TAG, run_name,
scalars_plugin.OutputFormat.JSON)
def _test_scalars_csv(self, run_name, should_have_scalars):
self.set_up_with_runs([self._RUN_WITH_SCALARS, self._RUN_WITH_HISTOGRAM])
if should_have_scalars:
(data, mime_type) = self.plugin.scalars_impl(
self._SCALAR_TAG, run_name, scalars_plugin.OutputFormat.CSV)
self.assertEqual('text/csv', mime_type)
s = StringIO(data)
reader = csv.reader(s)
self.assertEqual(['Wall time', 'Step', 'Value'], next(reader))
self.assertEqual(len(list(reader)), self._STEPS)
else:
with self.assertRaises(KeyError):
self.plugin.scalars_impl(self._SCALAR_TAG, run_name,
scalars_plugin.OutputFormat.CSV)
def test_scalars_json_with_scalars(self):
self._test_scalars_json(self._RUN_WITH_SCALARS, True)
def test_scalars_json_with_histogram(self):
self._test_scalars_json(self._RUN_WITH_HISTOGRAM, False)
def test_scalars_csv_with_scalars(self):
self._test_scalars_csv(self._RUN_WITH_SCALARS, True)
def test_scalars_csv_with_histogram(self):
self._test_scalars_csv(self._RUN_WITH_HISTOGRAM, False)
def test_active_with_scalars(self):
self.set_up_with_runs([self._RUN_WITH_SCALARS])
self.assertTrue(self.plugin.is_active())
def test_active_with_histogram(self):
self.set_up_with_runs([self._RUN_WITH_HISTOGRAM])
self.assertFalse(self.plugin.is_active())
def test_active_with_both(self):
self.set_up_with_runs([self._RUN_WITH_SCALARS, self._RUN_WITH_HISTOGRAM])
self.assertTrue(self.plugin.is_active())
if __name__ == '__main__':
tf.test.main()
| 37.041667 | 80 | 0.72216 |
0f62b3dda2a070a46ffaabd9c112a5474ec63b17 | 105 | py | Python | traptransmitter/apps.py | FelixTheC/onlineOrderForm | 6f606fe7baa56e94d48278da8258b5eb33a3590d | [
"Apache-2.0"
] | null | null | null | traptransmitter/apps.py | FelixTheC/onlineOrderForm | 6f606fe7baa56e94d48278da8258b5eb33a3590d | [
"Apache-2.0"
] | null | null | null | traptransmitter/apps.py | FelixTheC/onlineOrderForm | 6f606fe7baa56e94d48278da8258b5eb33a3590d | [
"Apache-2.0"
] | 1 | 2021-04-01T16:52:35.000Z | 2021-04-01T16:52:35.000Z | from django.apps import AppConfig
class TraptransmitterConfig(AppConfig):
name = 'traptransmitter'

# ---- solutions/Longest Palindromic Substring/solution.py (nilax97/leetcode-solutions, MIT) ----
class Solution:
def longestPalindrome(self, s: str) -> str:
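        # Expand around every center: each index as an odd-length center and each
        # adjacent pair as an even-length center, keeping the longest hit found.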
res = ""
for i in range(len(s)):
tmp = self.helper(s, i, i)
if len(tmp) > len(res):
res = tmp
tmp = self.helper(s, i, i+1)
if len(tmp) > len(res):
res = tmp
return res
def helper(self, s, l, r):
while l >= 0 and r < len(s) and s[l] == s[r]:
l -= 1; r += 1
return s[l+1:r]

# ---- trainer.py (blackredscarf/pytorch-DQN, Apache-2.0) ----
import math
import numpy as np
from config import Config
from core.logger import TensorBoardLogger
from core.util import get_output_folder
class Trainer:
def __init__(self, agent, env, config: Config):
self.agent = agent
self.env = env
self.config = config
# non-Linear epsilon decay
epsilon_final = self.config.epsilon_min
epsilon_start = self.config.epsilon
epsilon_decay = self.config.eps_decay
self.epsilon_by_frame = lambda frame_idx: epsilon_final + (epsilon_start - epsilon_final) * math.exp(
-1. * frame_idx / epsilon_decay)
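        # Exponential decay from epsilon_start toward epsilon_final; e.g. with
        # start=1.0, final=0.01, decay=500 the value at frame 500 is about
        # 0.01 + 0.99 * exp(-1) ~= 0.37 (illustrative numbers, not config defaults).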
self.outputdir = get_output_folder(self.config.output, self.config.env)
self.agent.save_config(self.outputdir)
self.board_logger = TensorBoardLogger(self.outputdir)
def train(self, pre_fr=0):
losses = []
all_rewards = []
episode_reward = 0
ep_num = 0
is_win = False
state = self.env.reset()
for fr in range(pre_fr + 1, self.config.frames + 1):
epsilon = self.epsilon_by_frame(fr)
action = self.agent.act(state, epsilon)
next_state, reward, done, _ = self.env.step(action)
self.agent.buffer.add(state, action, reward, next_state, done)
state = next_state
episode_reward += reward
loss = 0
if self.agent.buffer.size() > self.config.batch_size:
loss = self.agent.learning(fr)
losses.append(loss)
self.board_logger.scalar_summary('Loss per frame', fr, loss)
if fr % self.config.print_interval == 0:
print("frames: %5d, reward: %5f, loss: %4f episode: %4d" % (fr, np.mean(all_rewards[-10:]), loss, ep_num))
if fr % self.config.log_interval == 0:
self.board_logger.scalar_summary('Reward per episode', ep_num, all_rewards[-1])
if self.config.checkpoint and fr % self.config.checkpoint_interval == 0:
self.agent.save_checkpoint(fr, self.outputdir)
if done:
state = self.env.reset()
all_rewards.append(episode_reward)
episode_reward = 0
ep_num += 1
avg_reward = float(np.mean(all_rewards[-100:]))
self.board_logger.scalar_summary('Best 100-episodes average reward', ep_num, avg_reward)
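                # The run counts as solved once the rolling 100-episode average
                # (and the most recent episode) reach config.win_reward.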
if len(all_rewards) >= 100 and avg_reward >= self.config.win_reward and all_rewards[-1] > self.config.win_reward:
is_win = True
self.agent.save_model(self.outputdir, 'best')
print('Ran %d episodes best 100-episodes average reward is %3f. Solved after %d trials ✔' % (ep_num, avg_reward, ep_num - 100))
if self.config.win_break:
break
if not is_win:
print('Did not solve after %d episodes' % ep_num)
self.agent.save_model(self.outputdir, 'last')

# ---- tests/test_ext_autodoc_autoattribute.py (SamB/sphinx, BSD-2-Clause) ----
"""Test the autodoc extension.
This tests mainly the Documenters; the auto directives are tested in a test
source file translated by test_build.
"""
import sys
import pytest
from .test_ext_autodoc import do_autodoc
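# do_autodoc runs a single auto* directive against the test app and returns the
# generated reST lines, which each test compares against an expected listing.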
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoattribute(app):
actual = do_autodoc(app, 'attribute', 'target.Class.attr')
assert list(actual) == [
'',
'.. py:attribute:: Class.attr',
' :module: target',
" :value: 'bar'",
'',
' should be documented -- süß',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoattribute_novalue(app):
options = {'no-value': None}
actual = do_autodoc(app, 'attribute', 'target.Class.attr', options)
assert list(actual) == [
'',
'.. py:attribute:: Class.attr',
' :module: target',
'',
' should be documented -- süß',
'',
]
@pytest.mark.skipif(sys.version_info < (3, 6), reason='python 3.6+ is required.')
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoattribute_typed_variable(app):
actual = do_autodoc(app, 'attribute', 'target.typed_vars.Class.attr2')
assert list(actual) == [
'',
'.. py:attribute:: Class.attr2',
' :module: target.typed_vars',
' :type: int',
'',
]
@pytest.mark.skipif(sys.version_info < (3, 6), reason='python 3.6+ is required.')
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoattribute_typed_variable_in_alias(app):
actual = do_autodoc(app, 'attribute', 'target.typed_vars.Alias.attr2')
assert list(actual) == [
'',
'.. py:attribute:: Alias.attr2',
' :module: target.typed_vars',
' :type: int',
'',
]
@pytest.mark.skipif(sys.version_info < (3, 6), reason='python 3.6+ is required.')
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoattribute_instance_variable(app):
actual = do_autodoc(app, 'attribute', 'target.typed_vars.Class.attr4')
assert list(actual) == [
'',
'.. py:attribute:: Class.attr4',
' :module: target.typed_vars',
' :type: int',
'',
' attr4',
'',
]
@pytest.mark.skipif(sys.version_info < (3, 6), reason='python 3.6+ is required.')
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoattribute_instance_variable_in_alias(app):
actual = do_autodoc(app, 'attribute', 'target.typed_vars.Alias.attr4')
assert list(actual) == [
'',
'.. py:attribute:: Alias.attr4',
' :module: target.typed_vars',
' :type: int',
'',
' attr4',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoattribute_instance_variable_without_comment(app):
actual = do_autodoc(app, 'attribute', 'target.instance_variable.Bar.attr4')
assert list(actual) == [
'',
'.. py:attribute:: Bar.attr4',
' :module: target.instance_variable',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoattribute_slots_variable_list(app):
actual = do_autodoc(app, 'attribute', 'target.slots.Foo.attr')
assert list(actual) == [
'',
'.. py:attribute:: Foo.attr',
' :module: target.slots',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoattribute_slots_variable_dict(app):
actual = do_autodoc(app, 'attribute', 'target.slots.Bar.attr1')
assert list(actual) == [
'',
'.. py:attribute:: Bar.attr1',
' :module: target.slots',
' :type: int',
'',
' docstring of attr1',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoattribute_slots_variable_str(app):
actual = do_autodoc(app, 'attribute', 'target.slots.Baz.attr')
assert list(actual) == [
'',
'.. py:attribute:: Baz.attr',
' :module: target.slots',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoattribute_GenericAlias(app):
actual = do_autodoc(app, 'attribute', 'target.genericalias.Class.T')
if sys.version_info < (3, 7):
assert list(actual) == [
'',
'.. py:attribute:: Class.T',
' :module: target.genericalias',
' :value: typing.List[int]',
'',
' A list of int',
'',
]
else:
assert list(actual) == [
'',
'.. py:attribute:: Class.T',
' :module: target.genericalias',
'',
' A list of int',
'',
' alias of :py:class:`~typing.List`\\ [:py:class:`int`]',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoattribute_NewType(app):
actual = do_autodoc(app, 'attribute', 'target.typevar.Class.T6')
assert list(actual) == [
'',
'.. py:attribute:: Class.T6',
' :module: target.typevar',
'',
' T6',
'',
' alias of :py:class:`~datetime.date`',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoattribute_TypeVar(app):
actual = do_autodoc(app, 'attribute', 'target.typevar.Class.T1')
assert list(actual) == [
'',
'.. py:attribute:: Class.T1',
' :module: target.typevar',
'',
' T1',
'',
" alias of TypeVar('T1')",
'',
]
@pytest.mark.skipif(sys.version_info < (3, 6), reason='python 3.6+ is required.')
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoattribute_hide_value(app):
actual = do_autodoc(app, 'attribute', 'target.hide_value.Foo.SENTINEL1')
assert list(actual) == [
'',
'.. py:attribute:: Foo.SENTINEL1',
' :module: target.hide_value',
'',
' docstring',
'',
' :meta hide-value:',
'',
]
actual = do_autodoc(app, 'attribute', 'target.hide_value.Foo.SENTINEL2')
assert list(actual) == [
'',
'.. py:attribute:: Foo.SENTINEL2',
' :module: target.hide_value',
'',
' :meta hide-value:',
'',
]

# ---- process.py (Jip-Hop/polargraph-optimizer, MIT) ----
#!/usr/bin/python
from __future__ import print_function
from lib import *
######################
instructions = []
setup_finished = False
import fileinput
for line in fileinput.input():
# Don't process the initial setup instructions,
# write to output file immediately
if not setup_finished:
print(line.strip())
if(line.split(' ')[0] == 'G00'):
setup_finished = True
else:
instructions.append(Instruction(line))
glyphs = []
chunk = []
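# Group instructions into glyphs: a glyph is one continuous pen-down stroke,
# terminated by the penup instruction that follows it.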
for inst in instructions:
chunk.append(inst)
if inst.typename == 'penup':
if len(chunk) > 1:
glyphs.append(Glyph(chunk))
chunk = []
print("Total Glyphs: %d" % len(glyphs), file=sys.stderr)
# No sorting
print("Initial penup distance: %9d" % total_penup_travel(glyphs), file=sys.stderr)
print("Initial total distance: %9d" % total_travel(glyphs), file=sys.stderr)
# dedupe alone (and used below)
glyphs = list(dedupe(glyphs))
print("Deduped penup distance: %9d" % total_penup_travel(glyphs), file=sys.stderr)
print("Deduped total distance: %9d" % total_travel(glyphs), file=sys.stderr)
# easy sort: sort all glyphs by starting point
#
# This is O(n log n) because it's simply a sort.
sorted_g = sorted(glyphs,
key=lambda st: st.start or tuple()) # add default key in case 'start' is missing.
print("Sorted penup distance: %9d" % total_penup_travel(sorted_g), file=sys.stderr)
print("Sorted total distance: %9d" % total_travel(sorted_g), file=sys.stderr)
i = 0
greedy = reorder_greedy(glyphs, index=i)
print("Greedy penup (i=%1d) %9d" % (i, total_penup_travel(greedy)), file=sys.stderr)
print("Greedy total (i=%1d) %9d" % (i, total_travel(greedy)), file=sys.stderr)
# Render down from Glyphs -> Instructions
instructions = list(iter_instructions(greedy))
print("Total instructions: %9d" % (len(instructions),), file=sys.stderr)
# Remove penup / move / pendown sequences that don't actually move anywhere.
pruned_instructions = list(prune_small_distance_penups(instructions))
print("Pruned instructions: %9d" % (len(pruned_instructions),), file=sys.stderr)
# Turn G1 into G0 if pen is down, cleanup, remove duplicates.
cleaned_instructions = list(clean_instructions(pruned_instructions))
print("Clean instructions: %9d" % (len(cleaned_instructions),), file=sys.stderr)
for i in cleaned_instructions:
print(i.line)
# Next up: try flipping the ordering of individual glyphs in greedy sort
#
# Other ideas:
# - Divide drawing into subregions and then optimize each region
# - Full O(n!) exhaustive search (eek!)
# - ???

# ---- litex_boards/targets/lattice_ice40up5k_evn.py (AEW2015/litex-boards, BSD-2-Clause) ----
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2019 Sean Cross <sean@xobs.io>
# Copyright (c) 2018 David Shah <dave@ds0.me>
# Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
import os
import sys
import argparse
target="lattice_ice40up5k_evn"
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex_boards.platforms import lattice_ice40up5k_evn
from litex.build.lattice.programmer import IceStormProgrammer
from litex.soc.cores.ram import Up5kSPRAM
from litex.soc.cores.clock import iCE40PLL
from litex.soc.integration.soc_core import *
from litex.soc.integration.soc import SoCRegion
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
kB = 1024
mB = 1024*kB
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
assert sys_clk_freq == 12e6
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_por = ClockDomain(reset_less=True)
# # #
# Clk/Rst
sys = platform.request("clk12")
platform.add_period_constraint(sys, 1e9/12e6)
# Power On Reset
por_count = Signal(16, reset=2**16-1)
por_done = Signal()
self.comb += self.cd_por.clk.eq(ClockSignal("sys"))
self.comb += por_done.eq(por_count == 0)
self.sync.por += If(~por_done, por_count.eq(por_count - 1))
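        # Hold the sys domain in reset until the 16-bit counter has expired
        # (2**16 cycles, roughly 5.5 ms at 12 MHz).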
# Sys Clk
self.comb += self.cd_sys.clk.eq(sys)
self.specials += AsyncResetSynchronizer(self.cd_sys, ~por_done)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
mem_map = {**SoCCore.mem_map, **{"spiflash": 0x80000000}}
def __init__(self, bios_flash_offset, sys_clk_freq=int(12e6), with_led_chaser=True, **kwargs):
platform = lattice_ice40up5k_evn.Platform()
# Disable Integrated ROM/SRAM since too large for iCE40 and UP5K has specific SPRAM.
kwargs["integrated_sram_size"] = 0
kwargs["integrated_rom_size"] = 0
# Set CPU variant / reset address
kwargs["cpu_reset_address"] = self.mem_map["spiflash"] + bios_flash_offset
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on Lattice iCE40UP5k EVN breakout board",
ident_version = True,
**kwargs)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# 128KB SPRAM (used as SRAM) ---------------------------------------------------------------
self.submodules.spram = Up5kSPRAM(size=128*kB)
self.bus.add_slave("sram", self.spram.bus, SoCRegion(size=128*kB))
# SPI Flash --------------------------------------------------------------------------------
# 4x mode is not possible on this board since WP and HOLD pins are not connected to the FPGA
from litespi.modules import N25Q032A
from litespi.opcodes import SpiNorFlashOpCodes as Codes
self.add_spi_flash(mode="1x", module=N25Q032A(Codes.READ_1_1_1))
# Add ROM linker region --------------------------------------------------------------------
self.bus.add_region("rom", SoCRegion(
origin = self.mem_map["spiflash"] + bios_flash_offset,
size = 32*kB,
linker = True)
)
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led_n"),
sys_clk_freq = sys_clk_freq)
# Add a UART-Wishbone bridge -----------------------------------------
debug_uart=False
if debug_uart:
# This will add a bridge on the second serial port defined in platform
from litex.soc.cores.uart import UARTWishboneBridge
self.submodules.uart_bridge = UARTWishboneBridge(
platform.request("serial"),
sys_clk_freq,
baudrate=115200)
self.add_wb_master(self.uart_bridge.wishbone)
# Flash --------------------------------------------------------------------------------------------
def flash(bios_flash_offset):
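    # Builds a single flash image: the bitstream padded to 0x20000 bytes, followed
    # by the BIOS padded to 0x10000 bytes, then writes it to SPI flash at offset 0.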
from litex.build.dfu import DFUProg
prog = IceStormProgrammer()
bitstream = open("build/"+target+"/gateware/"+target+".bin", "rb")
bios = open("build/"+target+"/software/bios/bios.bin", "rb")
image = open("build/"+target+"/image.bin", "wb")
# Copy bitstream at 0x00000000
for i in range(0x00000000, 0x0020000):
b = bitstream.read(1)
if not b:
image.write(0xff.to_bytes(1, "big"))
else:
image.write(b)
# Copy bios at 0x00020000
for i in range(0x00000000, 0x00010000):
b = bios.read(1)
if not b:
image.write(0xff.to_bytes(1, "big"))
else:
image.write(b)
bitstream.close()
bios.close()
image.close()
print("Flashing bitstream (+bios)")
prog.flash(0x0, "build/"+target+"/image.bin")
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Lattice iCE40UP5k EVN breakout board")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--sys-clk-freq", default=12e6, help="System clock frequency (default: 12MHz)")
parser.add_argument("--bios-flash-offset", default=0x20000, help="BIOS offset in SPI Flash (default: 0x20000)")
parser.add_argument("--flash", action="store_true", help="Flash Bitstream")
builder_args(parser)
soc_core_args(parser)
args = parser.parse_args()
soc = BaseSoC(
bios_flash_offset = args.bios_flash_offset,
sys_clk_freq = int(float(args.sys_clk_freq)),
**soc_core_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.flash:
flash(args.bios_flash_offset)
if __name__ == "__main__":
main()

# ---- sa/profiles/Qtech/BFC_PBIC_S/get_interface_status_ex.py (xUndero/noc, BSD-3-Clause) ----
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Qtech.BFC_PBIC_S.get_interface_status_ex
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfacestatusex import IGetInterfaceStatusEx
class Script(BaseScript):
name = "Qtech.BFC_PBIC_S.get_interface_status_ex"
interface = IGetInterfaceStatusEx
requires = []
def execute_snmp(self, interfaces=None):
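        # Walk 1.3.6.1.3.55.1.3.1.1 for port identifiers, read each port's status
        # from ...1.4.<port> (0 means up), then append a static always-up eth0 entry.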
result = []
for v in self.snmp.getnext("1.3.6.1.3.55.1.3.1.1", max_repetitions=3, cached=True):
name = v[1]
status = self.snmp.get("1.3.6.1.3.55.1.3.1.4.%s" % name)
if status == 0:
admin_status = True
oper_status = True
else:
admin_status = False
oper_status = False
r = {"interface": name, "admin_status": admin_status, "oper_status": oper_status}
result += [r]
r = {
"interface": "eth0",
"admin_status": True,
"oper_status": True,
"full_duplex": True,
"in_speed": 10000,
"out_speed": 10000,
}
result += [r]
return result

# ---- ambari-server/src/main/resources/stacks/VDH/2.4.1/hooks/before-ANY/scripts/hook.py (vsosrc/ambari, Apache-2.0) ----
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from shared_initialization import *
class BeforeAnyHook(Hook):
def hook(self, env):
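    # Runs before any command on the host: loads the stack params module and
    # calls setup_jce() from shared_initialization (Java Cryptography Extension setup).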
import params
env.set_params(params)
setup_jce()
if __name__ == "__main__":
BeforeAnyHook().execute()

# ---- FashionMNISTFULL.py (avmoldovan/CNN-TE, Apache-2.0) ----
from torchvision import transforms, datasets as ds
from torch.utils.data import DataLoader
import torchvision as tv
from torchvision.transforms import ToPILImage
import torch as t
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.backends.cudnn as cudnn
# import matplotlib.pyplot as plt
import numpy as np
from torchvision.models import AlexNet
import torch
from torch.tensor import *
import torch.nn as nn
from skimage.util import view_as_windows
import numpy as np
import torch.nn.functional as F
from torch.hub import load_state_dict_from_url
# from fastai.callbacks.hooks import *
from PyTE.TEDiscrete import TEDiscrete
from NNutils import *
from collections import OrderedDict
import math
# from TEDiscreteOnline import TEDiscreteOnline
# from FBLinear import FBLinear
import itertools
import random
import pandas as pd
import datetime as dt
import sys
# from TorchStandardScaler import *
__all__ = ['FashionMNISTFULL', 'fashionmnistfull']
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
}
DEBUG = False
class Context(object):
    """Minimal get-or-create singleton; the shared instance is cached on the class."""
    _ctx = None

    def __init__(self):
        super(Context, self).__init__()

    @classmethod
    def create(cls, value):
        # cache a single shared Context instance on the class
        if cls._ctx is None:
            cls._ctx = Context()
        return cls._ctx

    @property
    def ctx(self):
        return type(self)._ctx
class FashionMNISTFULL(AlexNet):
class GradUpdateFunc(torch.autograd.Function):
"""
We can implement our own custom autograd Functions by subclassing
torch.autograd.Function and implementing the forward and backward passes
which operate on Tensors.
"""
@staticmethod
def forward(ctx, input):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
ctx.save_for_backward(input)
return input # input.clamp(min=0)
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
input, = ctx.saved_tensors
grad_input = grad_output.clone()
# grad_input[input < 0] = 0
grad_input = torch.zeros(grad_input.shape)
return grad_input
def __str__(self):
return "FashionMNISTFULL"
def __init__(self, configs):
super(AlexNet, self).__init__()
self.configs = configs
self.batch_size = int(configs['batch_size'])
self.skip_first = int(configs['skip_first']) # 9#int(self.batch_size)
self.g1 = float(configs['tr1'])
self.g2 = float(configs['tr2'])
self.te_length = int(self.configs['te_length'])
self.withTE = bool(self.configs['withte'])
self.fwd = bool(self.configs['forward'])
DEBUG = configs['debug']
self.conv_te = True
self._forward_passes = 0
self.clean_window = bool(configs['clean_window'])
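        # When te_length exceeds the batch size, activations are accumulated over
        # window_batches consecutive mini-batches before transfer entropy is estimated
        # (see prepare_activations / update_with_te in NNutils).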
if self.batch_size == self.te_length or self.te_length == 0:
self.windowed = False
else:
self.windowed = True
self.window_batches = int(self.te_length / self.batch_size)
self.gpu = self.configs['gpu'] != 'cpu'
self.prev_epoch = 0
self.current_epoch = 0
# self.activations_size = math.floor(self.te_batch / configs['te_events_batch_multiple'] / configs[
# 'batch_size']) - self.skip_first ##this must be a batch_size multiple
# self.activations_size = math.floor(self.te_batch / configs['batch_size'] ) - self.skip_first ##this must be a batch_size multiple
# else:
# self.activations_size = math.floor(int(configs['epochs']) * \
# int(configs['trainingset_size']) / \
# int(configs['batch_size']))
if configs['partial_freeze'] and not configs['pretrained'] and not configs['evaluate']:
self.conv_te = False
else:
if not configs['base_retrain']:
self.conv_te = True
else:
self.conv_te = False
# TODO: remove this
self.conv_te = True
layers = []
self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1) # padding 5
self.conv1.name = 'conv1'
layers.append(self.conv1)
self.batchnorm2d1 = nn.BatchNorm2d(32)
self.batchnorm2d1.name = 'batchnorm2d1'
layers.append(self.batchnorm2d1)
self.relu1 = nn.ReLU(inplace=True)
self.relu1.name = 'relu1'
layers.append(self.relu1)
self.maxPool1 = nn.MaxPool2d(kernel_size=2, stride=2) # kernel_size=2
self.maxPool1.name = 'maxPool1'
layers.append(self.maxPool1)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.conv2.name = 'conv2'
layers.append(self.conv2)
self.batchnorm2d2 = nn.BatchNorm2d(64)
self.batchnorm2d2.name = 'batchnorm2d2'
layers.append(self.batchnorm2d2)
self.relu2 = nn.ReLU(inplace=True)
self.relu2.name = 'relu2'
layers.append(self.relu2)
self.maxPool2 = nn.MaxPool2d(kernel_size=2) # kernel_size=2
self.maxPool2.name = 'maxPool2'
layers.append(self.maxPool2)
self.fc1 = nn.Linear(64 * 6 * 6, 600)
self.fc1.name = 'fc1'
#layers.append(self.fc1)
self.drop1 = nn.Dropout(p=configs['dropout1'])
self.drop1.name = 'drop1'
#layers.append(self.drop1)
# self.relu3 = nn.ReLU(inplace=True)
# self.relu3.name = 'relu3'
# self.drop2 = nn.Dropout(p=configs['dropout2'])
# self.drop2.name = 'drop2'
self.fc7 = nn.Linear(600, 120)
self.fc7.name = 'fc7'
#layers.append(self.fc7)
self.features = nn.Sequential(*layers)
self.fc8 = nn.Linear(120, self.configs['num_classes'])
self.fc8.name = 'fc8'
# if backward==False:
# self.hook = module.register_forward_hook(self.hook_fn)
# else:
self.hook0 = self.fc7.weight.register_hook(self.hook0_fn)
self.hook = self.fc8.weight.register_hook(self.hook_fn)
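        # Hooks on the fc7/fc8 weight gradients: they log gradient statistics and,
        # when configs['forward'] is False, trigger the TE-based update during the
        # backward pass instead of at the end of forward().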
# self.hookF = Hook(self.fc8)
# self.hookB = Hook(self.fc8, backward=True)
self.softmax = nn.Softmax(dim=1)
self.fc8activ = np.zeros(shape=(0, self.fc8.out_features), dtype=np.uint8) # .bool()
self.softmaxactiv = np.zeros(shape=(0, self.fc8.out_features), dtype=np.uint8) # .bool()
self.fc7tes = torch.zeros(size=(self.fc7.out_features, self.fc8.in_features), dtype=torch.float32) # bool()
self.fc7activ = torch.zeros(size=(self.fc7.out_features, 0), dtype=torch.uint8) # .bool()
self.fc7pairidx = torch.cartesian_prod(torch.arange(end=self.fc7.out_features), torch.arange(end=self.fc8.in_features))
self.fc7eye = torch.eye(self.fc7tes.shape[0], self.fc7tes.shape[1], dtype=torch.float32, requires_grad=True)
self.fc8pairidx = torch.cartesian_prod(torch.arange(end=self.fc8.out_features), torch.arange(end=self.fc8.out_features))
self.fc8tes = torch.zeros(size=(self.fc8.out_features, self.fc8.out_features), dtype=torch.float32) # bool()
self.fc8eye = torch.eye(self.fc8tes.shape[0], self.fc8tes.shape[1], dtype=torch.float32, requires_grad=True)
# initialize fcpai1 TE
self.averages = dict()
self.averages['fcpair1'] = dict()
self.averages['fcpair2'] = dict()
self.averages['fcpair3'] = dict()
self.inmatrix2 = torch.zeros(size=(self.fc7.out_features, self.fc8.out_features), dtype=torch.float32)
self.inmatrix3 = torch.zeros(size=(self.fc8.out_features, self.fc8.out_features), dtype=torch.float32)
self.grad_update = FashionMNISTFULL.GradUpdateFunc.apply
if configs['partial_freeze'] == True:
child_counter = 0
for child in self.children():
if child_counter < 20:
# print("child ", child_counter, " was frozen")
# for children_of_child in child.children():
for param in child.parameters():
param.requires_grad = False
child_counter += 1
# self.get_all_layers(self)
# self.maxPool1.register_forward_hook(self.hook_fn)
# self.register_backward_hook(self.hook_bw)
# self.fc8.register_backward_hook(self.hook_bw)
# self.softmax.register_backward_hook(self.hook_bw)
self.start = dt.datetime.now()
self.evol = getEvolutionDataFrame()
self.logrow = getLogRowJson(self.g1, self.g2, self.te_length)
def new_epoch(self):
return self.current_epoch != self.prev_epoch
def forward(self, x, target):
start = dt.datetime.now()
self.last_target = target
if self.training and self.new_epoch() and self.conv_te:
self._forward_passes = 0
self.fc7tes = torch.zeros(size=(self.fc7.out_features, self.fc8.in_features), dtype=torch.float32) # bool()
self.fc7eye = torch.eye(self.fc7tes.shape[0], self.fc7tes.shape[1], dtype=torch.float32, requires_grad=True)
self.fc7activ = torch.zeros(size=(self.fc7.out_features, 0), dtype=torch.uint8) # .bool()
# self.fc7pairidx = torch.cartesian_prod(torch.arange(end=self.fc7.out_features), torch.arange(end=self.fc8.in_features))
self.fc8tes = torch.zeros(size=(self.fc8.out_features, self.fc8.out_features), dtype=torch.float32) # bool()
self.fc8activ = np.zeros(shape=(0, self.fc8.out_features), dtype=np.uint8) #torch.zeros(size=(self.fc8.out_features, 0), dtype=torch.uint8) # .bool()
            self.softmaxactiv = np.zeros(shape=(0, self.fc8.out_features), dtype=np.uint8)  # torch.zeros(size=(self.fc8.out_features, 0), dtype=torch.uint8) # .bool()
# if (self.configs['rolling_te_window'] == False):
self.averages['fcpair2'] = dict()
self.averages['fcpair3'] = dict()
# x = self.conv1(x)
# x = self.batchnorm2d1(x)
# x = self.relu1(x)
# x = self.maxPool1(x)
# x = self.conv2(x)
# x = self.batchnorm2d2(x)
# x = self.relu2(x)
#
# x = self.maxPool2(x)
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = self.drop1(x)
fc7med = None
fc8med = None
softmaxmed = None
x = self.fc7(x)
self.fc7output = x.clone()
# if self.training and self.conv_te:
# with torch.no_grad():
# # xb = x[-1,:].clone().detach().to(dtype=torch.uint8)
# xb = x.clone().detach()
# # fc8med = xb.median()
# # xb[xb >= self.g1] = 1
# # xb[xb < self.g1] = 0
# xb[(xb <= -self.g1) | (xb >= self.g1)] = 1
# xb[(xb != 1.)] = 0
# # xb[(xb > -self.g1) | (xb < self.g1)] = 0
# if self.configs['gpu'] != 'cpu':
# self.fc7activ = xb.to(dtype=torch.uint8).cuda().t() # torch.cat((self.fc8activ.t().cuda(), xb.to(dtype=torch.uint8).cuda()), dim=0).t()
# else:
# self.fc7activ = xb.to(dtype=torch.uint8).t() # torch.cat((self.fc8activ.t(), xb.to(dtype=torch.uint8)), dim=0).t()
x = self.fc8(x)
self.fc8output = x.clone()
if self.training and self.conv_te and self.withTE:
with torch.no_grad():
# xb = x[-1,:].clone().detach().to(dtype=torch.uint8)
xb = x.clone().detach()
# fc8med = xb.median()
# xb[xb >= self.g1] = 1
# xb[xb < self.g1] = 0
xb[(xb <= -self.g1) | (xb >= self.g1)] = 1
xb[(xb != 1.)] = 0
# xb[(xb > -self.g1) | (xb < self.g1)] = 0
self.fc8activ = prepare_activations(self.configs, self.fc8activ, xb.to(dtype=torch.uint8).numpy(), self.windowed)
self.softmaxoutput = self.softmax(x)
if self.training and self.conv_te and self.withTE:
with torch.no_grad():
xb = self.softmaxoutput.clone().detach()
# softmaxmed = xb.mean()
xb[xb >= self.g2] = 1 # (self.g / self.configs['num_classes'])] = 1
xb[xb < self.g2] = 0 # (self.g / self.configs['num_classes'])] = 0
self.softmaxactiv = prepare_activations(self.configs, self.softmaxactiv, xb.to(dtype=torch.uint8).numpy(), self.windowed)
ce = self.criterion(x, target)
self.last_x = x
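        # Forward variant: once a full TE window has been collected, estimate transfer
        # entropy between the binarised fc8 and softmax activations and let
        # update_with_te adjust the fc8 weights accordingly.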
if self.fwd and self.training and self.conv_te and (self.configs['fc8rate'] == 0 or self.configs['fc8rate'] is None) and \
(self._forward_passes * self.batch_size) >= self.te_length and self.withTE:
with torch.no_grad():
update_with_te(self,
keypair='fcpair3',
layerleft=self.fc8,
layerleftactiv=self.fc8activ,
layertes=self.fc8tes,
CE=0.,
eye=self.fc8eye,
gradinput=None,
layerright=self.softmax,
layerrightactiv=self.softmaxactiv,
layerright_output=self.softmaxoutput.clone().detach(),
configs=self.configs)
if self.training:
updateLogRow(self, ce)
self._forward_passes += 1
return x
def hook0_fn(self, grad):
# self.input = input
# self.output = output
# if self.training and self._forward_passes > 0 and self.conv_te:
# # if ((self._forward_passes + 1) * self.configs['batch_size']) >= self.configs['te_length']:
# if self._forward_passes > self.skip_first:
# if self.configs['fc7rate'] == 0 or self.configs['fc7rate'] is None:
# self.update_with_te(keypair='fcpair2',
# layerleft=self.fc7,
# layerleftactiv=self.fc7activ,
# layertes=self.fc7tes,
# CE=0.,
# eye=self.fc7eye,
# layerright=self.fc8,
# layerrightactiv=self.fc8activ,
# layerright_output=self.fc8output.clone().detach())
updateLogGrad(self, grad)
# if self.fc7 != None and self.fc7.weight != None and self.fc7.weight.grad != None:
# return self.fc7.weight.grad.data
sys.stdout.flush()
def hook_fn(self, grad):
input = grad
output = grad
#self._forward_passes > self.skip_first:
if self.fwd is False and self.training and self.conv_te and (self.configs['fc8rate'] == 0 or self.configs['fc8rate'] is None) and \
(self._forward_passes * self.batch_size) >= self.te_length and self.withTE:
with torch.no_grad():
update_with_te(self,
keypair='fcpair3',
layerleft=self.fc8,
layerleftactiv=self.fc8activ,
layertes=self.fc8tes,
CE=0.,
eye=self.fc8eye,
gradinput=None,
layerright=self.softmax,
layerrightactiv=self.softmaxactiv,
layerright_output=self.softmaxoutput.clone().detach(),
configs=self.configs)
if self.training:
self.logrow['fc2gradmin'] = grad.min().item()
self.logrow['fc2gradmax'] = grad.max().item()
self.logrow['fc2gradmean'] = grad.mean().item()
self.logrow['fc2gradstd'] = grad.std().item()
# if res != None:
# return res
# return grad
def close(self):
self.hook0.remove()
self.hook.remove()
def fashionmnistfull(configs, progress=True, **kwargs):
model = FashionMNISTFULL(configs=configs, **kwargs)
if configs['pretrained']:
if configs['pretrained_url'] == None:
state_dict = load_state_dict_from_url(model_urls['alexnet'], map_location=configs['gpu'], progress=progress)
model.load_state_dict(state_dict)
else:
state_dict = load_state_dict_from_path(model_path=configs['pretrained_url'], map_location=configs['gpu'],
progress=progress)
model.load_state_dict(state_dict['state_dict'])
elif configs['partial_freeze'] and configs['pretrained_url'] is not None:
# state_dict = load_state_dict_from_path(configs['pretrained_url'], map_location=torch.device('cpu'), progress=progress)
# if not torch.cuda.is_available():
state_dict = load_state_dict_from_path(configs['pretrained_url'], map_location='cpu', progress=progress)
# else:
# state_dict = load_state_dict_from_path(configs['pretrained_url'], progress=progress)
tempDict = OrderedDict()
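        # Checkpoints saved from an nn.DataParallel wrapper prefix every key with
        # 'module.'; strip it so the state dict loads into this plain model.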
if list(state_dict['state_dict'].keys())[0].startswith('module.'):
for k in state_dict['state_dict'].keys():
tempDict[k[7:]] = state_dict['state_dict'][k]
model.load_state_dict(tempDict)
else:
model.load_state_dict(state_dict['state_dict'])
return model
import os
import re
import torch
import zipfile
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
HASH_REGEX = re.compile(r'-([a-f0-9]*)\.')
def _get_torch_home():
torch_home = os.path.expanduser(
os.getenv(ENV_TORCH_HOME,
os.path.join(os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch')))
return torch_home
def load_state_dict_from_path(model_path, model_dir=None, map_location=None, progress=True, check_hash=False):
# Note: extractall() defaults to overwrite file if exists. No need to clean up beforehand.
# We deliberately don't handle tarfile here since our legacy serialization format was in tar.
# E.g. resnet18-5c106cde.pth which is widely used.
if zipfile.is_zipfile(model_path):
with zipfile.ZipFile(model_path) as cached_zipfile:
members = cached_zipfile.infolist()
if len(members) != 1:
raise RuntimeError('Only one file(not dir) is allowed in the zipfile')
cached_zipfile.extractall(model_dir)
            extracted_name = members[0].filename
            model_path = os.path.join(model_dir, extracted_name)
return torch.load(model_path, map_location=map_location)
# shamelessly copied from here https://discuss.pytorch.org/t/utility-function-for-calculating-the-shape-of-a-conv-output/11173/7
# original docs at https://pytorch.org/docs/master/nn.html#conv2d
#
# # def hooked_backward(self, module):
# # with hook_output(module) as hook_a:
# # preds = module(torch.random(1,3,224,224))
# # preds[0, 0].backward()
# # return hook_a
#
# # def hook_fn(m, grad_input, grad_output):
# # # visualisation[m] = o
# # return grad_output * 0.01
# # #pass
#
# # def get_all_layers(self, net):
# # handles = (None, None)
# # for name, layer in net._modules.items():
# # # If it is a sequential, don't register a hook on it
# # # but recursively register hook on all it's module children
# # if isinstance(layer, nn.Sequential):
# # net.get_all_layers(layer)
# # else:
# # # it's a non sequential. Register a hook
# # layer.register_forward_hook(self.hook_fn)
# # return handles
#
# def memory_strided_im2col(self, x, kernel, step=1):
# output_shape = (x.shape[0] - kernel) + 1
# return view_as_windows(x.numpy(), kernel).reshape(output_shape * output_shape, kernel * 2)
# # output_shape = (x.shape[0] - kernel.shape[0]) + 1
# # return view_as_windows(x, kernel.shape, step).reshape(output_shape * output_shape, kernel.shape[0] * 2)
# # view_as_windows has an additional step parameter that can be used with different strides
#
#
# # input_matrix = np.array([[3,9,0], [2, 8, 1], [1,4,8]])
# # kernel = np.array([[8,9], [4,4]])
# # output_shape = (input_matrix.shape[0] - kernel.shape[0]) + 1
# # mem_strided_mat = memory_strided_im2col(input_matrix, kernel)
# # mem_strided_conv = np.dot(kernel.flatten(), mem_strided_mat) + biasmem_strided_conv = mem_strided_conv.reshape(output_shape, output_shape)
# # PS: x_newview = np.lib.stride_tricks.as_strided(x, shape = (5, 4), strides = (8,8))
#
# def conv_2d(x, kernel):
# # Assuming Padding = 0, stride = 1
# output_shape = x.shape[0] - kernel + 1
# result = np.zeros((output_shape, output_shape))
#
# for row in range(x.shape[0] - 1):
# for col in range(x.shape[1] - 1):
# window = x[row: row + kernel, col: col + kernel]
# result[row, col] = np.sum(np.multiply(kernel, window))
# return result
#
#
# def calculateWindows(self, x):
# windows = F.unfold(x, kernel_size=11, padding=2, dilation=1, stride=4)
#
# windows = windows.transpose(1, 2).contiguous().view(-1, x.shape[1], 11 * 11)
# windows = windows.transpose(0, 1)
#
# return windows
#
#
# def add_pairs(self, module, keypair, inp, calc_te=False, left_layer=True):
# if self.conv_te:
# if isinstance(module.padding, tuple):
# xpad = F.pad(inp.detach(),
# pad=([module.padding[0], module.padding[0], module.padding[1], module.padding[1]]),
# mode='constant', value=0)
# elif module.padding != 0:
# xpad = F.pad(inp.detach(),
# pad=([module.padding, module.padding, module.padding, module.padding]),
# mode='constant', value=0)
# else:
# xpad = inp.detach()
#
# kernel_h_index = 0
# if (isinstance(module.kernel_size, tuple)):
# kernel_size = module.kernel_size[0]
# else:
# kernel_size = module.kernel_size
# kernel_w_index = 0
# if not keypair in self.averages:
# self.averages[keypair] = dict()
#
# for b in range(0, xpad.shape[0]):
# if not b in self.averages[keypair]:
# self.averages[keypair][b] = dict()
# for ic in range(0, xpad[b].shape[0]):
# if not ic in self.averages[keypair][b]:
# self.averages[keypair][b][ic] = TEDiscrete()
# row_windows = 0
# col_windows = 0
# while (kernel_h_index + kernel_size) <= xpad[b, ic].shape[0]:
# while (kernel_w_index + kernel_size) <= xpad[b, ic].shape[0]:
# # print('filter: ' + str(kernel_h_index) + ':' + str(kernel_h_index + kernel_size) + ', ' + str(kernel_w_index) + ':' + str(kernel_w_index + kernel_size))
# window = xpad[b, ic][kernel_h_index: kernel_h_index + kernel_size,
# kernel_w_index: kernel_w_index + kernel_size]
# # can explore of having the same TE array built from multiple adjacent layers at once
# # having the average of a single layerused as a single entry
# med = window.median()
# if med <= self.g:
# if calc_te == True:
# self.averages[keypair][b][ic].add_y(0.)
# else:
# self.averages[keypair][b][ic].add_x(0.)
# else:
# if calc_te == True:
# self.averages[keypair][b][ic].add_y(1.)
# else:
# self.averages[keypair][b][ic].add_x(1.)
# if (isinstance(module.stride, tuple)):
# kernel_w_index += module.stride[0]
# else:
# kernel_w_index += module.stride
# kernel_w_index = 0
# if (isinstance(module.stride, tuple)):
# kernel_h_index += module.stride[0]
# else:
# kernel_h_index += module.stride
#
# kernel_h_index = 0
# kernel_w_index = 0
#
# # triger the TE calculus for the last pair of layers for all xs and ys gathered in TE
# if calc_te == True and self._forward_passes >= self.skip_first:
# for b in range(0, xpad.shape[0]):
# for ic in range(0, xpad[0].shape[0]):
# assert len(self.averages[keypair][b][ic].xs) == len(
# self.averages[keypair][b][ic].ys), "TE input series lengths are different"
# self.averages[keypair][b][ic].pair_xy()
# self.averages[keypair][b][ic].calc_te()
# # print(self.averages[keypair][b][ic].sum)
#
#
# # def add_fc_pairs(self, keypair, inp, inp2, interim, calc_te = False, left_layer = True):
# # if (self._forward_passes % self.batch_size) == 0:
# # idx = math.floor(self._forward_passes / self.batch_size)
# # xb = inp[self.batch_size - 1].detach().to(torch.uint8)
# # xb[xb >= self.g] = 1
# # xb[xb > self.g] = 0
# # interim[:, idx] = xb
# # del xb
# #
# # #torch.cartesian_prod(aa, bb)
# #
# # if self.conv_te == False and calc_te:
# # cp = torch.cartesian_prod(inp[:, -1], inp2[:, -1])
# # #fcpair = self.product(inp[:, -1], inp2[:,-1])
# #
# # #we look for non conv layer logic
# # #xpad = inp.detach()
# #
# # if not keypair in self.averages:
# # self.averages[keypair] = dict()
# #
# # for b in range(0, xpad.shape[0]):
# # if not b in self.averages[keypair]:
# # self.averages[keypair][b] = dict()
# # for ic in range(0, xpad[b].shape[0]):
# # if not ic in self.averages[keypair][b]:
# # self.averages[keypair][b][ic] = TEDiscrete()
# # self.averages[keypair][b][ic].initialise()
# #
# # if xpad[b][ic] <= self.g:
# # if calc_te == True:
# # self.averages[keypair][b][ic].add_dest(0)
# # else:
# # self.averages[keypair][b][ic].add_source(0)
# # else:
# # if calc_te == True:
# # self.averages[keypair][b][ic].add_dest(1)
# # else:
# # self.averages[keypair][b][ic].add_source(1)
# #
# # #triger the TE calculus for the last pair of layers for all xs and ys gathered in TE
# # if calc_te == True and self._forward_passes >= self.skip_first:
# # for b in range(0, xpad.shape[0]):
# # for ic in range(0, xpad[0].shape[0]):
# # assert len(self.averages[keypair][b][ic].xs) == len(self.averages[keypair][b][ic].ys), "TE input series lengths are different"
# # sum = self.averages[keypair][b][ic].calcLocalTE()
# # print(sum)
#
# # def product(self, *args, repeat=1):
# # # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# # # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
# # pools = [tuple(pool) for pool in args] * repeat
# # result = [[]]
# # for pool in pools:
# # result = [x + [y] for x in result for y in pool]
# # return result
#
#
#
# def num2tuple(num):
# return num if isinstance(num, tuple) else (num, num)
#
#
# def conv2d_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):
# h_w, kernel_size, stride, pad, dilation = num2tuple(h_w), \
# num2tuple(kernel_size), num2tuple(stride), num2tuple(pad), num2tuple(
# dilation)
# pad = num2tuple(pad[0]), num2tuple(pad[1])
#
# h = math.floor((h_w[0] + sum(pad[0]) - dilation[0] * (kernel_size[0] - 1) - 1) / stride[0] + 1)
# w = math.floor((h_w[1] + sum(pad[1]) - dilation[1] * (kernel_size[1] - 1) - 1) / stride[1] + 1)
#
# return h, w
#
#
# def convtransp2d_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1, out_pad=0):
# h_w, kernel_size, stride, pad, dilation, out_pad = num2tuple(h_w), \
# num2tuple(kernel_size), num2tuple(stride), num2tuple(
# pad), num2tuple(dilation), num2tuple(out_pad)
# pad = num2tuple(pad[0]), num2tuple(pad[1])
#
# h = (h_w[0] - 1) * stride[0] - sum(pad[0]) + dilation[0] * (kernel_size[0] - 1) + out_pad[0] + 1
# w = (h_w[1] - 1) * stride[1] - sum(pad[1]) + dilation[1] * (kernel_size[1] - 1) + out_pad[1] + 1
#
# return h, w
#
#
# def conv2d_get_padding(h_w_in, h_w_out, kernel_size=1, stride=1, dilation=1):
# h_w_in, h_w_out, kernel_size, stride, dilation = num2tuple(h_w_in), num2tuple(h_w_out), \
# num2tuple(kernel_size), num2tuple(stride), num2tuple(dilation)
#
# p_h = ((h_w_out[0] - 1) * stride[0] - h_w_in[0] + dilation[0] * (kernel_size[0] - 1) + 1)
# p_w = ((h_w_out[1] - 1) * stride[1] - h_w_in[1] + dilation[1] * (kernel_size[1] - 1) + 1)
#
# return (math.floor(p_h / 2), math.ceil(p_h / 2)), (math.floor(p_w / 2), math.ceil(p_w / 2))
#
#
# def convtransp2d_get_padding(h_w_in, h_w_out, kernel_size=1, stride=1, dilation=1, out_pad=0):
# h_w_in, h_w_out, kernel_size, stride, dilation, out_pad = num2tuple(h_w_in), num2tuple(h_w_out), \
# num2tuple(kernel_size), num2tuple(stride), num2tuple(
# dilation), num2tuple(out_pad)
#
# p_h = -(h_w_out[0] - 1 - out_pad[0] - dilation[0] * (kernel_size[0] - 1) - (h_w_in[0] - 1) * stride[0]) / 2
# p_w = -(h_w_out[1] - 1 - out_pad[1] - dilation[1] * (kernel_size[1] - 1) - (h_w_in[1] - 1) * stride[1]) / 2
#
# return (math.floor(p_h / 2), math.ceil(p_h / 2)), (math.floor(p_w / 2), math.ceil(p_w / 2))
# # self.features = nn.Sequential(OrderedDict( {
# # 'Conv1' : nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),#padding 5
# # 'Relu1' : nn.ReLU(inplace=True),
# # 'MaxPool1' : nn.MaxPool2d(kernel_size=3, stride=2),#kernel_size=2.
# # 'Conv2' : nn.Conv2d(64, 192, kernel_size=5, padding=2),
# # 'Relu2' : nn.ReLU(inplace=True),
# # 'MaxPool2' : nn.MaxPool2d(kernel_size=3, stride=2),#kernel_size=2
# # 'Conv3' : nn.Conv2d(192, 384, kernel_size=3, padding=1),
# # 'Relu3' : nn.ReLU(inplace=True),
# # 'Conv4' : nn.Conv2d(384, 256, kernel_size=3, padding=1),
# # 'Relu4' : nn.ReLU(inplace=True),
# # 'Conv5' : nn.Conv2d(256, 256, kernel_size=3, padding=1),
# # 'Relu5' : nn.ReLU(inplace=True),
# # 'MaxPool5' : nn.MaxPool2d(kernel_size=3, stride=2),#kernel_size=2
# # }))

# ---- mboot/decorator.py (nxdhf/pyMBoot, BSD-3-Clause) ----
import sys
import time
import logging
import traceback
import functools
def global_error_handler(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            root_logger = logging.getLogger()
            err_msg = '\n' + traceback.format_exc() if root_logger.level == logging.DEBUG else ' ERROR: {}'.format(str(e))
            print(err_msg)
            sys.exit(0)
    return wrapper
def clock(func):
'''Used to calculate function time in debug, manually add'''
@functools.wraps(func)
def clocked(*args, **kwargs):
t0 = time.perf_counter()
result = func(*args, **kwargs)
elapsed = time.perf_counter() - t0
arg_lst = []
name = func.__name__
if args:
arg_lst.append(', '.join(repr(arg) for arg in args))
if kwargs:
pairs = ['%s=%r' % (k, w) for k, w in sorted(kwargs.items())]
arg_lst.append(', '.join(pairs))
arg_str = ', '.join(arg_lst)
logging.debug('[%0.8f] %s(%s) -> %r ' % (elapsed, name, arg_str, result))
return result
return clocked | 31.694444 | 122 | 0.568799 |
9beb18ccd57ae91d21623877ebd1eb6dbbd94b2c | 23,587 | py | Python | test/test_markdown_backslash_escapes_extra.py | scop/pymarkdown | 562ba8f7857d99ba09e86e42de5a37ec6d9b2c30 | [
"MIT"
] | 20 | 2021-01-14T17:39:09.000Z | 2022-03-14T08:35:22.000Z | test/test_markdown_backslash_escapes_extra.py | scop/pymarkdown | 562ba8f7857d99ba09e86e42de5a37ec6d9b2c30 | [
"MIT"
] | 304 | 2020-08-15T23:24:00.000Z | 2022-03-31T23:34:03.000Z | test/test_markdown_backslash_escapes_extra.py | scop/pymarkdown | 562ba8f7857d99ba09e86e42de5a37ec6d9b2c30 | [
"MIT"
] | 3 | 2021-08-11T10:26:26.000Z | 2021-11-02T20:41:27.000Z | """
https://github.github.com/gfm/#backslash-escapes
"""
import pytest
from .utils import act_and_assert
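# act_and_assert parses source_markdown and checks both the serialised token
# stream (expected_tokens) and the rendered HTML (expected_gfm).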
@pytest.mark.gfm
def test_backslash_escapes_extra_1():
"""
Test case backslash extra 1: backslash before the code span open
"""
# Arrange
source_markdown = """\\`code span`"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):\\\b`code span`:]",
"[end-para:::True]",
]
expected_gfm = """<p>`code span`</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_1a():
"""
Test case backslash extra 1a: backslash before the code span closed
"""
# Arrange
source_markdown = """`code span\\`"""
expected_tokens = [
"[para(1,1):]",
"[icode-span(1,1):code span\\:`::]",
"[end-para:::True]",
]
expected_gfm = """<p><code>code span\\</code></p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_2():
"""
Test case backslash extra 2: backslash before the character reference
"""
# Arrange
source_markdown = """\\& the band played on"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):\\\b\a&\a&\aamp; the band played on:]",
"[end-para:::True]",
]
expected_gfm = """<p>&amp; the band played on</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_3():
"""
Test case backslash extra 3: backslash before the inline html open
"""
# Arrange
source_markdown = """\\<there it='is'>, really"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):\\\b\a<\a<\athere it='is'\a>\a>\a, really:]",
"[end-para:::True]",
]
expected_gfm = """<p><there it='is'>, really</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_4():
"""
Test case backslash extra 4: backslash before the inline html close
"""
# Arrange
source_markdown = """<there it='is'\\>, really"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):\a<\a<\athere it='is'\\\b\a>\a>\a, really:]",
"[end-para:::True]",
]
expected_gfm = """<p><there it='is'>, really</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_5():
"""
Test case backslash extra 5: backslash before the autolink open
"""
# Arrange
source_markdown = """\\<http://www.google.com> is where to look"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):\\\b\a<\a<\ahttp://www.google.com\a>\a>\a is where to look:]",
"[end-para:::True]",
]
expected_gfm = """<p><http://www.google.com> is where to look</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_6():
"""
Test case backslash extra 6: backslash before the autolink close
"""
# Arrange
source_markdown = """<http://www.google.com\\> is where to look"""
expected_tokens = [
"[para(1,1):]",
"[uri-autolink(1,1):http://www.google.com\\]",
"[text(1,25): is where to look:]",
"[end-para:::True]",
]
expected_gfm = """<p><a href="http://www.google.com%5C">http://www.google.com\\</a> is where to look</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_7():
"""
Test case backslash extra 7: backslash before the emphasis start
"""
# Arrange
source_markdown = """\\*it's* me!"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):\\\b*it's:]",
"[text(1,7):*:]",
"[text(1,8): me!:]",
"[end-para:::True]",
]
expected_gfm = """<p>*it's* me!</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_8():
"""
Test case backslash extra 8: backslash before the emphasis end
"""
# Arrange
source_markdown = """*it's\\* me!"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):*:]",
"[text(1,2):it's\\\b* me!:]",
"[end-para:::True]",
]
expected_gfm = """<p>*it's* me!</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_9():
"""
Test case backslash extra 9: backslash before the first emphasis start
"""
# Arrange
source_markdown = """*\\*it's** me!"""
expected_tokens = [
"[para(1,1):]",
"[emphasis(1,1):1:*]",
"[text(1,2):\\\b*it's:]",
"[end-emphasis(1,8)::]",
"[text(1,9):*:]",
"[text(1,10): me!:]",
"[end-para:::True]",
]
expected_gfm = """<p><em>*it's</em>* me!</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_10():
"""
Test case backslash extra 10: backslash before the first emphasis end
"""
# Arrange
source_markdown = """**it's\\** me!"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):*:]",
"[emphasis(1,2):1:*]",
"[text(1,3):it's\\\b*:]",
"[end-emphasis(1,9)::]",
"[text(1,10): me!:]",
"[end-para:::True]",
]
expected_gfm = """<p>*<em>it's*</em> me!</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_11():
"""
Test case backslash extra 11: backslash before the link open
"""
# Arrange
source_markdown = """\\[Foo](/uri) is a link"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):\\\b[Foo:]",
"[text(1,6):]:]",
"[text(1,7):(/uri) is a link:]",
"[end-para:::True]",
]
expected_gfm = """<p>[Foo](/uri) is a link</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_12():
"""
Test case backslash extra 12: backslash before the image open
"""
# Arrange
source_markdown = """\\ is an image"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):\\\b!:]",
'[link(1,3):inline:/url:title::::foo:False:":: :]',
"[text(1,4):foo:]",
"[end-link::]",
"[text(1,22): is an image:]",
"[end-para:::True]",
]
expected_gfm = """<p>!<a href="/url" title="title">foo</a> is an image</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_13():
"""
Test case backslash extra 13: backslash between the image open characters
Also see: 600
"""
# Arrange
source_markdown = """!\\[foo](/url "title") is an image"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):!\\\b[foo:]",
"[text(1,7):]:]",
'[text(1,8):(/url \a"\a"\atitle\a"\a"\a) is an image:]',
"[end-para:::True]",
]
expected_gfm = """<p> is an image</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_14():
"""
Test case backslash extra 14: backslash before the code span open in setext
"""
# Arrange
source_markdown = """\\`code span`
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):\\\b`code span`:]",
"[end-setext::]",
]
expected_gfm = """<h2>`code span`</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_14a():
"""
Test case backslash extra 14a: backslash before the code span closed in setext
"""
# Arrange
source_markdown = """`code span\\`
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[icode-span(1,1):code span\\:`::]",
"[end-setext::]",
]
expected_gfm = """<h2><code>code span\\</code></h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_15():
"""
Test case backslash extra 15: backslash before the character reference in setext
"""
# Arrange
source_markdown = """\\& the band played on
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):\\\b\a&\a&\aamp; the band played on:]",
"[end-setext::]",
]
expected_gfm = """<h2>&amp; the band played on</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_16():
"""
Test case backslash extra 16: backslash before the inline html open in setext
"""
# Arrange
source_markdown = """\\<there it='is'>, really
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):\\\b\a<\a<\athere it='is'\a>\a>\a, really:]",
"[end-setext::]",
]
expected_gfm = """<h2><there it='is'>, really</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_17():
"""
Test case backslash extra 17: backslash before the inline html close in setext
"""
# Arrange
source_markdown = """<there it='is'\\>, really
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):\a<\a<\athere it='is'\\\b\a>\a>\a, really:]",
"[end-setext::]",
]
expected_gfm = """<h2><there it='is'>, really</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_18():
"""
Test case backslash extra 18: backslash before the autolink open in setext
"""
# Arrange
source_markdown = """\\<http://www.google.com> is where to look
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):\\\b\a<\a<\ahttp://www.google.com\a>\a>\a is where to look:]",
"[end-setext::]",
]
expected_gfm = """<h2><http://www.google.com> is where to look</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_19():
"""
Test case backslash extra 19: backslash before the autolink close in setext
"""
# Arrange
source_markdown = """<http://www.google.com\\> is where to look
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[uri-autolink(1,1):http://www.google.com\\]",
"[text(1,25): is where to look:]",
"[end-setext::]",
]
expected_gfm = """<h2><a href="http://www.google.com%5C">http://www.google.com\\</a> is where to look</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_20():
"""
Test case backslash extra 20: backslash before the emphasis start in setext
"""
# Arrange
source_markdown = """\\*it's* me!
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):\\\b*it's:]",
"[text(1,7):*:]",
"[text(1,8): me!:]",
"[end-setext::]",
]
expected_gfm = """<h2>*it's* me!</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_21():
"""
Test case backslash extra 21: backslash before the emphasis end in setext
"""
# Arrange
source_markdown = """*it's\\* me!
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):*:]",
"[text(1,2):it's\\\b* me!:]",
"[end-setext::]",
]
expected_gfm = """<h2>*it's* me!</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_22():
"""
Test case backslash extra 22: backslash before the first emphasis start in setext
"""
# Arrange
source_markdown = """*\\*it's** me!
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[emphasis(1,1):1:*]",
"[text(1,2):\\\b*it's:]",
"[end-emphasis(1,8)::]",
"[text(1,9):*:]",
"[text(1,10): me!:]",
"[end-setext::]",
]
expected_gfm = """<h2><em>*it's</em>* me!</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_23():
"""
Test case backslash extra 23: backslash before the first emphasis end in setext
"""
# Arrange
source_markdown = """**it's\\** me!
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):*:]",
"[emphasis(1,2):1:*]",
"[text(1,3):it's\\\b*:]",
"[end-emphasis(1,9)::]",
"[text(1,10): me!:]",
"[end-setext::]",
]
expected_gfm = """<h2>*<em>it's*</em> me!</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_24():
"""
Test case backslash extra 24: backslash before the link open in setext
"""
# Arrange
source_markdown = """\\[Foo](/uri) is a link
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):\\\b[Foo:]",
"[text(1,6):]:]",
"[text(1,7):(/uri) is a link:]",
"[end-setext::]",
]
expected_gfm = """<h2>[Foo](/uri) is a link</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_25():
"""
Test case backslash extra 25: backslash before the image open in setext
"""
# Arrange
source_markdown = """\\ is an image
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):\\\b!:]",
'[link(1,3):inline:/url:title::::foo:False:":: :]',
"[text(1,4):foo:]",
"[end-link::]",
"[text(1,22): is an image:]",
"[end-setext::]",
]
expected_gfm = """<h2>!<a href="/url" title="title">foo</a> is an image</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_26():
"""
Test case backslash extra 26: backslash between the image open characters in setext
"""
# Arrange
source_markdown = """!\\[foo](/url "title") is an image
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):!\\\b[foo:]",
"[text(1,7):]:]",
'[text(1,8):(/url \a"\a"\atitle\a"\a"\a) is an image:]',
"[end-setext::]",
]
expected_gfm = """<h2> is an image</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_27():
"""
Test case backslash extra 27: backslash before the code span open in atx
"""
# Arrange
source_markdown = """# \\`code span`"""
expected_tokens = [
"[atx(1,1):1:0:]",
"[text(1,3):\\\b`code span`: ]",
"[end-atx::]",
]
expected_gfm = """<h1>`code span`</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_27a():
"""
Test case backslash extra 27a: backslash before the code span closed in atx
"""
# Arrange
source_markdown = """# `code span\\`"""
expected_tokens = [
"[atx(1,1):1:0:]",
"[text(1,3)::\a \a\x03\a]",
"[icode-span(1,3):code span\\:`::]",
"[end-atx::]",
]
expected_gfm = """<h1><code>code span\\</code></h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_28():
"""
Test case backslash extra 28: backslash before the character reference in atx
"""
# Arrange
source_markdown = """# \\& the band played on"""
expected_tokens = [
"[atx(1,1):1:0:]",
"[text(1,3):\\\b\a&\a&\aamp; the band played on: ]",
"[end-atx::]",
]
expected_gfm = """<h1>&amp; the band played on</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_29():
"""
Test case backslash extra 29: backslash before the inline html open in atx
"""
# Arrange
source_markdown = """# \\<there it='is'>, really"""
expected_tokens = [
"[atx(1,1):1:0:]",
"[text(1,3):\\\b\a<\a<\athere it='is'\a>\a>\a, really: ]",
"[end-atx::]",
]
expected_gfm = """<h1><there it='is'>, really</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_30():
"""
Test case backslash extra 30: backslash before the inline html close in atx
"""
# Arrange
source_markdown = """# <there it='is'\\>, really"""
expected_tokens = [
"[atx(1,1):1:0:]",
"[text(1,3):\a<\a<\athere it='is'\\\b\a>\a>\a, really: ]",
"[end-atx::]",
]
expected_gfm = """<h1><there it='is'>, really</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_31():
"""
Test case backslash extra 31: backslash before the autolink open in atx
"""
# Arrange
source_markdown = """# \\<http://www.google.com> is where to look"""
expected_tokens = [
"[atx(1,1):1:0:]",
"[text(1,3):\\\b\a<\a<\ahttp://www.google.com\a>\a>\a is where to look: ]",
"[end-atx::]",
]
expected_gfm = """<h1><http://www.google.com> is where to look</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_32():
"""
Test case backslash extra 32: backslash before the autolink close in atx
"""
# Arrange
source_markdown = """# <http://www.google.com\\> is where to look"""
expected_tokens = [
"[atx(1,1):1:0:]",
"[text(1,3)::\a \a\x03\a]",
"[uri-autolink(1,3):http://www.google.com\\]",
"[text(1,27): is where to look:]",
"[end-atx::]",
]
expected_gfm = """<h1><a href="http://www.google.com%5C">http://www.google.com\\</a> is where to look</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_33():
"""
Test case backslash extra 33: backslash before the emphasis start in atx
"""
# Arrange
source_markdown = """# \\*it's* me!"""
expected_tokens = [
"[atx(1,1):1:0:]",
"[text(1,3):\\\b*it's: ]",
"[text(1,9):*:]",
"[text(1,10): me!:]",
"[end-atx::]",
]
expected_gfm = """<h1>*it's* me!</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_34():
"""
Test case backslash extra 34: backslash before the emphasis end in atx
"""
# Arrange
source_markdown = """# *it's\\* me!"""
expected_tokens = [
"[atx(1,1):1:0:]",
"[text(1,3)::\a \a\x03\a]",
"[text(1,3):*:]",
"[text(1,4):it's\\\b* me!:]",
"[end-atx::]",
]
expected_gfm = """<h1>*it's* me!</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_35():
"""
Test case backslash extra 35: backslash before the first emphasis start in atx
"""
# Arrange
source_markdown = """# *\\*it's** me!"""
expected_tokens = [
"[atx(1,1):1:0:]",
"[text(1,3)::\a \a\x03\a]",
"[emphasis(1,3):1:*]",
"[text(1,4):\\\b*it's:]",
"[end-emphasis(1,10)::]",
"[text(1,11):*:]",
"[text(1,12): me!:]",
"[end-atx::]",
]
expected_gfm = """<h1><em>*it's</em>* me!</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_36():
"""
Test case backslash extra 36: backslash before the first emphasis end in atx
"""
# Arrange
source_markdown = """# **it's\\** me!"""
expected_tokens = [
"[atx(1,1):1:0:]",
"[text(1,3)::\a \a\x03\a]",
"[text(1,3):*:]",
"[emphasis(1,4):1:*]",
"[text(1,5):it's\\\b*:]",
"[end-emphasis(1,11)::]",
"[text(1,12): me!:]",
"[end-atx::]",
]
expected_gfm = """<h1>*<em>it's*</em> me!</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_37():
"""
Test case backslash extra 37: backslash before the link open in atx
"""
# Arrange
source_markdown = """# \\[Foo](/uri) is a link"""
expected_tokens = [
"[atx(1,1):1:0:]",
"[text(1,3):\\\b[Foo: ]",
"[text(1,8):]:]",
"[text(1,9):(/uri) is a link:]",
"[end-atx::]",
]
expected_gfm = """<h1>[Foo](/uri) is a link</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_38():
"""
Test case backslash extra 38: backslash before the image open in atx
"""
# Arrange
source_markdown = """# \\ is an image"""
expected_tokens = [
"[atx(1,1):1:0:]",
"[text(1,3):\\\b!: ]",
'[link(1,5):inline:/url:title::::foo:False:":: :]',
"[text(1,6):foo:]",
"[end-link::]",
"[text(1,24): is an image:]",
"[end-atx::]",
]
expected_gfm = """<h1>!<a href="/url" title="title">foo</a> is an image</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_backslash_escapes_extra_39():
"""
Test case backslash extra 39: backslash between the image open characters in atx
"""
# Arrange
source_markdown = """# !\\[foo](/url "title") is an image"""
expected_tokens = [
"[atx(1,1):1:0:]",
"[text(1,3):!\\\b[foo: ]",
"[text(1,9):]:]",
'[text(1,10):(/url \a"\a"\atitle\a"\a"\a) is an image:]',
"[end-atx::]",
]
expected_gfm = """<h1> is an image</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
| 26.651977 | 113 | 0.559969 |
ea8b7605314d1398928a8404ceef2c503ed1d915 | 570 | py | Python | tests/utils/test_regex.py | tonysyu/qwikstart | d942c486a7d4362354e95a11e3797c42a982c891 | ["BSD-3-Clause"] | 1 | 2020-04-10T01:55:51.000Z | 2020-04-10T01:55:51.000Z | tests/utils/test_regex.py | tonysyu/qwikstart | d942c486a7d4362354e95a11e3797c42a982c891 | ["BSD-3-Clause"] | 112 | 2019-12-24T20:04:05.000Z | 2022-01-25T19:05:20.000Z | tests/utils/test_regex.py | tonysyu/qwikstart | d942c486a7d4362354e95a11e3797c42a982c891 | ["BSD-3-Clause"] | null | null | null |
import re
from qwikstart.utils import regex
class TestCreateRegexFlags:
def test_empty_list(self) -> None:
assert regex.create_regex_flags([]) == re.RegexFlag(0)
def test_unknown_flag(self) -> None:
assert regex.create_regex_flags(["NOT-A-FLAG"]) == re.RegexFlag(0)
def test_single_flag(self) -> None:
assert regex.create_regex_flags(["MULTILINE"]) == re.MULTILINE
def test_two_flags(self) -> None:
flags = ["MULTILINE", "IGNORECASE"]
assert regex.create_regex_flags(flags) == (re.MULTILINE | re.IGNORECASE)
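# Usage sketch (an assumption based only on the tests above, not part of the original module):
# create_regex_flags() maps flag names onto a combined re.RegexFlag, so it can be passed
# straight to re.compile, e.g.:
#   pattern = re.compile(r"^qwikstart", regex.create_regex_flags(["MULTILINE", "IGNORECASE"]))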
| 30 | 80 | 0.680702 |
fb231c25a543ccd89a4cec13a29a46d76bb797a7 | 1,235 | py | Python | setup.py | djt5019/episode_renamer | 84d3dda24b54c70a489c2a6bcb9c7c2c2dadfba9 | ["Unlicense"] | null | null | null | setup.py | djt5019/episode_renamer | 84d3dda24b54c70a489c2a6bcb9c7c2c2dadfba9 | ["Unlicense"] | null | null | null | setup.py | djt5019/episode_renamer | 84d3dda24b54c70a489c2a6bcb9c7c2c2dadfba9 | ["Unlicense"] | null | null | null |
#!/usr/bin/env python
from setuptools import find_packages, setup
from eplist import __author__ as author
from eplist import __email__ as email
from eplist import __version__ as version
import sys
info = sys.version_info
if (info.major, info.minor) != (2, 7):
print "Requires Python 2.7"
exit(1)
setup(
name='eplist',
version=version,
description='Simple episode renaming program',
long_description=open('README.rst').read(),
author=author,
author_email=email,
url='https://github.com/djt5019/episode_renamer',
packages=find_packages(),
license="unlicense",
zip_safe=False,
platforms="all",
classifiers=[
"Programming Language :: Python :: 2.7",
"Topic :: Multimedia :: Video",
"Topic :: Utilities",
"Environment :: Console",
"Environment :: X11 Applications :: Qt",
"Operating System :: OS Independent",
],
requires=[
"BeautifulSoup (>=3.2.0)",
"requests (>=0.9.1)",
],
entry_points={
'console_scripts': ['eplist = eplist.main:main']
},
package_data={'': ['eplist.py', 'LICENSE', 'README.rst']},
include_package_data=True,
)
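# Note (editor sketch, not in the original file): with the entry_points above, installing the
# package (for example `pip install .` under Python 2.7) is expected to expose a console script
# named `eplist` that dispatches to eplist.main:main.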
| 26.847826 | 63 | 0.607287 |
61c6064cb8829035b314973d733b072fa98b84ad | 231 | py | Python | helpers/numpy_repeat_1.py | futureseadev/hgwxx7 | 282b370afc7d9c277e6c1f5b31282f14f9236f7b | ["MIT"] | 6 | 2018-06-21T09:44:36.000Z | 2021-10-01T18:37:41.000Z | helpers/numpy_repeat_1.py | futureseadev/hgwxx7 | 282b370afc7d9c277e6c1f5b31282f14f9236f7b | ["MIT"] | 15 | 2020-01-28T22:56:15.000Z | 2022-03-11T23:55:52.000Z | helpers/numpy_repeat_1.py | praveentn/hgwxx7 | 282b370afc7d9c277e6c1f5b31282f14f9236f7b | ["MIT"] | 2 | 2018-06-25T16:40:20.000Z | 2021-10-01T18:37:42.000Z |
# numpy repeat - use case 1
# creating a new pandas column from a list
# assuming you already have a dataframe df with len(df) == len(my_list) * 5 (20 rows here)
import numpy as np
my_list = ['A','B','C','D']
df['my_list'] = np.repeat(my_list, 5)
# repeats each item in my_list 5 times
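# Minimal runnable sketch of the same idea (added for illustration; the DataFrame below is
# hypothetical and not part of the original note):
import pandas as pd
df_example = pd.DataFrame({'value': range(20)})  # 20 rows = len(my_list) * 5
df_example['my_list'] = np.repeat(my_list, 5)  # 'A' x5, 'B' x5, 'C' x5, 'D' x5
print(df_example.head(6))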
| 21 | 42 | 0.683983 |
18f76373d16f40d982a07acbaf7260f7925b736c | 20,748 | py | Python | critiquebrainz/ws/review/views.py | AbhinavOhri/critiquebrainz | d1c1c175209ec78bcced1dbfd5bd64a46be2d1f4 | ["Apache-2.0"] | null | null | null | critiquebrainz/ws/review/views.py | AbhinavOhri/critiquebrainz | d1c1c175209ec78bcced1dbfd5bd64a46be2d1f4 | ["Apache-2.0"] | null | null | null | critiquebrainz/ws/review/views.py | AbhinavOhri/critiquebrainz | d1c1c175209ec78bcced1dbfd5bd64a46be2d1f4 | ["Apache-2.0"] | null | null | null |
from brainzutils import cache
from flask import Blueprint, jsonify
import critiquebrainz.db.review as db_review
from critiquebrainz.db import (
vote as db_vote,
exceptions as db_exceptions,
spam_report as db_spam_report,
revision as db_revision,
users as db_users,
REVIEW_RATING_MIN,
REVIEW_RATING_MAX,
REVIEW_TEXT_MIN_LENGTH,
REVIEW_TEXT_MAX_LENGTH
)
from critiquebrainz.db.review import supported_languages, ENTITY_TYPES
from critiquebrainz.decorators import crossdomain
from critiquebrainz.ws.exceptions import NotFound, AccessDenied, InvalidRequest, LimitExceeded, MissingDataError
from critiquebrainz.ws.oauth import oauth
from critiquebrainz.ws.parser import Parser
review_bp = Blueprint('ws_review', __name__)
REVIEW_CACHE_NAMESPACE = "Review"
def get_review_or_404(review_id):
"""Get a review using review ID or raise error 404"""
try:
review = db_review.get_by_id(review_id)
except db_exceptions.NoDataFoundException:
raise NotFound("Can't find a review with ID: {review_id}".format(review_id=review_id))
return review
@review_bp.route('/<uuid:review_id>', methods=['GET'])
@crossdomain()
def review_entity_handler(review_id):
"""Get review with a specified UUID.
**Request Example:**
.. code-block:: bash
$ curl https://critiquebrainz.org/ws/1/review/b7575c23-13d5-4adc-ac09-2f55a647d3de \\
-X GET
**Response Example:**
.. code-block:: json
{
"review": {
"created": "Tue, 10 Aug 2010 00:00:00 GMT",
"edits": 0,
"entity_id": "03e0a99c-3530-4e64-8f50-6592325c2082",
"entity_type": "release_group",
"id": "b7575c23-13d5-4adc-ac09-2f55a647d3de",
"language": "en",
"last_updated": "Tue, 10 Aug 2010 00:00:00 GMT",
"license": {
"full_name": "Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported",
"id": "CC BY-NC-SA 3.0",
"info_url": "https://creativecommons.org/licenses/by-nc-sa/3.0/"
},
"popularity": 0,
"source": "BBC",
"source_url": "http://www.bbc.co.uk/music/reviews/3vfd",
"text": "TEXT CONTENT OF REVIEW",
"rating": 5,
"user": {
"created": "Wed, 07 May 2014 14:55:23 GMT",
"display_name": "Paul Clarke",
"id": "f5857a65-1eb1-4574-8843-ae6195de16fa",
"karma": 0,
"user_type": "Noob"
},
"votes": {
"positive": 0,
"negative": 0
}
}
}
:statuscode 200: no error
:statuscode 404: review not found
:resheader Content-Type: *application/json*
"""
review = get_review_or_404(review_id)
if review["is_hidden"]:
raise NotFound("Review has been hidden.")
return jsonify(review=db_review.to_dict(review))
@review_bp.route('/<uuid:review_id>/revisions', methods=['GET'])
@crossdomain()
def review_revisions_handler(review_id):
"""Get revisions of review with a specified UUID.
**Request Example:**
.. code-block:: bash
$ curl https://critiquebrainz.org/ws/1/review/b7575c23-13d5-4adc-ac09-2f55a647d3de/revisions \\
-X GET
**Response Example:**
.. code-block:: json
{
"revisions": [
{
"id": 1,
"review_id": "b7575c23-13d5-4adc-ac09-2f55a647d3de",
"text": "TEXT CONTENT OF REVIEW",
"rating": 5,
"timestamp": "Tue, 10 Aug 2010 00:00:00 GMT",
"votes_negative": 0,
"votes_positive": 0
}
]
}
:statuscode 200: no error
:statuscode 404: review not found
:resheader Content-Type: *application/json*
"""
review = get_review_or_404(review_id)
if review["is_hidden"]:
raise NotFound("Review has been hidden.")
revisions = db_revision.get(review_id, limit=None)
count = len(revisions)
for i, r in enumerate(revisions):
r.update(id=count - i)
return jsonify(revisions=revisions)
@review_bp.route('/<uuid:review_id>/revisions/<int:rev>', methods=['GET'])
@crossdomain()
def review_revision_entity_handler(review_id, rev):
"""Get a particular revisions of review with a specified UUID.
**Request Example:**
.. code-block:: bash
$ curl https://critiquebrainz.org/ws/1/review/b7575c23-13d5-4adc-ac09-2f55a647d3de/revisions/1 \\
-X GET
**Response Example:**
.. code-block:: json
{
"revision": {
"id": 1,
"review_id": "b7575c23-13d5-4adc-ac09-2f55a647d3de",
"text": "TEXT CONTENT OF REVIEW",
"rating": 5,
"timestamp": "Tue, 10 Aug 2010 00:00:00 GMT",
"votes_negative": 0,
"votes_positive": 0
}
}
:statuscode 200: no error
:statuscode 404: review not found
:resheader Content-Type: *application/json*
"""
review = get_review_or_404(review_id)
if review["is_hidden"]:
raise NotFound("Review has been hidden.")
count = db_revision.get_count(review["id"])
if rev > count:
raise NotFound("Can't find the revision you are looking for.")
revision = db_revision.get(review_id, offset=count - rev)[0]
revision.update(id=rev)
return jsonify(revision=revision)
@review_bp.route('/<uuid:review_id>', methods=['DELETE'])
@oauth.require_auth('review')
@crossdomain()
def review_delete_handler(review_id, user):
"""Delete review with a specified UUID.
**OAuth scope:** review
**Request Example:**
.. code-block:: bash
$ curl "https://critiquebrainz.org/ws/1/review/9cb11424-d070-4ac1-8771-a8703ae5cccd" \\
-X DELETE \\
-H "Authorization: Bearer <access token>"
**Response Example:**
.. code-block:: json
{
"message": "Request processed successfully"
}
:statuscode 200: success
:statuscode 403: access denied
:statuscode 404: review not found
:resheader Content-Type: *application/json*
"""
review = get_review_or_404(review_id)
if review["is_hidden"]:
raise NotFound("Review has been hidden.")
if str(review["user_id"]) != user.id:
raise AccessDenied
db_review.delete(review_id)
return jsonify(message='Request processed successfully')
@review_bp.route('/<uuid:review_id>', methods=['POST'])
@oauth.require_auth('review')
@crossdomain()
def review_modify_handler(review_id, user):
"""Update review with a specified UUID.
**OAuth scope:** review
:json string text: Text part of review, min length is 25, max is 5000 **(optional)**
:json integer rating: Rating part of review, min is 1, max is 5 **(optional)**
**NOTE:** Please provide only those parameters which need to be updated
:statuscode 200: success
:statuscode 400: invalid request
:statuscode 403: access denied
:statuscode 404: review not found
:resheader Content-Type: *application/json*
"""
def fetch_params(review):
try:
text = Parser.string('json', 'text', min=REVIEW_TEXT_MIN_LENGTH, max=REVIEW_TEXT_MAX_LENGTH)
except MissingDataError:
text = review['text']
try:
rating = Parser.int('json', 'rating', min=REVIEW_RATING_MIN, max=REVIEW_RATING_MAX)
except MissingDataError:
rating = review['rating']
if text is None and rating is None:
raise InvalidRequest(desc='Review must have either text or rating')
return text, rating
review = get_review_or_404(review_id)
if review["is_hidden"]:
raise NotFound("Review has been hidden.")
if str(review["user_id"]) != user.id:
raise AccessDenied
text, rating = fetch_params(review)
if (text == review['text']) and (rating == review['rating']):
return jsonify(message='Request processed successfully', review=dict(id=review["id"]))
db_review.update(
review_id=review_id,
drafted=review["is_draft"],
text=text,
rating=rating
)
return jsonify(message='Request processed successfully',
review=dict(id=review["id"]))
@review_bp.route('/', methods=['GET'])
@crossdomain()
def review_list_handler():
"""Get list of reviews.
**Request Example:**
.. code-block:: bash
$ curl "https://critiquebrainz.org/ws/1/review/?limit=1&offset=50" \\
-X GET
**Response Example:**
.. code-block:: json
{
"count": 9197,
"limit": 1,
"offset": 50,
"reviews": [
{
"created": "Fri, 16 May 2008 00:00:00 GMT",
"edits": 0,
"entity_id": "09259937-6477-3959-8b10-af1cbaea8e6e",
"entity_type": "release_group",
"id": "c807d0b4-0dd0-43fe-a7c4-d29bb61f389e",
"language": "en",
"last_updated": "Fri, 16 May 2008 00:00:00 GMT",
"license": {
"full_name": "Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported",
"id": "CC BY-NC-SA 3.0",
"info_url": "https://creativecommons.org/licenses/by-nc-sa/3.0/"
},
"popularity": 0,
"source": "BBC",
"source_url": "http://www.bbc.co.uk/music/reviews/vh54",
"text": "TEXT CONTENT OF REVIEW",
"rating": 5,
"user": {
"created": "Wed, 07 May 2014 16:20:47 GMT",
"display_name": "Jenny Nelson",
"id": "3bf3fe0c-6db2-4746-bcf1-f39912113852",
"karma": 0,
"user_type": "Noob"
},
"votes": {
"positive": 0,
"negative": 0
}
}
]
}
:json uuid entity_id: UUID of the release group that is being reviewed
:json string entity_type: One of the supported reviewable entities. 'release_group' or 'event' etc. **(optional)**
:query user_id: user's UUID **(optional)**
:query sort: ``popularity`` or ``published_on`` **(optional)**
:query limit: results limit, min is 0, max is 50, default is 50 **(optional)**
:query offset: result offset, default is 0 **(optional)**
:query language: language code (ISO 639-1) **(optional)**
:resheader Content-Type: *application/json*
"""
# TODO: This checking is added to keep old clients working and needs to be removed.
release_group = Parser.uuid('uri', 'release_group', optional=True)
if release_group:
entity_id = release_group
entity_type = 'release_group'
else:
entity_id = Parser.uuid('uri', 'entity_id', optional=True)
entity_type = Parser.string('uri', 'entity_type', valid_values=ENTITY_TYPES, optional=True)
user_id = Parser.uuid('uri', 'user_id', optional=True)
sort = Parser.string('uri', 'sort', valid_values=['popularity', 'published_on', 'rating', 'created'], optional=True)
# "rating" and "created" sort values are deprecated and but allowed here for backward compatibility
if sort == 'created':
sort = 'published_on'
if sort == 'rating':
sort = 'popularity'
limit = Parser.int('uri', 'limit', min=1, max=50, optional=True) or 50
offset = Parser.int('uri', 'offset', optional=True) or 0
language = Parser.string('uri', 'language', min=2, max=3, optional=True)
if language and language not in supported_languages:
raise InvalidRequest(desc='Unsupported language')
# TODO(roman): Ideally caching logic should live inside the model. Otherwise it
# becomes hard to track all this stuff.
cache_key = cache.gen_key('list', entity_id, user_id, sort, limit, offset, language)
cached_result = cache.get(cache_key, REVIEW_CACHE_NAMESPACE)
if cached_result:
reviews = cached_result['reviews']
count = cached_result['count']
else:
reviews, count = db_review.list_reviews(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
sort=sort,
limit=limit,
offset=offset,
language=language,
)
reviews = [db_review.to_dict(p) for p in reviews]
cache.set(cache_key, {
'reviews': reviews,
'count': count,
}, namespace=REVIEW_CACHE_NAMESPACE)
return jsonify(limit=limit, offset=offset, count=count, reviews=reviews)
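# Example client call for the listing endpoint above (editor sketch, not part of the original
# module; the host name is taken from the docstring examples):
# import requests
# resp = requests.get("https://critiquebrainz.org/ws/1/review/",
#                     params={"entity_type": "release_group", "sort": "popularity", "limit": 5})
# reviews = resp.json()["reviews"]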
@review_bp.route('/', methods=['POST'])
@oauth.require_auth('review')
@crossdomain()
def review_post_handler(user):
"""Publish a review.
**OAuth scope:** review
:reqheader Content-Type: *application/json*
:json uuid entity_id: UUID of the entity that is being reviewed
:json string entity_type: One of the supported reviewable entities. 'release_group' or 'event' etc.
:json string text: Text part of review, min length is 25, max is 5000 **(optional)**
:json integer rating: Rating part of review, min is 1, max is 5 **(optional)**
:json string license_choice: license ID
:json string lang: language code (ISO 639-1), default is ``en`` **(optional)**
:json boolean is_draft: whether the review should be saved as a draft or not, default is ``False`` **(optional)**
**NOTE:** You must provide some text or rating for the review.
:resheader Content-Type: *application/json*
"""
def fetch_params():
is_draft = Parser.bool('json', 'is_draft', optional=True) or False
# Note: assigning to the module-level REVIEW_TEXT_MIN_LENGTH here would make it a local name
# and raise UnboundLocalError when is_draft is False, so use a separate local instead.
min_text_length = None if is_draft else REVIEW_TEXT_MIN_LENGTH
entity_id = Parser.uuid('json', 'entity_id')
entity_type = Parser.string('json', 'entity_type', valid_values=ENTITY_TYPES)
text = Parser.string('json', 'text', min=min_text_length, max=REVIEW_TEXT_MAX_LENGTH, optional=True)
rating = Parser.int('json', 'rating', min=REVIEW_RATING_MIN, max=REVIEW_RATING_MAX, optional=True)
license_choice = Parser.string('json', 'license_choice')
language = Parser.string('json', 'language', min=2, max=3, optional=True) or 'en'
if text is None and rating is None:
raise InvalidRequest(desc='Review must have either text or rating')
if language and language not in supported_languages:
raise InvalidRequest(desc='Unsupported language')
if db_review.list_reviews(user_id=user.id, entity_id=entity_id)[1]:
raise InvalidRequest(desc='You have already published a review for this album')
return entity_id, entity_type, text, rating, license_choice, language, is_draft
if user.is_review_limit_exceeded:
raise LimitExceeded('You have exceeded your limit of reviews per day.')
entity_id, entity_type, text, rating, license_choice, language, is_draft = fetch_params()
review = db_review.create(
user_id=user.id,
entity_id=entity_id,
entity_type=entity_type,
text=text,
rating=rating,
license_id=license_choice,
language=language,
is_draft=is_draft,
)
return jsonify(message='Request processed successfully', id=review["id"])
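# Example JSON body for this POST handler (editor sketch based on the docstring above; the UUID
# and license ID are placeholders, not real values):
# {
#   "entity_id": "e9534699-f83c-4f36-a714-0b9478dca235",
#   "entity_type": "release_group",
#   "text": "At least 25 characters of review text go here...",
#   "rating": 4,
#   "license_choice": "CC BY-SA 3.0",
#   "language": "en",
#   "is_draft": false
# }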
@review_bp.route('/languages', methods=['GET'])
@crossdomain()
def languages_list_handler():
"""Get list of supported review languages (language codes from ISO 639-1).
**Example Request:**
.. code-block:: bash
$ curl https://critiquebrainz.org/ws/1/review/languages \\
-X GET
**Example Response:**
.. code-block:: json
{
"languages": [
"aa",
"ab",
"af",
"ak",
"yo",
"za",
"zh",
"zu"
]
}
:resheader Content-Type: *application/json*
"""
return jsonify(languages=supported_languages)
@review_bp.route('/<uuid:review_id>/vote', methods=['GET'])
@oauth.require_auth('vote')
@crossdomain()
def review_vote_entity_handler(review_id, user):
"""Get your vote for a specified review.
**Request Example:**
.. code-block:: bash
$ curl "https://critiquebrainz.org/ws/1/review/9cb11424-d070-4ac1-8771-a8703ae5cccd/vote" \\
-X GET \\
-H "Authorization: Bearer <access token>"
**Response Example:**
.. code-block:: json
{
"vote": {
"vote": true,
"voted_at": "Thu, 22 Dec 2016 11:49:56 GMT"
}
}
**OAuth scope:** vote
:resheader Content-Type: *application/json*
"""
review = get_review_or_404(review_id)
if review["is_hidden"]:
raise NotFound("Review has been hidden.")
try:
vote = db_vote.get(user_id=user.id, revision_id=review["last_revision"]["id"])
except db_exceptions.NoDataFoundException:
raise NotFound("Can't find your vote for this review.")
return jsonify(vote)
@review_bp.route('/<uuid:review_id>/vote', methods=['PUT'])
@oauth.require_auth('vote')
@crossdomain()
def review_vote_put_handler(review_id, user):
"""Set your vote for a specified review.
**OAuth scope:** vote
**Request Example:**
.. code-block:: bash
$ curl "https://critiquebrainz.org/ws/1/review/9cb11424-d070-4ac1-8771-a8703ae5cccd/vote" \\
-X PUT \\
-H "Content-type: application/json" \\
-H "Authorization: Bearer <access token>" \\
-d '{"vote":true}'
**Response Example:**
.. code-block:: json
{
"message": "Request processed successfully"
}
:json boolean vote: ``true`` if upvote, ``false`` if downvote
**NOTE:** Voting on reviews without text is not allowed.
:statuscode 200: success
:statuscode 400: invalid request (see source)
:statuscode 403: daily vote limit exceeded
:statuscode 404: review not found
:resheader Content-Type: *application/json*
"""
def fetch_params():
vote = Parser.bool('json', 'vote')
return vote
review = get_review_or_404(review_id)
if review["is_hidden"]:
raise NotFound("Review has been hidden.")
vote = fetch_params()
if str(review["user_id"]) == user.id:
raise InvalidRequest(desc='You cannot rate your own review.')
if review["text"] is None:
raise InvalidRequest(desc='Voting on reviews without text is not allowed.')
if user.is_vote_limit_exceeded and not db_users.has_voted(user.id, review_id):
raise LimitExceeded('You have exceeded your limit of votes per day.')
db_vote.submit(
user_id=user.id,
revision_id=review["last_revision"]["id"],
vote=vote, # overwrites an existing vote, if needed
)
return jsonify(message='Request processed successfully')
@review_bp.route('/<uuid:review_id>/vote', methods=['DELETE'])
@oauth.require_auth('vote')
@crossdomain()
def review_vote_delete_handler(review_id, user):
"""Delete your vote for a specified review.
**OAuth scope:** vote
**Request Example:**
.. code-block:: bash
$ curl "https://critiquebrainz.org/ws/1/review/9cb11424-d070-4ac1-8771-a8703ae5cccd/vote" \\
-X DELETE \\
-H "Authorization: Bearer <access token>"
**Response Example:**
.. code-block:: json
{
"message": "Request processed successfully"
}
:resheader Content-Type: *application/json*
"""
review = get_review_or_404(review_id)
if review["is_hidden"]:
raise NotFound("Review has been hidden.")
try:
vote = db_vote.get(user_id=user.id, revision_id=review["last_revision"]["id"])
except db_exceptions.NoDataFoundException:
raise InvalidRequest("Review is not rated yet.")
db_vote.delete(user_id=vote["user_id"], revision_id=vote["revision_id"])
return jsonify(message="Request processed successfully")
@review_bp.route('/<uuid:review_id>/report', methods=['POST'])
@oauth.require_auth('vote')
@crossdomain()
def review_spam_report_handler(review_id, user):
"""Create spam report for a specified review.
**OAuth scope:** vote
:resheader Content-Type: *application/json*
"""
review = get_review_or_404(review_id)
if review["is_hidden"]:
raise NotFound("Review has been hidden.")
if review["user_id"] == user.id:
raise InvalidRequest('own')
db_spam_report.create(review["last_revision"]["id"], user.id, "Spam")
return jsonify(message="Spam report created successfully")
| 32.41875 | 120 | 0.616156 |
59b4f521b1c7923a231bdc8ecd7ee44c60036330 | 949 | py | Python | bot.py | AktanKasymaliev/insta_download | cf16ba55f8d47ed300b5fd543aaeac614b49276a | ["MIT"] | 1 | 2022-03-16T14:47:18.000Z | 2022-03-16T14:47:18.000Z | bot.py | AktanKasymaliev/insta_download | cf16ba55f8d47ed300b5fd543aaeac614b49276a | ["MIT"] | null | null | null | bot.py | AktanKasymaliev/insta_download | cf16ba55f8d47ed300b5fd543aaeac614b49276a | ["MIT"] | 4 | 2021-08-08T14:22:22.000Z | 2022-03-28T11:20:49.000Z |
import telebot
from config import import_conf
from telebot import types
from parse_insta import main, delete_file
bot = telebot.TeleBot(import_conf("BOT", "token"))
@bot.message_handler(commands=['start'])
def send_welcome(message: types.Message):
chat_id = message.chat.id
bot.send_message(chat_id, "Hello, send me instagram link for download")
@bot.message_handler(content_types=['text'])
def send_downloaded_file(message: types.Message):
chat_id = message.chat.id
if message.text.startswith('http'):
bot.send_message(chat_id, "Wait a second...")
file_name = main(link=message.text)
if message.text.endswith('.jpg'):
bot.send_photo(chat_id, open(file_name, 'rb'))
delete_file(file_name)
else:
bot.send_video(chat_id, open(file_name, 'rb'))
delete_file(file_name)
else:
bot.send_message(chat_id, "I can read only links")
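# Assumed configuration read by import_conf("BOT", "token") above (editor sketch; the real
# config module/INI layout is not shown in this file):
# [BOT]
# token = <telegram-bot-token-from-BotFather>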
bot.polling()
| 31.633333 | 75 | 0.687039 |
f220680fdbaebcac41a467a8e78051669d528cea | 405 | py | Python | qa/pull-tester/tests_config.py | newstartblockchain/- | e58851967dd58bcd228794ff4904fd3a6c3e8989 | ["MIT"] | null | null | null | qa/pull-tester/tests_config.py | newstartblockchain/- | e58851967dd58bcd228794ff4904fd3a6c3e8989 | ["MIT"] | null | null | null | qa/pull-tester/tests_config.py | newstartblockchain/- | e58851967dd58bcd228794ff4904fd3a6c3e8989 | ["MIT"] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
BUILDDIR="/home/smilecoin/Desktop/sml2.0"
EXEEXT=""
# These will turn into comments if they were disabled when configuring.
ENABLE_WALLET=1
ENABLE_UTILS=1
ENABLE_BITCOIND=1
#ENABLE_ZMQ=1
| 25.3125 | 71 | 0.785185 |
d4010288139e89f92e7e1a1c715375c19d4b15d5 | 2,779 | py | Python | spira/yevon/gdsii/base.py | JCoetzee123/spira | dae08feba1578ecc8745b45109f4fb7bef374546 | ["MIT"] | null | null | null | spira/yevon/gdsii/base.py | JCoetzee123/spira | dae08feba1578ecc8745b45109f4fb7bef374546 | ["MIT"] | null | null | null | spira/yevon/gdsii/base.py | JCoetzee123/spira | dae08feba1578ecc8745b45109f4fb7bef374546 | ["MIT"] | null | null | null |
from spira.core.transformable import Transformable
from spira.core.parameters.initializer import ParameterInitializer
from spira.core.parameters.initializer import MetaInitializer
from spira.core.parameters.descriptor import FunctionParameter
from spira.yevon.process.gdsii_layer import LayerParameter
from spira.core.parameters.variables import *
from spira.yevon.process import get_rule_deck
RDD = get_rule_deck()
class MetaElement(MetaInitializer):
""" """
def __call__(cls, *params, **keyword_params):
kwargs = cls.__map_parameters__(*params, **keyword_params)
cls = super().__call__(**kwargs)
cls.__keywords__ = kwargs
return cls
class __Element__(Transformable, ParameterInitializer, metaclass=MetaElement):
""" Base class for all transformable elements. """
def get_node_id(self):
if self.__id__:
return self.__id__
else:
return self.__str__()
def set_node_id(self, value):
self.__id__ = value
node_id = FunctionParameter(get_node_id, set_node_id)
location_name = StringParameter(default='')
def __init__(self, transformation=None, **kwargs):
super().__init__(transformation=transformation, **kwargs)
def __add__(self, other):
if isinstance(other, list):
l = spira.ElementList([self])
l.extend(other)
return l
elif isinstance(other, __Element__):
return spira.ElementList([self, other])
else:
raise TypeError("Wrong type of argument for addition in __Element__: " + str(type(other)))
def __radd__(self, other):
if isinstance(other, list):
l = spira.ElementList(other)
l.append(self)
return l
elif isinstance(other, __Element__):
return spira.ElementList([other, self])
else:
raise TypeError("Wrong type of argument for addition in __Element__: " + str(type(other)))
def flatten(self):
return [self]
def dependencies(self):
return None
class __LayerElement__(__Element__):
""" """
layer = LayerParameter()
def __init__(self, layer=0, transformation=None, **kwargs):
super().__init__(layer=layer, transformation=transformation, **kwargs)
def __eq__(self, other):
if other == None:
return False
if not isinstance(other, __LayerElement__):
return False
if other.layer.key != self.layer.key:
return False
if self.shape.transform_copy(self.transformation) != other.shape.transform_copy(other.transformation):
return False
return True
def __ne__(self,other):
return not self.__eq__(other)
| 30.877778 | 110 | 0.652033 |
6bfc5be3b783b092806d7870e58a5d23175fd85f | 2,336 | py | Python | optuna/integration/keras.py | Jeyhooon/optuna | 0a5560cd0c8e83fe03f63ab431a513bf893f7d4d | ["MIT"] | 1 | 2019-05-28T07:29:49.000Z | 2019-05-28T07:29:49.000Z | optuna/integration/keras.py | nabenabe0928/optuna | aa505125de8515518fe19ba227edf7a1d3f8ebda | ["MIT"] | null | null | null | optuna/integration/keras.py | nabenabe0928/optuna | aa505125de8515518fe19ba227edf7a1d3f8ebda | ["MIT"] | 2 | 2020-03-03T00:40:28.000Z | 2021-01-28T11:54:32.000Z |
from typing import Dict
from typing import Optional
import optuna
from optuna._deprecated import deprecated
with optuna._imports.try_import() as _imports:
from keras.callbacks import Callback
if not _imports.is_successful():
Callback = object # NOQA
@deprecated(
"2.1.0",
text="Recent Keras release (2.4.0) simply redirects all APIs "
"in the standalone keras package to point to tf.keras. "
"There is now only one Keras: tf.keras. "
"There may be some breaking changes for some workflows by upgrading to keras 2.4.0. "
"Test before upgrading. "
"REF:https://github.com/keras-team/keras/releases/tag/2.4.0",
)
class KerasPruningCallback(Callback):
"""Keras callback to prune unpromising trials.
See `the example <https://github.com/optuna/optuna/blob/master/
examples/pruning/keras_integration.py>`__
if you want to add a pruning callback which observes validation accuracy.
Args:
trial:
A :class:`~optuna.trial.Trial` corresponding to the current evaluation of the
objective function.
monitor:
An evaluation metric for pruning, e.g., ``val_loss`` and
``val_accuracy``. Please refer to `keras.Callback reference
<https://keras.io/callbacks/#callback>`_ for further details.
interval:
Check if trial should be pruned every n-th epoch. By default ``interval=1`` and
pruning is performed after every epoch. Increase ``interval`` to run several
epochs faster before applying pruning.
"""
def __init__(self, trial: optuna.trial.Trial, monitor: str, interval: int = 1) -> None:
super(KerasPruningCallback, self).__init__()
_imports.check()
self._trial = trial
self._monitor = monitor
self._interval = interval
def on_epoch_end(self, epoch: int, logs: Optional[Dict[str, float]] = None) -> None:
if (epoch + 1) % self._interval != 0:
return
logs = logs or {}
current_score = logs.get(self._monitor)
if current_score is None:
return
self._trial.report(float(current_score), step=epoch)
if self._trial.should_prune():
message = "Trial was pruned at epoch {}.".format(epoch)
raise optuna.TrialPruned(message)
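# Usage sketch (editor addition, mirroring the example referenced in the class docstring;
# build_model, x_train and y_train are hypothetical names assumed to exist):
# def objective(trial):
#     model = build_model(trial)
#     model.fit(x_train, y_train, validation_split=0.2, epochs=10,
#               callbacks=[KerasPruningCallback(trial, "val_accuracy")])
#     return model.evaluate(x_train, y_train, verbose=0)[1]
# study = optuna.create_study(direction="maximize", pruner=optuna.pruners.MedianPruner())
# study.optimize(objective, n_trials=20)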
| 35.938462 | 91 | 0.657106 |
b5682525aa125baf1cadc1429f98cfe12ee6dcbd | 1,743 | py | Python | MuPythonLibrary/Uefi/Capsule/CatGenerator_test.py | matthewfcarlson/mu_pip_python_library | 659538b80fd5c060e053e14a828d9d41161682a1 | ["BSD-2-Clause"] | null | null | null | MuPythonLibrary/Uefi/Capsule/CatGenerator_test.py | matthewfcarlson/mu_pip_python_library | 659538b80fd5c060e053e14a828d9d41161682a1 | ["BSD-2-Clause"] | null | null | null | MuPythonLibrary/Uefi/Capsule/CatGenerator_test.py | matthewfcarlson/mu_pip_python_library | 659538b80fd5c060e053e14a828d9d41161682a1 | ["BSD-2-Clause"] | null | null | null |
import os
import unittest
from MuPythonLibrary.Uefi.Capsule.CatGenerator import CatGenerator
# must run from build env or set PYTHONPATH env variable to point to the MuPythonLibrary folder
class CatGeneratorTest(unittest.TestCase):
def test_win10_OS(self):
o = CatGenerator("x64", "win10")
self.assertEqual(o.OperatingSystem, "10")
def test_10_OS(self):
o = CatGenerator("x64", "10")
self.assertEqual(o.OperatingSystem, "10")
def test_win10Server_OS(self):
o = CatGenerator("x64", "Server10")
self.assertEqual(o.OperatingSystem, "Server10")
def test_invalid_OS(self):
with self.assertRaises(ValueError):
CatGenerator("x64", "Invalid Junk")
def test_x64_arch(self):
o = CatGenerator("x64", "win10")
self.assertEqual(o.Arch, "X64")
def test_amd64_arch(self):
o = CatGenerator("amd64", "win10")
self.assertEqual(o.Arch, "X64")
def test_arm_arch(self):
o = CatGenerator("arm", "win10")
self.assertEqual(o.Arch, "ARM")
def test_arm64_arch(self):
o = CatGenerator("arm64", "win10")
self.assertEqual(o.Arch, "ARM64")
def test_aarch64_arch(self):
o = CatGenerator("aarch64", "win10")
self.assertEqual(o.Arch, "ARM64")
def test_invalid_arch(self):
with self.assertRaises(ValueError):
CatGenerator("Invalid Arch", "win10")
def test_invalid_pathtotool(self):
o = CatGenerator("amd64", "10")
with self.assertRaises(Exception) as cm:
o.MakeCat("garbage", os.path.join("c:", "test", "badpath", "inf2cat.exe"))
self.assertTrue(str(cm.exception).startswith("Can't find Inf2Cat on this machine."))
| 31.690909 | 95 | 0.644865 |
5253590c78323092de9de531e620ea85ba83f4ac | 271 | py | Python | Code/train.py | Elitedestroyer2/Waifu_recongizer | 3d28193b78374b954e29de70271fe9a97528613c | ["MIT"] | null | null | null | Code/train.py | Elitedestroyer2/Waifu_recongizer | 3d28193b78374b954e29de70271fe9a97528613c | ["MIT"] | null | null | null | Code/train.py | Elitedestroyer2/Waifu_recongizer | 3d28193b78374b954e29de70271fe9a97528613c | ["MIT"] | null | null | null |
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
print(train_images)
print(train_labels)
print(test_images)
| 24.636364 | 88 | 0.782288 |
47471bf5dca6e5e15b2c531b34a263f4083063a8 | 435 | py | Python | S1/TP5/ex7.py | HerbeMalveillante/ecole | bebbc73cd678c58c9cd40389ea1cf229a0200308 | ["MIT"] | null | null | null | S1/TP5/ex7.py | HerbeMalveillante/ecole | bebbc73cd678c58c9cd40389ea1cf229a0200308 | ["MIT"] | null | null | null | S1/TP5/ex7.py | HerbeMalveillante/ecole | bebbc73cd678c58c9cd40389ea1cf229a0200308 | ["MIT"] | null | null | null |
import random
def cree(n):
return [random.randint(0, 20) for i in range(n)]
def apparitions(x, lis):
return [indice for indice, item in enumerate(lis) if item == x]
entiers = int(input("Saisir le nombre d'entiers de la liste : "))
liste = cree(entiers)
print(f"Liste : {liste}")
entierRecherche = int(input("Saisir l'entier recherché : "))
print(f"Liste des indices d'apparition : {apparitions(entierRecherche, liste)}")
| 25.588235 | 80 | 0.698851 |
bbdd676802bbb0bf4ff7e70bbd4d43e2bf5a9dad | 4,332 | py | Python | test/test_arns.py | fishzle/policy_sentry | aa880c0ff87565132538e1d23ebf113d24c6b3b5 | ["MIT"] | 1 | 2019-10-22T00:50:47.000Z | 2019-10-22T00:50:47.000Z | test/test_arns.py | fishzle/policy_sentry | aa880c0ff87565132538e1d23ebf113d24c6b3b5 | ["MIT"] | null | null | null | test/test_arns.py | fishzle/policy_sentry | aa880c0ff87565132538e1d23ebf113d24c6b3b5 | ["MIT"] | null | null | null |
import unittest
from pathlib import Path
from policy_sentry.shared.arns import does_arn_match
from policy_sentry.shared.database import connect_db
home = str(Path.home())
config_directory = '/.policy_sentry/'
database_file_name = 'aws.sqlite3'
database_path = home + config_directory + database_file_name
db_session = connect_db(database_path)
# "Does Arn Match" tests
# See docs for this list: # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-arns
# Case 1: arn:partition:service:region:account-id:resource
# Case 2: arn:partition:service:region:account-id:resourcetype/resource
# Case 3: arn:partition:service:region:account-id:resourcetype/resource/qualifier
# Case 4: arn:partition:service:region:account-id:resourcetype/resource:qualifier
# Case 5: arn:partition:service:region:account-id:resourcetype:resource
# Case 6: arn:partition:service:region:account-id:resourcetype:resource:qualifier
class ArnsTestCase(unittest.TestCase):
def test_does_arn_match_case_bucket(self):
# Case 1: arn:partition:service:region:account-id:resource
arn_to_test = "arn:aws:s3:::bucket_name"
arn_in_database = "arn:aws:s3:::${BucketName}"
self.assertTrue(does_arn_match(arn_to_test, arn_in_database))
def test_does_arn_match_case_1(self):
# Case 1: arn:partition:service:region:account-id:resource
arn_to_test = "arn:aws:codecommit:us-east-1:123456789012:MyDemoRepo"
arn_in_database = "arn:aws:codecommit:${Region}:${Account}:${RepositoryName}"
self.assertTrue(does_arn_match(arn_to_test, arn_in_database))
def test_does_arn_match_case_2(self):
# Case 2: arn:partition:service:region:account-id:resourcetype/resource
arn_to_test = "arn:aws:ssm:us-east-1:123456789012:parameter/test"
arn_in_database = "arn:aws:ssm:${Region}:${Account}:parameter/${FullyQualifiedParameterName}"
self.assertTrue(does_arn_match(arn_to_test, arn_in_database))
# This one is failing
# def test_does_arn_match_case_3(self):
# # Case 3: arn:partition:service:region:account-id:resourcetype/resource/qualifier
# arn_to_test = "arn:aws:kinesis:us-east-1:account-id:firehose/myfirehose/consumer/someconsumer:${ConsumerCreationTimpstamp}"
# arn_in_database = "arn:aws:kinesis:${Region}:${Account}:${StreamType}/${StreamName}/consumer/${ConsumerName}:${ConsumerCreationTimpstamp}"
# # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_ConsumerDescription.html
# self.assertTrue(does_arn_match(arn_to_test, arn_in_database))
def test_does_arn_match_case_4(self):
# Case 4: arn:partition:service:region:account-id:resourcetype/resource:qualifier
arn_to_test = "arn:aws:batch:region:account-id:job-definition/job-name:revision"
arn_in_database = "arn:aws:batch:${Region}:${Account}:job-definition/${JobDefinitionName}:${Revision}"
self.assertTrue(does_arn_match(arn_to_test, arn_in_database))
def test_does_arn_match_case_5(self):
# Case 5: arn:partition:service:region:account-id:resourcetype:resource
arn_to_test = "arn:aws:states:region:account-id:stateMachine:stateMachineName"
arn_in_database = "arn:aws:states:${Region}:${Account}:stateMachine:${StateMachineName}"
self.assertTrue(does_arn_match(arn_to_test, arn_in_database))
def test_does_arn_match_case_6(self):
# Case 6: arn:partition:service:region:account-id:resourcetype:resource:qualifier
arn_to_test = "arn:aws:states:region:account-id:execution:stateMachineName:executionName"
arn_in_database = "arn:aws:states:${Region}:${Account}:execution:${StateMachineName}:${ExecutionId}"
self.assertTrue(does_arn_match(arn_to_test, arn_in_database))
# def test_does_arn_match_case_greengrass(self):
# # Undocumented case: AWS Greengrass: arn:aws:greengrass:${Region}:${Account}:/greengrass/definition/devices/${DeviceDefinitionId}/versions/${VersionId}
# arn_to_test = "arn:aws:greengrass:${Region}:${Account}:/greengrass/definition/devices/1234567}/versions/1"
# arn_in_database = "arn:aws:greengrass:${Region}:${Account}:/greengrass/definition/devices/${DeviceDefinitionId}/versions/${VersionId}"
# self.assertTrue(does_arn_match(arn_to_test, arn_in_database))
| 59.342466 | 161 | 0.751385 |
e16bef9f6db78f6969e3494091663af67f6315de | 2,947 | py | Python | test/pyaz/network/private_link_service/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | ["MIT"] | null | null | null | test/pyaz/network/private_link_service/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | ["MIT"] | 9 | 2021-09-24T16:37:24.000Z | 2021-12-24T00:39:19.000Z | test/pyaz/network/private_link_service/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | ["MIT"] | null | null | null |
import json, subprocess
from ... pyaz_utils import get_cli_name, get_params
def create(resource_group, name, subnet, lb_frontend_ip_configs, private_ip_address=None, private_ip_allocation_method=None, private_ip_address_version=None, vnet_name=None, public_ip_address=None, location=None, tags=None, lb_name=None, visibility=None, auto_approval=None, fqdns=None, enable_proxy_protocol=None, edge_zone=None):
params = get_params(locals())
command = "az network private-link-service create " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def delete(resource_group, name):
params = get_params(locals())
command = "az network private-link-service delete " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def list(resource_group=None):
params = get_params(locals())
command = "az network private-link-service list " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def show(resource_group, name, expand=None):
params = get_params(locals())
command = "az network private-link-service show " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def update(resource_group, name, tags=None, lb_frontend_ip_configs=None, lb_name=None, visibility=None, auto_approval=None, fqdns=None, enable_proxy_protocol=None, set=None, add=None, remove=None, force_string=None):
params = get_params(locals())
command = "az network private-link-service update " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
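# Usage sketch for the thin az-cli wrappers above (editor addition; resource names are
# placeholders, not values from the original file):
# result = create(resource_group="my-rg", name="my-pls", subnet="my-subnet",
#                 lb_frontend_ip_configs="my-lb-frontend", lb_name="my-lb")
# show(resource_group="my-rg", name="my-pls")
# delete(resource_group="my-rg", name="my-pls")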
| 39.824324 | 331 | 0.690193 |
79908e2a179c229b70d3fc3bd04c0b43f0ae708b | 12,568 | py | Python | tensorflow_probability/python/bijectors/soft_clip.py | jakee417/probability-1 | ae7117f37ac441bc7a888167ea23e5e620c5bcde | ["Apache-2.0"] | 1 | 2020-06-03T14:20:56.000Z | 2020-06-03T14:20:56.000Z | tensorflow_probability/python/bijectors/soft_clip.py | jakee417/probability-1 | ae7117f37ac441bc7a888167ea23e5e620c5bcde | ["Apache-2.0"] | null | null | null | tensorflow_probability/python/bijectors/soft_clip.py | jakee417/probability-1 | ae7117f37ac441bc7a888167ea23e5e620c5bcde | ["Apache-2.0"] | null | null | null |
# Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""SoftClip bijector."""
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import util as tfp_util
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.bijectors import chain
from tensorflow_probability.python.bijectors import scale
from tensorflow_probability.python.bijectors import shift
from tensorflow_probability.python.bijectors import softplus
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import tensor_util
__all__ = [
'SoftClip',
]
class SoftClip(bijector.AutoCompositeTensorBijector):
"""Bijector that approximates clipping as a continuous, differentiable map.
The `forward` method takes unconstrained scalar `x` to a value `y` in
`[low, high]`. For values within the interval and far from the bounds
(`low << x << high`), this mapping is approximately the identity mapping.
```python
b = tfb.SoftClip(low=-10., high=10.)
b.forward([-15., -7., 1., 9., 20.])
# => [-9.993284, -6.951412, 0.9998932, 8.686738, 9.999954 ]
```
The softness of the clipping can be adjusted via the `hinge_softness`
parameter. A sharp constraint (`hinge_softness < 1.0`) will approximate
the identity mapping very well across almost all of its range, but may
be numerically ill-conditioned at the boundaries. A soft constraint
(`hinge_softness > 1.0`) corresponds to a smoother, better-conditioned
mapping, but creates a larger distortion of its inputs.
```python
b_hard = SoftClip(low=-5, high=5., hinge_softness=0.1)
b_soft.forward([-15., -7., 1., 9., 20.])
# => [-10., -7., 1., 8.999995, 10.]
b_soft = SoftClip(low=-5, high=5., hinge_softness=10.0)
b_soft.forward([-15., -7., 1., 9., 20.])
# => [-6.1985435, -3.369276, 0.16719627, 3.6655345, 7.1750355]
```
Note that the outputs are always in the interval `[low, high]`, regardless
of the `hinge_softness`.
#### Example use
A trivial application of this bijector is to constrain the values sampled
from a distribution:
```python
dist = tfd.TransformedDistribution(
distribution=tfd.Normal(loc=0., scale=1.),
bijector=tfb.SoftClip(low=-5., high=5.))
samples = dist.sample(100) # => samples guaranteed in [-10., 10.]
```
A more useful application is to constrain the values considered
during inference, preventing an inference algorithm from proposing values
that cause numerical issues. For example, this model will return a `log_prob`
of `NaN` when `z` is outside of the range `[-5., 5.]`:
```python
dist = tfd.JointDistributionNamed({
'z': tfd.Normal(0., 1.0)
'x': lambda z: tfd.Normal(
loc=tf.log(25 - z**2), # Breaks if z >= 5 or z <= -5.
scale=1.)})
```
Using SoftClip allows us to keep an inference algorithm in the feasible
region without distorting the inference geometry by very much:
```python
target_log_prob_fn = lambda z: dist.log_prob(z=z, x=3.) # Condition on x==3.
# Use SoftClip to ensure sampler stays within the numerically valid region.
mcmc_samples = tfp.mcmc.sample_chain(
kernel=tfp.mcmc.TransformedTransitionKernel(
tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
num_leapfrog_steps=2,
step_size=0.1),
bijector=tfb.SoftClip(-5., 5.)),
trace_fn=None,
current_state=0.,
num_results=100)
```
#### Mathematical Details
The constraint is built by using `softplus(x) = log(1 + exp(x))` as a smooth
approximation to `max(x, 0)`. In combination with affine transformations, this
can implement a constraint to any scalar interval.
In particular, translating `softplus` gives a generic lower bound constraint:
```
max(x, low) = max(x - low, 0) + low
~= softplus(x - low) + low
:= softlower(x)
```
Note that this quantity is always greater than `low` because `softplus` is
positive-valued. We can also implement a soft upper bound:
```
min(x, high) = min(x - high, 0) + high
= -max(high - x, 0) + high
~= -softplus(high - x) + high
:= softupper(x)
```
which, similarly, is always less than `high`.
Composing these bounds as `softupper(softlower(x))` gives a quantity bounded
above by `high`, and bounded below by `softupper(low)` (because `softupper`
is monotonic and its input is bounded below by `low`). In general, we will
have `softupper(low) < low`, so we need to shrink the interval slightly
(by `(high - low) / (high - softupper(low))`) to preserve the lower bound.
The two-sided constraint is therefore:
```python
softclip(x) := (softupper(softlower(x)) - high) *
(high - low) / (high - softupper(low)) + high
= -softplus(high - low - softplus(x - low)) *
(high - low) / (softplus(high-low)) + high
```
Due to this rescaling, the bijector can be mildly asymmetric. Values
of equal distance from the endpoints are mapped to values with slightly
unequal distance from the endpoints; for example,
```python
b = SoftClip(-1., 1.)
b.forward([-0.5, 0.5])
# => [-0.2527727 , 0.19739306]
```
The degree of the asymmetry is proportional to the size of the rescaling
correction, i.e., the extent to which `softupper` fails to be the identity
map at the lower end of the interval. This is maximized when the upper and
lower bounds are very close together relative to the hinge softness, as in
the example above. Conversely, when the interval is wide, the required
correction and asymmetry are very small.
"""
def __init__(self,
low=None,
high=None,
hinge_softness=None,
validate_args=False,
name='soft_clip'):
"""Instantiates the SoftClip bijector.
Args:
low: Optional float `Tensor` lower bound. If `None`, the lower-bound
constraint is omitted.
Default value: `None`.
high: Optional float `Tensor` upper bound. If `None`, the upper-bound
constraint is omitted.
Default value: `None`.
hinge_softness: Optional nonzero float `Tensor`. Controls the softness
of the constraint at the boundaries; values outside of the constraint
set are mapped into intervals of width approximately
`log(2) * hinge_softness` on the interior of each boundary. High
softness reserves more space for values outside of the constraint set,
leading to greater distortion of inputs *within* the constraint set,
but improved numerical stability near the boundaries.
Default value: `None` (`1.0`).
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
parameters = dict(locals())
with tf.name_scope(name):
dtype = dtype_util.common_dtype(
[low, high, hinge_softness], dtype_hint=tf.float32)
low = tensor_util.convert_nonref_to_tensor(
low, name='low', dtype=dtype)
high = tensor_util.convert_nonref_to_tensor(
high, name='high', dtype=dtype)
hinge_softness = tensor_util.convert_nonref_to_tensor(
hinge_softness, name='hinge_softness', dtype=dtype)
softplus_bijector = softplus.Softplus(hinge_softness=hinge_softness)
negate = tf.convert_to_tensor(-1., dtype=dtype)
components = []
if low is not None and high is not None:
# Support reference tensors (eg Variables) for `high` and `low` by
# deferring all computation on them until needed.
width = tfp_util.DeferredTensor(
pretransformed_input=high, transform_fn=lambda high: high - low)
negated_shrinkage_factor = tfp_util.DeferredTensor(
pretransformed_input=width,
transform_fn=lambda w: tf.cast( # pylint: disable=g-long-lambda
negate * w / softplus_bijector.forward(w), dtype=dtype))
# Implement the soft constraint from 'Mathematical Details' above:
# softclip(x) := -softplus(width - softplus(x - low)) *
# (width) / (softplus(width)) + high
components = [
shift.Shift(high),
scale.Scale(negated_shrinkage_factor),
softplus_bijector,
shift.Shift(width),
scale.Scale(negate),
softplus_bijector,
shift.Shift(tfp_util.DeferredTensor(low, lambda x: -x))]
elif low is not None:
# Implement a soft lower bound:
# softlower(x) := softplus(x - low) + low
components = [
shift.Shift(low),
softplus_bijector,
shift.Shift(tfp_util.DeferredTensor(low, lambda x: -x))]
elif high is not None:
# Implement a soft upper bound:
# softupper(x) := -softplus(high - x) + high
components = [shift.Shift(high),
scale.Scale(negate),
softplus_bijector,
scale.Scale(negate),
shift.Shift(high)]
self._low = low
self._high = high
self._hinge_softness = hinge_softness
self._chain = chain.Chain(components, validate_args=validate_args)
super(SoftClip, self).__init__(
forward_min_event_ndims=0,
dtype=dtype,
validate_args=validate_args,
parameters=parameters,
is_constant_jacobian=not components,
name=name)
@classmethod
def _parameter_properties(cls, dtype):
return dict(
low=parameter_properties.ParameterProperties(),
high=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=parameter_properties
.BIJECTOR_NOT_IMPLEMENTED),
hinge_softness=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=(
lambda: softplus.Softplus(low=dtype_util.eps(dtype)))))
@property
def low(self):
return self._low
@property
def high(self):
return self._high
@property
def hinge_softness(self):
return self._hinge_softness
@classmethod
def _is_increasing(cls):
return True
def _forward(self, x):
return self._chain.forward(x)
def _forward_log_det_jacobian(self, x):
return self._chain.forward_log_det_jacobian(x, self.forward_min_event_ndims)
def _inverse(self, y):
with tf.control_dependencies(self._assert_valid_inverse_input(y)):
return self._chain.inverse(y) # pylint: disable=protected-access
def _inverse_log_det_jacobian(self, y):
with tf.control_dependencies(self._assert_valid_inverse_input(y)):
return self._chain.inverse_log_det_jacobian(
y, self.inverse_min_event_ndims)
def _assert_valid_inverse_input(self, y):
assertions = []
if self.validate_args and self.low is not None:
assertions += [assert_util.assert_greater(
y, self.low,
message='Input must be greater than `low`.')]
if self.validate_args and self.high is not None:
assertions += [assert_util.assert_less(
y, self.high,
message='Input must be less than `high`.')]
return assertions
def _parameter_control_dependencies(self, is_init):
if not self.validate_args or self.low is None or self.high is None:
return []
assertions = []
if is_init != (tensor_util.is_ref(self.low) or
tensor_util.is_ref(self.high)):
assertions.append(assert_util.assert_greater(
self.high, self.low,
message='Argument `high` must be greater than `low`.'))
return assertions
| 37.855422 | 80 | 0.666852 |
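A quick numerical check of the two-sided constraint derived in the "Mathematical Details" section above can be done without TensorFlow. The following is a minimal NumPy sketch of the same formula, not the TFP implementation; the scaled `softplus` helper and the example bounds are assumptions chosen to mirror the docstring examples.

```python
import numpy as np

def softplus(x, c=1.0):
    # Scaled softplus c * log(1 + exp(x / c)); c plays the role of hinge_softness.
    return c * np.logaddexp(0.0, x / c)

def softclip(x, low, high, c=1.0):
    # softclip(x) = -softplus(high - low - softplus(x - low)) *
    #               (high - low) / softplus(high - low) + high
    width = high - low
    return (-softplus(width - softplus(x - low, c), c)
            * width / softplus(width, c) + high)

x = np.array([-15., -7., 1., 9., 20.])
print(softclip(x, low=-10., high=10.))         # ~ [-9.993, -6.951, 1.000, 8.687, 10.000]
print(softclip(x, low=-10., high=10., c=10.))  # ~ [-6.199, -3.369, 0.167, 3.666, 7.175]
```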
a33bd85f098431ccb2ffe3207b35bdb5ec67ec23 | 4,808 | py | Python | napari/_qt/dialogs/qt_reader_dialog.py | gselzer/napari | 410459a45db30efdf84c2339db9ceff8ccc570c8 | [ "BSD-3-Clause" ] | null | null | null | napari/_qt/dialogs/qt_reader_dialog.py | gselzer/napari | 410459a45db30efdf84c2339db9ceff8ccc570c8 | [ "BSD-3-Clause" ] | null | null | null | napari/_qt/dialogs/qt_reader_dialog.py | gselzer/napari | 410459a45db30efdf84c2339db9ceff8ccc570c8 | [ "BSD-3-Clause" ] | null | null | null |
import os
from typing import Any, Dict, Optional, Tuple
from qtpy.QtWidgets import (
QButtonGroup,
QCheckBox,
QDialog,
QDialogButtonBox,
QLabel,
QRadioButton,
QVBoxLayout,
QWidget,
)
class QtReaderDialog(QDialog):
"""Dialog for user to select a reader plugin for a given file extension or folder"""
def __init__(
self,
pth: str = '',
parent: QWidget = None,
extension: str = '',
readers: Dict[str, str] = {},
error_message: str = '',
):
super().__init__(parent)
self.setObjectName('Choose reader')
self.setWindowTitle('Choose reader')
self._current_file = pth
self._extension = extension
self._reader_buttons = []
self.setup_ui(error_message, readers)
def setup_ui(self, error_message, readers):
"""Build UI using given error_messsage and readers dict"""
# add instruction label
layout = QVBoxLayout()
label = QLabel(
f"{error_message}Choose reader for {self._current_file}:"
)
layout.addWidget(label)
# add radio button for each reader plugin
self.reader_btn_group = QButtonGroup(self)
self.add_reader_buttons(layout, readers)
if self.reader_btn_group.buttons():
self.reader_btn_group.buttons()[0].toggle()
# OK & cancel buttons for the dialog
btns = QDialogButtonBox.Ok | QDialogButtonBox.Cancel
self.btn_box = QDialogButtonBox(btns)
self.btn_box.accepted.connect(self.accept)
self.btn_box.rejected.connect(self.reject)
# checkbox to remember the choice (doesn't pop up for folders)
extension = os.path.splitext(self._current_file)[1]
if extension:
self.persist_checkbox = QCheckBox(
f'Remember this choice for files with a {extension} extension'
)
self.persist_checkbox.toggle()
layout.addWidget(self.persist_checkbox)
layout.addWidget(self.btn_box)
self.setLayout(layout)
def add_reader_buttons(self, layout, readers):
"""Add radio button to layout for each reader in readers"""
for display_name in sorted(readers):
button = QRadioButton(f"{display_name}")
self.reader_btn_group.addButton(button)
layout.addWidget(button)
def _get_plugin_choice(self):
"""Get user's plugin choice based on the checked button"""
checked_btn = self.reader_btn_group.checkedButton()
if checked_btn:
return checked_btn.text()
def _get_persist_choice(self):
"""Get persistence checkbox choice"""
return (
hasattr(self, 'persist_checkbox')
and self.persist_checkbox.isChecked()
)
def get_user_choices(self) -> Optional[Tuple[str, bool]]:
"""Execute dialog and get user choices"""
dialog_result = self.exec_()
# user pressed cancel
if not dialog_result:
return None
# grab the selected radio button text
display_name = self._get_plugin_choice()
# grab the persistence checkbox choice
persist_choice = self._get_persist_choice()
return display_name, persist_choice
def get_reader_choice_for_file(
readerDialog: Any, readers: Dict[str, str], has_errored: bool
) -> Optional[Tuple[str, bool]]:
"""Gets choice of reader from user for the given filename.
If there is just one reader and no error message, dialog
is not shown. Otherwise, launch dialog and ask user for
plugin choice and whether setting is persisted.
Returns None if user cancels on dialog.
Parameters
----------
readerDialog : QtReaderDialog or MockQtReaderDialog
reader dialog to use for choices from the user
    readers : dict
Dictionary of display-name:plugin-name of potential readers for file
has_errored : bool
True when we've tried to read this file and failed, otherwise False
Returns
-------
display_name: str
Display name of the chosen plugin
persist_choice: bool
Whether to persist the chosen plugin choice or not
"""
display_name = ''
persist_choice = False
# if we have just one reader and no errors from existing settings
if len(readers) == 1 and not has_errored:
# no need to open the dialog, just get the reader choice
display_name = next(iter(readers.keys()))
return display_name, persist_choice
# either we have more reader options or there was an error
res = readerDialog.get_user_choices()
# user pressed cancel, return None
if not res:
return
display_name, persist_choice = res
return display_name, persist_choice
| 32.707483 | 88 | 0.649126 |
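A brief usage sketch for `get_reader_choice_for_file` above, using a stand-in dialog object so it runs without showing a Qt dialog. `FakeReaderDialog` and the reader names are hypothetical; only the import path is taken from the record's file path.

```python
# Assumed import path, taken from the metadata row above.
from napari._qt.dialogs.qt_reader_dialog import get_reader_choice_for_file

class FakeReaderDialog:
    """Hypothetical stand-in for QtReaderDialog that returns a canned user choice."""

    def __init__(self, choice):
        self._choice = choice  # (display_name, persist) tuple, or None to simulate cancel

    def get_user_choices(self):
        return self._choice

readers = {'napari builtins': 'builtins', 'example-tiff-reader': 'example_tiff'}

# Several candidate readers: the dialog is consulted for the plugin choice.
dialog = FakeReaderDialog(('example-tiff-reader', True))
print(get_reader_choice_for_file(dialog, readers, has_errored=False))
# -> ('example-tiff-reader', True)

# A single reader and no previous error: no dialog interaction is needed.
print(get_reader_choice_for_file(dialog, {'napari builtins': 'builtins'}, False))
# -> ('napari builtins', False)
```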
0e1a13f98a43955e0978a849e020046f83c11478 | 717 | py | Python | omaha_server/omaha/migrations/0019_auto_20150707_0822.py | makar21/omaha-server | b84cdf6e67d9106e7a86b447204de4f82397b019 | [ "Apache-2.0" ] | 8 | 2018-06-25T07:20:17.000Z | 2021-02-07T20:01:04.000Z | omaha_server/omaha/migrations/0019_auto_20150707_0822.py | makar21/omaha-server | b84cdf6e67d9106e7a86b447204de4f82397b019 | [ "Apache-2.0" ] | 8 | 2018-06-22T21:56:27.000Z | 2020-06-25T15:22:56.000Z | omaha_server/omaha/migrations/0019_auto_20150707_0822.py | dentalwings/omaha-server | 3d8e18c8f4aac4eb16445c0f3160ed1fc2fc8de5 | [ "Apache-2.0" ] | 11 | 2019-01-22T01:36:42.000Z | 2022-03-09T01:41:32.000Z |
# -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('omaha', '0018_auto_20150616_1112'),
]
operations = [
migrations.AlterModelOptions(
name='action',
options={},
),
migrations.AlterModelOptions(
name='application',
options={},
),
migrations.AlterModelOptions(
name='channel',
options={},
),
migrations.AlterModelOptions(
name='platform',
options={},
),
migrations.AlterModelOptions(
name='version',
options={},
),
]
| 20.485714 | 45 | 0.496513 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.