max_stars_repo_path (string, lengths 4-245) | max_stars_repo_name (string, lengths 7-115) | max_stars_count (int64, 101-368k) | id (string, lengths 2-8) | content (string, lengths 6-1.03M)
---|---|---|---|---|
test/inference_correctness/dcn_multi_hot.py | x-y-z/HugeCTR | 130 | 4039 | <filename>test/inference_correctness/dcn_multi_hot.py
import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(model_name = "dcn",
max_eval_batches = 1,
batchsize_eval = 16384,
batchsize = 16384,
lr = 0.001,
vvgpu = [[0]],
repeat_dataset = True,
use_mixed_precision = False,
scaler = 1.0,
use_cuda_graph = True,
metrics_spec = {hugectr.MetricsType.AUC: 1.0})
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm,
source = ["./dcn_data/file_list.txt"],
eval_source = "./dcn_data/file_list_test.txt",
check_type = hugectr.Check_t.Sum,
num_workers = 16)
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
update_type = hugectr.Update_t.Global,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 0.0001)
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
dense_dim = 13, dense_name = "dense",
data_reader_sparse_param_array =
[hugectr.DataReaderSparseParam("data1", 2, False, 26)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 300,
embedding_vec_size = 16,
combiner = "sum",
sparse_embedding_name = "sparse_embedding1",
bottom_name = "data1",
optimizer = optimizer))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["sparse_embedding1"],
top_names = ["reshape1"],
leading_dim=416))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["reshape1", "dense"], top_names = ["concat1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["concat1"],
top_names = ["slice11", "slice12"],
ranges=[(0,429),(0,429)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross,
bottom_names = ["slice11"],
top_names = ["multicross1"],
num_layers=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["slice12"],
top_names = ["fc1"],
num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc1"],
top_names = ["relu1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
bottom_names = ["relu1"],
top_names = ["dropout1"],
dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["dropout1"],
top_names = ["fc2"],
num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc2"],
top_names = ["relu2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
bottom_names = ["relu2"],
top_names = ["dropout2"],
dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["dropout2", "multicross1"],
top_names = ["concat2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["concat2"],
top_names = ["fc3"],
num_output=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss,
bottom_names = ["fc3", "label"],
top_names = ["loss"]))
model.compile()
model.summary()
model.graph_to_json(graph_config_file = "/dump_infer/dcn.json")
model.fit(max_iter = 2300, display = 200, eval_interval = 2000, snapshot = 2000, snapshot_prefix = "/dump_infer/dcn")
model.export_predictions("/dump_infer/dcn_pred_" + str(2000), "/dump_infer/dcn_label_" + str(2000))
from hugectr.inference import InferenceParams, CreateInferenceSession
import numpy as np
batch_size = 16384
num_batches = 1
data_source = "./dcn_data/file_list_test.txt"
inference_params = InferenceParams(model_name = "dcn",
max_batchsize = batch_size,
hit_rate_threshold = 1.0,
dense_model_file = "/dump_infer/dcn_dense_2000.model",
sparse_model_files = ["/dump_infer/dcn0_sparse_2000.model"],
device_id = 0,
use_gpu_embedding_cache = False,
cache_size_percentage = 1.0,
i64_input_key = False,
use_mixed_precision = False,
use_cuda_graph = True)
inference_session = CreateInferenceSession("/dump_infer/dcn.json", inference_params)
predictions = inference_session.predict(num_batches = num_batches,
source = data_source,
data_reader_type = hugectr.DataReaderType_t.Norm,
check_type = hugectr.Check_t.Sum)
ground_truth = np.loadtxt("/dump_infer/dcn_pred_2000")
diff = predictions - ground_truth
mse = np.mean(diff*diff)
if mse > 1e-3:
raise RuntimeError("Too large mse between DCN multi hot inference and training: {}".format(mse))
else:
print("DCN multi hot inference results are consistent with those during training, mse: {}".format(mse)) |
vbdiar/scoring/normalization.py | VarunSrivastava19/VBDiarization | 101 | 4051 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Brno University of Technology FIT
# Author: <NAME> <<EMAIL>>
# All Rights Reserved
import os
import logging
import pickle
import multiprocessing
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from vbdiar.features.segments import get_frames_from_time
from vbdiar.embeddings.embedding import extract_embeddings
from vbdiar.utils import mkdir_p
from vbdiar.utils.utils import Utils
logger = logging.getLogger(__name__)
def process_files(fns, speakers_dict, features_extractor, embedding_extractor,
audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length, n_jobs=1):
"""
Args:
fns:
speakers_dict:
features_extractor:
embedding_extractor:
audio_dir:
wav_suffix:
in_rttm_dir:
rttm_suffix:
min_length:
n_jobs:
Returns:
"""
kwargs = dict(speakers_dict=speakers_dict, features_extractor=features_extractor,
embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix,
in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, min_length=min_length)
if n_jobs == 1:
ret = _process_files((fns, kwargs))
else:
pool = multiprocessing.Pool(n_jobs)
ret = pool.map(_process_files, ((part, kwargs) for part in Utils.partition(fns, n_jobs)))
return ret
def _process_files(dargs):
"""
Args:
dargs:
Returns:
"""
fns, kwargs = dargs
ret = []
for fn in fns:
ret.append(process_file(file_name=fn, **kwargs))
return ret
def process_file(file_name, speakers_dict, features_extractor, embedding_extractor,
audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length):
""" Extract embeddings for all defined speakers.
Args:
file_name (string_types): path to input audio file
speakers_dict (dict): dictionary containing all embedding across speakers
features_extractor (Any):
embedding_extractor (Any):
audio_dir (string_types):
wav_suffix (string_types):
in_rttm_dir (string_types):
rttm_suffix (string_types):
min_length (float):
Returns:
dict: updated dictionary with speakers
"""
logger.info('Processing file `{}`.'.format(file_name.split()[0]))
# extract features from whole audio
features = features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix)))
# process utterances of the speakers
features_dict = {}
with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as f:
for line in f:
start_time, dur = int(float(line.split()[3]) * 1000), int(float(line.split()[4]) * 1000)
speaker = line.split()[7]
if dur > min_length:
end_time = start_time + dur
start, end = get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time))
if speaker not in features_dict:
features_dict[speaker] = {}
assert 0 <= start < end, \
f'Incorrect timing for extracting features, start: {start}, size: {features.shape[0]}, end: {end}.'
if end >= features.shape[0]:
end = features.shape[0] - 1
features_dict[speaker][(start_time, end_time)] = features[start:end]
for speaker in features_dict:
embedding_set = extract_embeddings(features_dict[speaker], embedding_extractor)
embeddings_long = embedding_set.get_all_embeddings()
if speaker not in speakers_dict.keys():
speakers_dict[speaker] = embeddings_long
else:
speakers_dict[speaker] = np.concatenate((speakers_dict[speaker], embeddings_long), axis=0)
return speakers_dict
class Normalization(object):
""" Speaker normalization S-Norm. """
embeddings = None
in_emb_dir = None
def __init__(self, norm_list, audio_dir=None, in_rttm_dir=None, in_emb_dir=None,
out_emb_dir=None, min_length=None, features_extractor=None, embedding_extractor=None,
plda=None, wav_suffix='.wav', rttm_suffix='.rttm', n_jobs=1):
""" Initialize normalization object.
Args:
norm_list (string_types): path to normalization list
audio_dir (string_types|None): path to audio directory
in_rttm_dir (string_types|None): path to directory with rttm files
in_emb_dir (str|None): path to directory with i-vectors
out_emb_dir (str|None): path to directory for storing embeddings
min_length (int): minimal length for extracting embeddings
features_extractor (Any): object for feature extraction
embedding_extractor (Any): object for extracting embedding
plda (PLDA|None): plda model object
wav_suffix (string_types): suffix of wav files
rttm_suffix (string_types): suffix of rttm files
"""
if audio_dir:
self.audio_dir = os.path.abspath(audio_dir)
self.norm_list = norm_list
if in_rttm_dir:
self.in_rttm_dir = os.path.abspath(in_rttm_dir)
else:
raise ValueError('It is required to have input rttm files for normalization.')
self.features_extractor = features_extractor
self.embedding_extractor = embedding_extractor
self.plda = plda
self.wav_suffix = wav_suffix
self.rttm_suffix = rttm_suffix
if in_emb_dir:
self.in_emb_dir = os.path.abspath(in_emb_dir)
if out_emb_dir:
self.out_emb_dir = os.path.abspath(out_emb_dir)
self.min_length = min_length
self.n_jobs = n_jobs
if self.in_emb_dir is None:
self.embeddings = self.extract_embeddings()
else:
self.embeddings = self.load_embeddings()
self.mean = np.mean(self.embeddings, axis=0)
def __iter__(self):
current = 0
while current < len(self.embeddings):
yield self.embeddings[current]
current += 1
def __getitem__(self, key):
return self.embeddings[key]
def __setitem__(self, key, value):
self.embeddings[key] = value
def __len__(self):
return len(self.embeddings)
def extract_embeddings(self):
""" Extract normalization embeddings using averaging.
Returns:
            np.array: speaker-averaged normalization embeddings, one vector per speaker
"""
speakers_dict, fns = {}, []
with open(self.norm_list) as f:
for line in f:
if len(line.split()) > 1: # number of speakers is defined
line = line.split()[0]
else:
line = line.replace(os.linesep, '')
fns.append(line)
speakers_dict = process_files(fns, speakers_dict=speakers_dict, features_extractor=self.features_extractor,
embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir,
wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir,
rttm_suffix=self.rttm_suffix, min_length=self.min_length, n_jobs=self.n_jobs)
assert len(speakers_dict) == len(fns)
# all are the same
merged_speakers_dict = speakers_dict[0]
if self.out_emb_dir:
for speaker in merged_speakers_dict:
out_path = os.path.join(self.out_emb_dir, f'{speaker}.pkl')
mkdir_p(os.path.dirname(out_path))
with open(out_path, 'wb') as f:
pickle.dump(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL)
for speaker in merged_speakers_dict:
merged_speakers_dict[speaker] = np.mean(merged_speakers_dict[speaker], axis=0)
return np.array(list(merged_speakers_dict.values()))
def load_embeddings(self):
""" Load normalization embeddings from pickle files.
Returns:
np.array: embeddings per speaker
"""
embeddings, speakers = [], set()
with open(self.norm_list) as f:
for file_name in f:
if len(file_name.split()) > 1: # number of speakers is defined
file_name = file_name.split()[0]
else:
file_name = file_name.replace(os.linesep, '')
with open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name), self.rttm_suffix)) as fp:
for line in fp:
speakers.add(line.split()[7])
logger.info('Loading pickled normalization embeddings from `{}`.'.format(self.in_emb_dir))
for speaker in speakers:
embedding_path = os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker))
if os.path.isfile(embedding_path):
logger.info('Loading normalization pickle file `{}`.'.format(speaker))
with open(embedding_path, 'rb') as f:
# append mean from speaker's embeddings
speaker_embeddings = pickle.load(f)
embeddings.append(np.mean(speaker_embeddings, axis=0))
else:
logger.warning('No pickle file found for `{}` in `{}`.'.format(speaker, self.in_emb_dir))
return np.array(embeddings)
def s_norm(self, test, enroll):
""" Run speaker normalization (S-Norm) on cached embeddings.
Args:
test (np.array): test embedding
enroll (np.array): enroll embedding
Returns:
float: hypothesis
"""
if self.plda:
a = self.plda.score(test, self.embeddings).T
b = self.plda.score(enroll, self.embeddings).T
c = self.plda.score(enroll, test).T
else:
a = cosine_similarity(test, self.embeddings).T
b = cosine_similarity(enroll, self.embeddings).T
c = cosine_similarity(enroll, test).T
scores = []
for ii in range(test.shape[0]):
test_scores = []
for jj in range(enroll.shape[0]):
test_mean, test_std = np.mean(a.T[ii]), np.std(a.T[ii])
enroll_mean, enroll_std = np.mean(b.T[jj]), np.std(b.T[jj])
s = c[ii][jj]
test_scores.append((((s - test_mean) / test_std + (s - enroll_mean) / enroll_std) / 2))
scores.append(test_scores)
return np.array(scores)
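# The loop above implements symmetric score normalization:
#     s_norm(s) = ((s - mu_test) / sigma_test + (s - mu_enroll) / sigma_enroll) / 2
# where the means and standard deviations come from scoring each side of the
# trial against the cached cohort embeddings in `self.embeddings`.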
|
nuke/pymmh3.py | jfpanisset/Cryptomatte | 543 | 4066 | '''
pymmh3 was written by <NAME> and enhanced by <NAME>, and is placed in the public
domain. The authors hereby disclaim copyright to this source code.
pure python implementation of the murmur3 hash algorithm
https://code.google.com/p/smhasher/wiki/MurmurHash3
This was written for the times when you do not want to compile c-code and install modules,
and you only want a drop-in murmur3 implementation.
As this is purely python it is FAR from performant and if performance is anything that is needed
a proper c-module is suggested!
This module is written to have the same format as mmh3 python package found here for simple conversions:
https://pypi.python.org/pypi/mmh3/2.3.1
'''
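# Illustrative usage, mirroring the mmh3 API that this module emulates (exact
# return values depend on the input and seed, so none are asserted here):
#
#   import pymmh3 as mmh3
#   mmh3.hash( 'foo' )          # 32 bit signed integer
#   mmh3.hash64( 'foo' )        # tuple of two 64 bit signed integers
#   mmh3.hash128( 'foo' )       # 128 bit unsigned integer
#   mmh3.hash_bytes( 'foo' )    # 16 character digest string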
import sys as _sys
if (_sys.version_info > (3, 0)):
def xrange( a, b, c ):
return list(range( a, b, c))
def xencode(x):
if isinstance(x, bytes) or isinstance(x, bytearray):
return x
else:
return x.encode()
else:
def xencode(x):
return x
del _sys
def hash( key, seed = 0x0 ):
''' Implements 32bit murmur3 hash. '''
key = bytearray( xencode(key) )
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 4 )
h1 = seed
c1 = 0xcc9e2d51
c2 = 0x1b873593
# body
for block_start in range( 0, nblocks * 4, 4 ):
# ??? big endian?
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
unsigned_val = fmix( h1 ^ length )
if unsigned_val & 0x80000000 == 0:
return unsigned_val
else:
return -( (unsigned_val ^ 0xFFFFFFFF) + 1 )
def hash128( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. '''
def hash128_x64( key, seed ):
''' Implements 128bit murmur3 hash for x64. '''
def fmix( k ):
k ^= k >> 33
k = ( k * 0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF
k ^= k >> 33
k = ( k * 0xc4ceb9fe1a85ec53 ) & 0xFFFFFFFFFFFFFFFF
k ^= k >> 33
return k
length = len( key )
nblocks = int( length / 16 )
h1 = seed
h2 = seed
c1 = 0x87c37b91114253d5
c2 = 0x4cf5ad432745937f
#body
for block_start in range( 0, nblocks * 8, 8 ):
# ??? big endian?
k1 = key[ 2 * block_start + 7 ] << 56 | \
key[ 2 * block_start + 6 ] << 48 | \
key[ 2 * block_start + 5 ] << 40 | \
key[ 2 * block_start + 4 ] << 32 | \
key[ 2 * block_start + 3 ] << 24 | \
key[ 2 * block_start + 2 ] << 16 | \
key[ 2 * block_start + 1 ] << 8 | \
key[ 2 * block_start + 0 ]
k2 = key[ 2 * block_start + 15 ] << 56 | \
key[ 2 * block_start + 14 ] << 48 | \
key[ 2 * block_start + 13 ] << 40 | \
key[ 2 * block_start + 12 ] << 32 | \
key[ 2 * block_start + 11 ] << 24 | \
key[ 2 * block_start + 10 ] << 16 | \
key[ 2 * block_start + 9 ] << 8 | \
key[ 2 * block_start + 8 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFFFFFFFFFF
k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k1 = ( c2 * k1 ) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
h1 = ( h1 << 27 | h1 >> 37 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h1 = ( h1 * 5 + 0x52dce729 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( c2 * k2 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k2 = ( c1 * k2 ) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
h2 = ( h2 << 31 | h2 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h2 * 5 + 0x38495ab5 ) & 0xFFFFFFFFFFFFFFFF
#tail
tail_index = nblocks * 16
k1 = 0
k2 = 0
tail_size = length & 15
if tail_size >= 15:
k2 ^= key[ tail_index + 14 ] << 48
if tail_size >= 14:
k2 ^= key[ tail_index + 13 ] << 40
if tail_size >= 13:
k2 ^= key[ tail_index + 12 ] << 32
if tail_size >= 12:
k2 ^= key[ tail_index + 11 ] << 24
if tail_size >= 11:
k2 ^= key[ tail_index + 10 ] << 16
if tail_size >= 10:
k2 ^= key[ tail_index + 9 ] << 8
if tail_size >= 9:
k2 ^= key[ tail_index + 8 ]
if tail_size > 8:
k2 = ( k2 * c2 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k2 = ( k2 * c1 ) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
if tail_size >= 8:
k1 ^= key[ tail_index + 7 ] << 56
if tail_size >= 7:
k1 ^= key[ tail_index + 6 ] << 48
if tail_size >= 6:
k1 ^= key[ tail_index + 5 ] << 40
if tail_size >= 5:
k1 ^= key[ tail_index + 4 ] << 32
if tail_size >= 4:
k1 ^= key[ tail_index + 3 ] << 24
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFFFFFFFFFF
k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k1 = ( k1 * c2 ) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
#finalization
h1 ^= length
h2 ^= length
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h1 = fmix( h1 )
h2 = fmix( h2 )
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
return ( h2 << 64 | h1 )
def hash128_x86( key, seed ):
''' Implements 128bit murmur3 hash for x86. '''
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 16 )
h1 = seed
h2 = seed
h3 = seed
h4 = seed
c1 = 0x239b961b
c2 = 0xab0e9789
c3 = 0x38b34ae5
c4 = 0xa1e38b93
#body
for block_start in range( 0, nblocks * 16, 16 ):
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k2 = key[ block_start + 7 ] << 24 | \
key[ block_start + 6 ] << 16 | \
key[ block_start + 5 ] << 8 | \
key[ block_start + 4 ]
k3 = key[ block_start + 11 ] << 24 | \
key[ block_start + 10 ] << 16 | \
key[ block_start + 9 ] << 8 | \
key[ block_start + 8 ]
k4 = key[ block_start + 15 ] << 24 | \
key[ block_start + 14 ] << 16 | \
key[ block_start + 13 ] << 8 | \
key[ block_start + 12 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 19 | h1 >> 13 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 * 5 + 0x561ccd1b ) & 0xFFFFFFFF
k2 = ( c2 * k2 ) & 0xFFFFFFFF
k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32
k2 = ( c3 * k2 ) & 0xFFFFFFFF
h2 ^= k2
h2 = ( h2 << 17 | h2 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
h2 = ( h2 + h3 ) & 0xFFFFFFFF
h2 = ( h2 * 5 + 0x0bcaa747 ) & 0xFFFFFFFF
k3 = ( c3 * k3 ) & 0xFFFFFFFF
k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
k3 = ( c4 * k3 ) & 0xFFFFFFFF
h3 ^= k3
h3 = ( h3 << 15 | h3 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
h3 = ( h3 + h4 ) & 0xFFFFFFFF
h3 = ( h3 * 5 + 0x96cd1c35 ) & 0xFFFFFFFF
k4 = ( c4 * k4 ) & 0xFFFFFFFF
k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32
k4 = ( c1 * k4 ) & 0xFFFFFFFF
h4 ^= k4
h4 = ( h4 << 13 | h4 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h4 = ( h1 + h4 ) & 0xFFFFFFFF
h4 = ( h4 * 5 + 0x32ac3b17 ) & 0xFFFFFFFF
#tail
tail_index = nblocks * 16
k1 = 0
k2 = 0
k3 = 0
k4 = 0
tail_size = length & 15
if tail_size >= 15:
k4 ^= key[ tail_index + 14 ] << 16
if tail_size >= 14:
k4 ^= key[ tail_index + 13 ] << 8
if tail_size >= 13:
k4 ^= key[ tail_index + 12 ]
if tail_size > 12:
k4 = ( k4 * c4 ) & 0xFFFFFFFF
k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32
k4 = ( k4 * c1 ) & 0xFFFFFFFF
h4 ^= k4
if tail_size >= 12:
k3 ^= key[ tail_index + 11 ] << 24
if tail_size >= 11:
k3 ^= key[ tail_index + 10 ] << 16
if tail_size >= 10:
k3 ^= key[ tail_index + 9 ] << 8
if tail_size >= 9:
k3 ^= key[ tail_index + 8 ]
if tail_size > 8:
k3 = ( k3 * c3 ) & 0xFFFFFFFF
k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
k3 = ( k3 * c4 ) & 0xFFFFFFFF
h3 ^= k3
if tail_size >= 8:
k2 ^= key[ tail_index + 7 ] << 24
if tail_size >= 7:
k2 ^= key[ tail_index + 6 ] << 16
if tail_size >= 6:
k2 ^= key[ tail_index + 5 ] << 8
if tail_size >= 5:
k2 ^= key[ tail_index + 4 ]
if tail_size > 4:
k2 = ( k2 * c2 ) & 0xFFFFFFFF
k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32
k2 = ( k2 * c3 ) & 0xFFFFFFFF
h2 ^= k2
if tail_size >= 4:
k1 ^= key[ tail_index + 3 ] << 24
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
h1 ^= length
h2 ^= length
h3 ^= length
h4 ^= length
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 + h3 ) & 0xFFFFFFFF
h1 = ( h1 + h4 ) & 0xFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFF
h3 = ( h1 + h3 ) & 0xFFFFFFFF
h4 = ( h1 + h4 ) & 0xFFFFFFFF
h1 = fmix( h1 )
h2 = fmix( h2 )
h3 = fmix( h3 )
h4 = fmix( h4 )
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 + h3 ) & 0xFFFFFFFF
h1 = ( h1 + h4 ) & 0xFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFF
h3 = ( h1 + h3 ) & 0xFFFFFFFF
h4 = ( h1 + h4 ) & 0xFFFFFFFF
return ( h4 << 96 | h3 << 64 | h2 << 32 | h1 )
key = bytearray( xencode(key) )
if x64arch:
return hash128_x64( key, seed )
else:
return hash128_x86( key, seed )
def hash64( key, seed = 0x0, x64arch = True ):
''' Implements 64bit murmur3 hash. Returns a tuple. '''
hash_128 = hash128( key, seed, x64arch )
unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF
if unsigned_val1 & 0x8000000000000000 == 0:
signed_val1 = unsigned_val1
else:
signed_val1 = -( (unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
unsigned_val2 = ( hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF
if unsigned_val2 & 0x8000000000000000 == 0:
signed_val2 = unsigned_val2
else:
signed_val2 = -( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
return ( int( signed_val1 ), int( signed_val2 ) )
def hash_bytes( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. Returns a byte string. '''
hash_128 = hash128( key, seed, x64arch )
bytestring = ''
for i in range(0, 16, 1):
lsbyte = hash_128 & 0xFF
bytestring = bytestring + str( chr( lsbyte ) )
hash_128 = hash_128 >> 8
return bytestring
if __name__ == "__main__":
    import argparse
    import sys
parser = argparse.ArgumentParser( 'pymurmur3', 'pymurmur [options] "string to hash"' )
parser.add_argument( '--seed', type = int, default = 0 )
parser.add_argument( 'strings', default = [], nargs='+')
opts = parser.parse_args()
for str_to_hash in opts.strings:
        sys.stdout.write( '"%s" = 0x%08X\n' % ( str_to_hash, hash( str_to_hash, opts.seed ) ) )
|
ambari-common/src/main/python/resource_management/libraries/functions/get_bare_principal.py | likenamehaojie/Apache-Ambari-ZH | 1,664 | 4075 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import re
__all__ = ["get_bare_principal"]
def get_bare_principal(normalized_principal_name):
"""
Given a normalized principal name (nimbus/c6501.ambari.apache.org@EXAMPLE.COM) returns just the
primary component (nimbus)
:param normalized_principal_name: a string containing the principal name to process
:return: a string containing the primary component value or None if not valid
"""
bare_principal = None
if normalized_principal_name:
match = re.match(r"([^/@]+)(?:/[^@])?(?:@.*)?", normalized_principal_name)
if match:
bare_principal = match.group(1)
return bare_principal |
util/config/validators/test/test_validate_bitbucket_trigger.py | giuseppe/quay | 2,027 | 4082 | import pytest
from httmock import urlmatch, HTTMock
from util.config import URLSchemeAndHostname
from util.config.validator import ValidatorContext
from util.config.validators import ConfigValidationException
from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator
from test.fixtures import *
@pytest.mark.parametrize(
"unvalidated_config",
[
(ValidatorContext({})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {}})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {"CONSUMER_KEY": "foo"}})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {"CONSUMER_SECRET": "foo"}})),
],
)
def test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app):
validator = BitbucketTriggerValidator()
with pytest.raises(ConfigValidationException):
validator.validate(unvalidated_config)
def test_validate_bitbucket_trigger(app):
url_hit = [False]
@urlmatch(netloc=r"bitbucket.org")
def handler(url, request):
url_hit[0] = True
return {
"status_code": 200,
"content": "oauth_token=foo&oauth_token_secret=bar",
}
with HTTMock(handler):
validator = BitbucketTriggerValidator()
url_scheme_and_hostname = URLSchemeAndHostname("http", "localhost:5000")
unvalidated_config = ValidatorContext(
{
"BITBUCKET_TRIGGER_CONFIG": {
"CONSUMER_KEY": "foo",
"CONSUMER_SECRET": "bar",
},
},
url_scheme_and_hostname=url_scheme_and_hostname,
)
validator.validate(unvalidated_config)
assert url_hit[0]
|
rlcycle/dqn_base/loss.py | cyoon1729/Rlcycle | 128 | 4089 | from typing import List, Tuple
from omegaconf import DictConfig
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlcycle.common.abstract.loss import Loss
class DQNLoss(Loss):
"""Compute double DQN loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
q_value = network.forward(states).gather(1, actions)
with torch.no_grad():
next_q = torch.max(target_network.forward(next_states), 1)[0].unsqueeze(1)
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_q = rewards + (1 - dones) * n_step_gamma * next_q
element_wise_loss = F.smooth_l1_loss(
q_value, target_q.detach(), reduction="none"
)
return element_wise_loss
class QRLoss(Loss):
"""Compute quantile regression loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...],
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
z_dists = network.forward(states)
z_dists = z_dists[list(range(states.size(0))), actions.view(-1)]
with torch.no_grad():
next_z = target_network.forward(next_states)
next_actions = torch.max(next_z.mean(2), dim=1)[1]
next_z = next_z[list(range(states.size(0))), next_actions]
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_z = rewards + (1 - dones) * n_step_gamma * next_z
distance = target_z - z_dists
quantile_huber_loss = (
network.tau - (distance.detach() < 0).float()
).abs() * self.huber_loss(distance)
element_wise_loss = torch.mean(quantile_huber_loss, dim=1, keepdim=True)
return element_wise_loss
@staticmethod
def huber_loss(x: List[torch.Tensor], k: float = 1.0):
return torch.where(x.abs() <= k, 0.5 * x.pow(2), k * (x.abs() - 0.5 * k))
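# The element-wise loss computed in QRLoss.__call__ is the quantile regression
# (QR-DQN) objective: for pairwise TD errors u = target_z - z_dists it takes
#     rho_tau(u) = |tau - 1{u < 0}| * L_kappa(u)
# with L_kappa the Huber loss above (kappa = 1.0) and tau the fixed quantile
# fractions stored on the network as `network.tau`.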
class CategoricalLoss(Loss):
"""Compute C51 loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
batch_size = states.size(0)
offset = (
torch.linspace(0, (batch_size - 1) * network.num_atoms, batch_size)
.long()
.unsqueeze(1)
.expand(batch_size, network.num_atoms)
)
if self.use_cuda:
offset = offset.cuda()
z_dists = network.forward(states)
z_dists = z_dists[list(range(states.size(0))), actions.view(-1)]
with torch.no_grad():
next_z = target_network.forward(next_states)
next_actions = torch.max(next_z.mean(2), dim=1)[1]
next_z = next_z[list(range(states.size(0))), next_actions]
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_z = rewards + (1 - dones) * n_step_gamma * network.support
target_z = torch.clamp(target_z, min=network.v_min, max=network.v_max)
target_proj = self.dist_projection(network, next_z, target_z, offset)
log_dist = torch.log(z_dists)
element_wise_loss = -(target_proj * log_dist).sum(1)
return element_wise_loss
def dist_projection(
self,
network: nn.Module,
next_z: torch.Tensor,
target_z: torch.Tensor,
offset: torch.Tensor,
) -> torch.Tensor:
b = (target_z - network.v_min) / network.delta_z
lb = b.floor().long()
ub = b.ceil().long()
proj_dist = torch.zeros(next_z.size())
if self.use_cuda:
proj_dist = proj_dist.cuda()
proj_dist.view(-1).index_add_(
0, (lb + offset).view(-1), (next_z * (ub.float() - b)).view(-1)
)
proj_dist.view(-1).index_add_(
0, (ub + offset).view(-1), (next_z * (b - lb.float())).view(-1)
)
return proj_dist
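# dist_projection performs the categorical (C51) projection: each target atom
# is mapped to a fractional bin index b = (target_z - v_min) / delta_z, and its
# probability mass from next_z is split between the neighbouring support atoms
# floor(b) and ceil(b) in proportion to (ub - b) and (b - lb), using index_add_
# with `offset` to address the flattened (batch, atom) grid.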
|
venv/Lib/site-packages/zmq/tests/test_draft.py | ajayiagbebaku/NFL-Model | 603 | 4103 | # -*- coding: utf8 -*-
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import platform
import time
import pytest
import zmq
from zmq.tests import BaseZMQTestCase, skip_pypy
class TestDraftSockets(BaseZMQTestCase):
def setUp(self):
if not zmq.DRAFT_API:
raise pytest.skip("draft api unavailable")
super(TestDraftSockets, self).setUp()
def test_client_server(self):
client, server = self.create_bound_pair(zmq.CLIENT, zmq.SERVER)
client.send(b'request')
msg = self.recv(server, copy=False)
assert msg.routing_id is not None
server.send(b'reply', routing_id=msg.routing_id)
reply = self.recv(client)
assert reply == b'reply'
def test_radio_dish(self):
dish, radio = self.create_bound_pair(zmq.DISH, zmq.RADIO)
dish.rcvtimeo = 250
group = 'mygroup'
dish.join(group)
received_count = 0
received = set()
sent = set()
for i in range(10):
msg = str(i).encode('ascii')
sent.add(msg)
radio.send(msg, group=group)
try:
recvd = dish.recv()
except zmq.Again:
time.sleep(0.1)
else:
received.add(recvd)
received_count += 1
# assert that we got *something*
assert len(received.intersection(sent)) >= 5
|
base/site-packages/django_qbe/urls.py | edisonlz/fastor | 285 | 4106 | <filename>base/site-packages/django_qbe/urls.py
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from django_qbe.exports import formats
urlpatterns = patterns('django_qbe.views',
url(r'^$', 'qbe_form', name="qbe_form"),
url(r'^js/$', 'qbe_js', name="qbe_js"),
url(r'^results/bookmark/$',
'qbe_bookmark', name="qbe_bookmark"),
url(r'^results/export/(?P<format>(%s))/$' % "|".join(formats.keys()),
'qbe_export', name="qbe_export"),
url(r'^results/proxy/$',
'qbe_proxy', name="qbe_proxy"),
url(r'^results/(?P<query_hash>(.*))/$',
'qbe_results', name="qbe_results"),
url(r'^auto/$', 'qbe_autocomplete', name="qbe_autocomplete"),
)
|
augment.py | docongminh/Text-Image-Augmentation-python | 217 | 4107 | # -*- coding:utf-8 -*-
# Author: RubanSeven
# import cv2
import numpy as np
# from transform import get_perspective_transform, warp_perspective
from warp_mls import WarpMLS
def distort(src, segment):
img_h, img_w = src.shape[:2]
cut = img_w // segment
thresh = cut // 3
# thresh = img_h // segment // 3
# thresh = img_h // 5
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), img_h - np.random.randint(thresh)])
dst_pts.append([np.random.randint(thresh), img_h - np.random.randint(thresh)])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, segment, 1):
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
np.random.randint(thresh) - half_thresh])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
img_h + np.random.randint(thresh) - half_thresh])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
def stretch(src, segment):
img_h, img_w = src.shape[:2]
cut = img_w // segment
thresh = cut * 4 // 5
# thresh = img_h // segment // 3
# thresh = img_h // 5
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, 0])
dst_pts.append([img_w, 0])
dst_pts.append([img_w, img_h])
dst_pts.append([0, img_h])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, segment, 1):
move = np.random.randint(thresh) - half_thresh
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + move, 0])
dst_pts.append([cut * cut_idx + move, img_h])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
def perspective(src):
img_h, img_w = src.shape[:2]
thresh = img_h // 2
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, np.random.randint(thresh)])
dst_pts.append([img_w, np.random.randint(thresh)])
dst_pts.append([img_w, img_h - np.random.randint(thresh)])
dst_pts.append([0, img_h - np.random.randint(thresh)])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
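# Illustrative usage (a sketch: it assumes OpenCV is installed for image I/O
# and that 'word.png' is a local text-line image, neither of which this module
# itself requires):
#
#   import cv2
#   img = cv2.imread('word.png')
#   distorted = distort(img, segment=4)
#   stretched = stretch(img, segment=4)
#   tilted = perspective(img)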
# def distort(src, segment):
# img_h, img_w = src.shape[:2]
# dst = np.zeros_like(src, dtype=np.uint8)
#
# cut = img_w // segment
# thresh = img_h // 8
#
# src_pts = list()
# # dst_pts = list()
#
# src_pts.append([-np.random.randint(thresh), -np.random.randint(thresh)])
# src_pts.append([-np.random.randint(thresh), img_h + np.random.randint(thresh)])
#
# # dst_pts.append([0, 0])
# # dst_pts.append([0, img_h])
# dst_box = np.array([[0, 0], [0, img_h], [cut, 0], [cut, img_h]], dtype=np.float32)
#
# half_thresh = thresh * 0.5
#
# for cut_idx in np.arange(1, segment, 1):
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
#
# # dst_pts.append([cut * i, 0])
# # dst_pts.append([cut * i, img_h])
#
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # print(mat)
# # dst[:, cut * (cut_idx - 1):cut * cut_idx] = cv2.warpPerspective(src, mat, (cut, img_h))
#
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (cut_idx - 1):cut * cut_idx] = warp_perspective(src, mat, (cut, img_h))
# # print(mat)
#
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # dst[:, cut * (segment - 1):] = cv2.warpPerspective(src, mat, (img_w - cut * (segment - 1), img_h))
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (segment - 1):] = warp_perspective(src, mat, (img_w - cut * (segment - 1), img_h))
#
# return dst
|
devito/passes/iet/languages/C.py | guaacoelho/devito | 199 | 4111 | from devito.ir import Call
from devito.passes.iet.definitions import DataManager
from devito.passes.iet.langbase import LangBB
__all__ = ['CBB', 'CDataManager']
class CBB(LangBB):
mapper = {
'aligned': lambda i:
'__attribute__((aligned(%d)))' % i,
'host-alloc': lambda i, j, k:
Call('posix_memalign', (i, j, k)),
'host-free': lambda i:
Call('free', (i,)),
}
class CDataManager(DataManager):
lang = CBB
|
donkeycar/parts/pytorch/torch_data.py | adricl/donkeycar | 1,100 | 4113 | # PyTorch
import torch
from torch.utils.data import IterableDataset, DataLoader
from donkeycar.utils import train_test_split
from donkeycar.parts.tub_v2 import Tub
from torchvision import transforms
from typing import List, Any
from donkeycar.pipeline.types import TubRecord, TubDataset
from donkeycar.pipeline.sequence import TubSequence
import pytorch_lightning as pl
def get_default_transform(for_video=False, for_inference=False, resize=True):
"""
Creates a default transform to work with torchvision models
Video transform:
All pre-trained models expect input images normalized in the same way,
i.e. mini-batches of 3-channel RGB videos of shape (3 x T x H x W),
where H and W are expected to be 112, and T is a number of video frames
in a clip. The images have to be loaded in to a range of [0, 1] and
then normalized using mean = [0.43216, 0.394666, 0.37645] and
std = [0.22803, 0.22145, 0.216989].
"""
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
input_size = (224, 224)
if for_video:
mean = [0.43216, 0.394666, 0.37645]
std = [0.22803, 0.22145, 0.216989]
input_size = (112, 112)
transform_items = [
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
]
if resize:
transform_items.insert(0, transforms.Resize(input_size))
return transforms.Compose(transform_items)
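# Illustrative usage (a sketch; PIL and 'frame.jpg' are assumptions of the
# example, not requirements of this module):
#
#   from PIL import Image
#   tfm = get_default_transform()
#   x = tfm(Image.open('frame.jpg').convert('RGB'))  # tensor of shape (3, 224, 224)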
class TorchTubDataset(IterableDataset):
'''
Loads the dataset, and creates a train/test split.
'''
def __init__(self, config, records: List[TubRecord], transform=None):
"""Create a PyTorch Tub Dataset
Args:
config (object): the configuration information
records (List[TubRecord]): a list of tub records
transform (function, optional): a transform to apply to the data
"""
self.config = config
# Handle the transforms
if transform:
self.transform = transform
else:
self.transform = get_default_transform()
self.sequence = TubSequence(records)
self.pipeline = self._create_pipeline()
self.len = len(records)
def _create_pipeline(self):
""" This can be overridden if more complicated pipelines are
required """
def y_transform(record: TubRecord):
angle: float = record.underlying['user/angle']
throttle: float = record.underlying['user/throttle']
predictions = torch.tensor([angle, throttle], dtype=torch.float)
# Normalize to be between [0, 1]
# angle and throttle are originally between [-1, 1]
predictions = (predictions + 1) / 2
return predictions
def x_transform(record: TubRecord):
# Loads the result of Image.open()
img_arr = record.image(cached=True, as_nparray=False)
return self.transform(img_arr)
# Build pipeline using the transformations
pipeline = self.sequence.build_pipeline(x_transform=x_transform,
y_transform=y_transform)
return pipeline
def __len__(self):
return len(self.sequence)
def __iter__(self):
return iter(self.pipeline)
class TorchTubDataModule(pl.LightningDataModule):
def __init__(self, config: Any, tub_paths: List[str], transform=None):
"""Create a PyTorch Lightning Data Module to contain all data loading logic
Args:
config (object): the configuration information
tub_paths (List[str]): a list of paths to the tubs to use (minimum size of 1).
Each tub path corresponds to another training run.
transform (function, optional): a transform to apply to the data
"""
super().__init__()
self.config = config
self.tub_paths = tub_paths
# Handle the transforms
if transform:
self.transform = transform
else:
self.transform = get_default_transform()
self.tubs: List[Tub] = [Tub(tub_path, read_only=True)
for tub_path in self.tub_paths]
self.records: List[TubRecord] = []
def setup(self, stage=None):
"""Load all the tub data and set up the datasets.
Args:
stage ([string], optional): setup expects a string arg stage.
It is used to separate setup logic for trainer.fit
and trainer.test. Defaults to None.
"""
# Loop through all the different tubs and load all the records for each of them
for tub in self.tubs:
for underlying in tub:
record = TubRecord(self.config, tub.base_path,
underlying=underlying)
self.records.append(record)
train_records, val_records = train_test_split(
self.records, test_size=(1. - self.config.TRAIN_TEST_SPLIT))
assert len(val_records) > 0, "Not enough validation data. Add more data"
self.train_dataset = TorchTubDataset(
self.config, train_records, transform=self.transform)
self.val_dataset = TorchTubDataset(
self.config, val_records, transform=self.transform)
def train_dataloader(self):
# The number of workers are set to 0 to avoid errors on Macs and Windows
# See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
return DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)
def val_dataloader(self):
# The number of workers are set to 0 to avoid errors on Macs and Windows
# See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
return DataLoader(self.val_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)
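# Illustrative wiring (a sketch: `cfg`, `tub_paths` and `model` are assumed to
# be a donkeycar config with BATCH_SIZE/TRAIN_TEST_SPLIT set, a list of existing
# tub directories, and a pl.LightningModule respectively):
#
#   dm = TorchTubDataModule(cfg, tub_paths)
#   trainer = pl.Trainer(max_epochs=10)
#   trainer.fit(model, datamodule=dm)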
|
tests/test_ordering.py | deepio-oc/pabot | 379 | 4131 | from robot import __version__ as ROBOT_VERSION
import sys
import tempfile
import textwrap
import unittest
import shutil
import subprocess
class PabotOrderingGroupTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def _run_tests_with(self, testfile, orderfile):
robot_file = open("{}/test.robot".format(self.tmpdir), "w")
robot_file.write(textwrap.dedent(testfile))
robot_file.close()
with open("{}/order.dat".format(self.tmpdir), "w") as f:
f.write(textwrap.dedent(orderfile))
process = subprocess.Popen(
[
sys.executable,
"-m" "pabot.pabot",
"--testlevelsplit",
"--ordering",
"{}/order.dat".format(self.tmpdir),
"{}/test.robot".format(self.tmpdir),
],
cwd=self.tmpdir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return process.communicate()
def test_orders(self):
stdout, stderr = self._run_tests_with(
"""
*** Variables ***
${SCALAR} Hello, globe!
*** Test Cases ***
First Test
Set Suite Variable ${SCALAR} Hello, world!
Second Test
Should Be Equal ${SCALAR} Hello, world!
Third Test
Should Be Equal ${SCALAR} Hello, globe!
""",
"""
{
--test Test.First Test
--test Test.Second Test
}
--test Test.Third Test
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 2)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 2)
def test_two_orders(self):
stdout, stderr = self._run_tests_with(
"""
*** Variables ***
${SCALAR} Hello, globe!
*** Test Cases ***
First Test
Set Suite Variable ${SCALAR} Hello, world!
Second Test
Should Be Equal ${SCALAR} Hello, world!
Second And Quarter
Should Be Equal ${SCALAR} Hello, globe!
Second And Half
Should Be Equal ${SCALAR} Hello, globe!
Third Test
Should Be Equal ${SCALAR} Hello, globe!
""",
"""
{
--test Test.First Test
--test Test.Second Test
}
{
--test Test.Second And Quarter
--test Test.Second And Half
}
--test Test.Third Test
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
if ROBOT_VERSION < "4.0":
expected_write = "5 critical tests, 5 passed, 0 failed"
else:
expected_write = "5 tests, 5 passed, 0 failed, 0 skipped."
self.assertIn(expected_write, stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 3)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
if ROBOT_VERSION < "4.0":
expected_write = b"5 critical tests, 5 passed, 0 failed"
else:
expected_write = b"5 tests, 5 passed, 0 failed, 0 skipped."
self.assertIn(expected_write, stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 3)
def test_too_big_testname(self):
stdout, stderr = self._run_tests_with(
"""
*** Test Cases ***
Test Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris eu velit nunc. Duis eget purus eget orci porta blandit sed ut tortor. Nunc vel nulla bibendum, auctor sem ac, molestie risus. Sed eu metus volutpat, hendrerit nibh in, auctor urna. Nunc a sodales.
Log Test
""",
"""
--test Invalid
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 1)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 1)
def test_longnames_in_tests(self):
stdout, stderr = self._run_tests_with(
"""
*** Settings ***
Test Template Test1
*** Test Cases ***
The Somewhat Long Name Of The Test S1Test 01 1
The Somewhat Long Name Of The Test S1Test 02 1
The Somewhat Long Name Of The Test S1Test 03 1
The Somewhat Long Name Of The Test S1Test 04 1
The Somewhat Long Name Of The Test S1Test 05 1
The Somewhat Long Name Of The Test S1Test 06 1
The Somewhat Long Name Of The Test S1Test 07 1
The Somewhat Long Name Of The Test S1Test 08 1
The Somewhat Long Name Of The Test S1Test 09 1
The Somewhat Long Name Of The Test S1Test 10 1
The Somewhat Long Name Of The Test S1Test 11 1
The Somewhat Long Name Of The Test S1Test 12 1
*** Keywords ***
Test1
[Arguments] ${arg}
Log Test
""",
"""
{
--test Test.The Somewhat Long Name Of The Test S1Test 01
--test Test.The Somewhat Long Name Of The Test S1Test 02
--test Test.The Somewhat Long Name Of The Test S1Test 03
--test Test.The Somewhat Long Name Of The Test S1Test 04
--test Test.The Somewhat Long Name Of The Test S1Test 05
--test Test.The Somewhat Long Name Of The Test S1Test 06
}
{
--test Test.The Somewhat Long Name Of The Test S1Test 07
--test Test.The Somewhat Long Name Of The Test S1Test 08
--test Test.The Somewhat Long Name Of The Test S1Test 09
--test Test.The Somewhat Long Name Of The Test S1Test 10
--test Test.The Somewhat Long Name Of The Test S1Test 11
--test Test.The Somewhat Long Name Of The Test S1Test 12
}
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 2)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 2)
|
models/SelectionGAN/person_transfer/tool/rm_insnorm_running_vars.py | xianjian-xie/pose-generation | 445 | 4137 | import torch
ckp_path = './checkpoints/fashion_PATN/latest_net_netG.pth'
save_path = './checkpoints/fashion_PATN_v1.0/latest_net_netG.pth'
states_dict = torch.load(ckp_path)
states_dict_new = states_dict.copy()
for key in states_dict.keys():
if "running_var" in key or "running_mean" in key:
del states_dict_new[key]
torch.save(states_dict_new, save_path) |
mistral/tests/unit/utils/test_utils.py | shubhamdang/mistral | 205 | 4170 | # Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2015 - Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral import exceptions as exc
from mistral.tests.unit import base
from mistral.utils import ssh_utils
from mistral_lib import utils
class UtilsTest(base.BaseTest):
def test_itersubclasses(self):
class A(object):
pass
class B(A):
pass
class C(A):
pass
class D(C):
pass
self.assertEqual([B, C, D], list(utils.iter_subclasses(A)))
def test_paramiko_to_private_key(self):
self.assertRaises(
exc.DataAccessException,
ssh_utils._to_paramiko_private_key,
"../dir"
)
self.assertRaises(
exc.DataAccessException,
ssh_utils._to_paramiko_private_key,
"..\\dir"
)
self.assertIsNone(
ssh_utils._to_paramiko_private_key(private_key_filename=None,
password='<PASSWORD>')
)
|
cors/resources/cors-makeheader.py | meyerweb/wpt | 14,668 | 4186 | import json
from wptserve.utils import isomorphic_decode
def main(request, response):
origin = request.GET.first(b"origin", request.headers.get(b'origin') or b'none')
if b"check" in request.GET:
token = request.GET.first(b"token")
value = request.server.stash.take(token)
if value is not None:
if request.GET.first(b"check", None) == b"keep":
request.server.stash.put(token, value)
body = u"1"
else:
body = u"0"
return [(b"Content-Type", b"text/plain")], body
if origin != b'none':
response.headers.set(b"Access-Control-Allow-Origin", origin)
if b'origin2' in request.GET:
response.headers.append(b"Access-Control-Allow-Origin", request.GET.first(b'origin2'))
#Preflight
if b'headers' in request.GET:
response.headers.set(b"Access-Control-Allow-Headers", request.GET.first(b'headers'))
if b'credentials' in request.GET:
response.headers.set(b"Access-Control-Allow-Credentials", request.GET.first(b'credentials'))
if b'methods' in request.GET:
response.headers.set(b"Access-Control-Allow-Methods", request.GET.first(b'methods'))
code_raw = request.GET.first(b'code', None)
if code_raw:
code = int(code_raw)
else:
code = None
if request.method == u'OPTIONS':
#Override the response code if we're in a preflight and it's asked
if b'preflight' in request.GET:
code = int(request.GET.first(b'preflight'))
#Log that the preflight actually happened if we have an ident
if b'token' in request.GET:
request.server.stash.put(request.GET[b'token'], True)
if b'location' in request.GET:
if code is None:
code = 302
if code >= 300 and code < 400:
response.headers.set(b"Location", request.GET.first(b'location'))
headers = {}
for name, values in request.headers.items():
if len(values) == 1:
headers[isomorphic_decode(name)] = isomorphic_decode(values[0])
else:
#I have no idea, really
headers[name] = values
headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value', b''))
body = json.dumps(headers)
if code:
return (code, b"StatusText"), [], body
else:
return body
|
demos/interactive-classifier/config.py | jepabe/Demo_earth2 | 1,909 | 4194 | #!/usr/bin/env python
"""Handles Earth Engine service account configuration."""
import ee
# The service account email address authorized by your Google contact.
# Set up a service account as described in the README.
EE_ACCOUNT = '<EMAIL>'
# The private key associated with your service account in Privacy Enhanced
# Email format (.pem suffix). To convert a private key from the RSA format
# (.p12 suffix) to .pem, run the openssl command like this:
# openssl pkcs12 -in downloaded-privatekey.p12 -nodes -nocerts > privatekey.pem
EE_PRIVATE_KEY_FILE = 'privatekey.pem'
EE_CREDENTIALS = ee.ServiceAccountCredentials(EE_ACCOUNT, EE_PRIVATE_KEY_FILE)
|
src/sqlfluff/rules/L024.py | NathanHowell/sqlfluff | 3,024 | 4198 | <gh_stars>1000+
"""Implementation of Rule L024."""
from sqlfluff.core.rules.doc_decorators import document_fix_compatible
from sqlfluff.rules.L023 import Rule_L023
@document_fix_compatible
class Rule_L024(Rule_L023):
"""Single whitespace expected after USING in JOIN clause.
| **Anti-pattern**
.. code-block:: sql
SELECT b
FROM foo
LEFT JOIN zoo USING(a)
| **Best practice**
| The • character represents a space.
| Add a space after USING, to avoid confusing it
| for a function.
.. code-block:: sql
:force:
SELECT b
FROM foo
LEFT JOIN zoo USING•(a)
"""
expected_mother_segment_type = "join_clause"
pre_segment_identifier = ("name", "using")
post_segment_identifier = ("type", "bracketed")
expand_children = None
allow_newline = True
|
option_c.py | wrosecrans/colormap | 231 | 4210 |
from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
# Used to reconstruct the colormap in viscm
parameters = {'xp': [-5.4895292543686764, 14.790571669586654, 82.5546687431056, 29.15531114139253, -4.1316769886951761, -13.002076438907238],
'yp': [-35.948168839230306, -42.273376159885785, -28.845467523197698, 52.03426124197, 36.832712600868973, 40.792291220556734],
'min_JK': 16.8314150305,
'max_JK': 95}
cm_data = [[ 5.03832136e-02, 2.98028976e-02, 5.27974883e-01],
[ 6.35363639e-02, 2.84259729e-02, 5.33123681e-01],
[ 7.53531234e-02, 2.72063728e-02, 5.38007001e-01],
[ 8.62217979e-02, 2.61253206e-02, 5.42657691e-01],
[ 9.63786097e-02, 2.51650976e-02, 5.47103487e-01],
[ 1.05979704e-01, 2.43092436e-02, 5.51367851e-01],
[ 1.15123641e-01, 2.35562500e-02, 5.55467728e-01],
[ 1.23902903e-01, 2.28781011e-02, 5.59423480e-01],
[ 1.32380720e-01, 2.22583774e-02, 5.63250116e-01],
[ 1.40603076e-01, 2.16866674e-02, 5.66959485e-01],
[ 1.48606527e-01, 2.11535876e-02, 5.70561711e-01],
[ 1.56420649e-01, 2.06507174e-02, 5.74065446e-01],
[ 1.64069722e-01, 2.01705326e-02, 5.77478074e-01],
[ 1.71573925e-01, 1.97063415e-02, 5.80805890e-01],
[ 1.78950212e-01, 1.92522243e-02, 5.84054243e-01],
[ 1.86212958e-01, 1.88029767e-02, 5.87227661e-01],
[ 1.93374449e-01, 1.83540593e-02, 5.90329954e-01],
[ 2.00445260e-01, 1.79015512e-02, 5.93364304e-01],
[ 2.07434551e-01, 1.74421086e-02, 5.96333341e-01],
[ 2.14350298e-01, 1.69729276e-02, 5.99239207e-01],
[ 2.21196750e-01, 1.64970484e-02, 6.02083323e-01],
[ 2.27982971e-01, 1.60071509e-02, 6.04867403e-01],
[ 2.34714537e-01, 1.55015065e-02, 6.07592438e-01],
[ 2.41396253e-01, 1.49791041e-02, 6.10259089e-01],
[ 2.48032377e-01, 1.44393586e-02, 6.12867743e-01],
[ 2.54626690e-01, 1.38820918e-02, 6.15418537e-01],
[ 2.61182562e-01, 1.33075156e-02, 6.17911385e-01],
[ 2.67702993e-01, 1.27162163e-02, 6.20345997e-01],
[ 2.74190665e-01, 1.21091423e-02, 6.22721903e-01],
[ 2.80647969e-01, 1.14875915e-02, 6.25038468e-01],
[ 2.87076059e-01, 1.08554862e-02, 6.27294975e-01],
[ 2.93477695e-01, 1.02128849e-02, 6.29490490e-01],
[ 2.99855122e-01, 9.56079551e-03, 6.31623923e-01],
[ 3.06209825e-01, 8.90185346e-03, 6.33694102e-01],
[ 3.12543124e-01, 8.23900704e-03, 6.35699759e-01],
[ 3.18856183e-01, 7.57551051e-03, 6.37639537e-01],
[ 3.25150025e-01, 6.91491734e-03, 6.39512001e-01],
[ 3.31425547e-01, 6.26107379e-03, 6.41315649e-01],
[ 3.37683446e-01, 5.61830889e-03, 6.43048936e-01],
[ 3.43924591e-01, 4.99053080e-03, 6.44710195e-01],
[ 3.50149699e-01, 4.38202557e-03, 6.46297711e-01],
[ 3.56359209e-01, 3.79781761e-03, 6.47809772e-01],
[ 3.62553473e-01, 3.24319591e-03, 6.49244641e-01],
[ 3.68732762e-01, 2.72370721e-03, 6.50600561e-01],
[ 3.74897270e-01, 2.24514897e-03, 6.51875762e-01],
[ 3.81047116e-01, 1.81356205e-03, 6.53068467e-01],
[ 3.87182639e-01, 1.43446923e-03, 6.54176761e-01],
[ 3.93304010e-01, 1.11388259e-03, 6.55198755e-01],
[ 3.99410821e-01, 8.59420809e-04, 6.56132835e-01],
[ 4.05502914e-01, 6.78091517e-04, 6.56977276e-01],
[ 4.11580082e-01, 5.77101735e-04, 6.57730380e-01],
[ 4.17642063e-01, 5.63847476e-04, 6.58390492e-01],
[ 4.23688549e-01, 6.45902780e-04, 6.58956004e-01],
[ 4.29719186e-01, 8.31008207e-04, 6.59425363e-01],
[ 4.35733575e-01, 1.12705875e-03, 6.59797077e-01],
[ 4.41732123e-01, 1.53984779e-03, 6.60069009e-01],
[ 4.47713600e-01, 2.07954744e-03, 6.60240367e-01],
[ 4.53677394e-01, 2.75470302e-03, 6.60309966e-01],
[ 4.59622938e-01, 3.57374415e-03, 6.60276655e-01],
[ 4.65549631e-01, 4.54518084e-03, 6.60139383e-01],
[ 4.71456847e-01, 5.67758762e-03, 6.59897210e-01],
[ 4.77343929e-01, 6.97958743e-03, 6.59549311e-01],
[ 4.83210198e-01, 8.45983494e-03, 6.59094989e-01],
[ 4.89054951e-01, 1.01269996e-02, 6.58533677e-01],
[ 4.94877466e-01, 1.19897486e-02, 6.57864946e-01],
[ 5.00677687e-01, 1.40550640e-02, 6.57087561e-01],
[ 5.06454143e-01, 1.63333443e-02, 6.56202294e-01],
[ 5.12206035e-01, 1.88332232e-02, 6.55209222e-01],
[ 5.17932580e-01, 2.15631918e-02, 6.54108545e-01],
[ 5.23632990e-01, 2.45316468e-02, 6.52900629e-01],
[ 5.29306474e-01, 2.77468735e-02, 6.51586010e-01],
[ 5.34952244e-01, 3.12170300e-02, 6.50165396e-01],
[ 5.40569510e-01, 3.49501310e-02, 6.48639668e-01],
[ 5.46157494e-01, 3.89540334e-02, 6.47009884e-01],
[ 5.51715423e-01, 4.31364795e-02, 6.45277275e-01],
[ 5.57242538e-01, 4.73307585e-02, 6.43443250e-01],
[ 5.62738096e-01, 5.15448092e-02, 6.41509389e-01],
[ 5.68201372e-01, 5.57776706e-02, 6.39477440e-01],
[ 5.73631859e-01, 6.00281369e-02, 6.37348841e-01],
[ 5.79028682e-01, 6.42955547e-02, 6.35126108e-01],
[ 5.84391137e-01, 6.85790261e-02, 6.32811608e-01],
[ 5.89718606e-01, 7.28775875e-02, 6.30407727e-01],
[ 5.95010505e-01, 7.71902878e-02, 6.27916992e-01],
[ 6.00266283e-01, 8.15161895e-02, 6.25342058e-01],
[ 6.05485428e-01, 8.58543713e-02, 6.22685703e-01],
[ 6.10667469e-01, 9.02039303e-02, 6.19950811e-01],
[ 6.15811974e-01, 9.45639838e-02, 6.17140367e-01],
[ 6.20918555e-01, 9.89336721e-02, 6.14257440e-01],
[ 6.25986869e-01, 1.03312160e-01, 6.11305174e-01],
[ 6.31016615e-01, 1.07698641e-01, 6.08286774e-01],
[ 6.36007543e-01, 1.12092335e-01, 6.05205491e-01],
[ 6.40959444e-01, 1.16492495e-01, 6.02064611e-01],
[ 6.45872158e-01, 1.20898405e-01, 5.98867442e-01],
[ 6.50745571e-01, 1.25309384e-01, 5.95617300e-01],
[ 6.55579615e-01, 1.29724785e-01, 5.92317494e-01],
[ 6.60374266e-01, 1.34143997e-01, 5.88971318e-01],
[ 6.65129493e-01, 1.38566428e-01, 5.85582301e-01],
[ 6.69845385e-01, 1.42991540e-01, 5.82153572e-01],
[ 6.74522060e-01, 1.47418835e-01, 5.78688247e-01],
[ 6.79159664e-01, 1.51847851e-01, 5.75189431e-01],
[ 6.83758384e-01, 1.56278163e-01, 5.71660158e-01],
[ 6.88318440e-01, 1.60709387e-01, 5.68103380e-01],
[ 6.92840088e-01, 1.65141174e-01, 5.64521958e-01],
[ 6.97323615e-01, 1.69573215e-01, 5.60918659e-01],
[ 7.01769334e-01, 1.74005236e-01, 5.57296144e-01],
[ 7.06177590e-01, 1.78437000e-01, 5.53656970e-01],
[ 7.10548747e-01, 1.82868306e-01, 5.50003579e-01],
[ 7.14883195e-01, 1.87298986e-01, 5.46338299e-01],
[ 7.19181339e-01, 1.91728906e-01, 5.42663338e-01],
[ 7.23443604e-01, 1.96157962e-01, 5.38980786e-01],
[ 7.27670428e-01, 2.00586086e-01, 5.35292612e-01],
[ 7.31862231e-01, 2.05013174e-01, 5.31600995e-01],
[ 7.36019424e-01, 2.09439071e-01, 5.27908434e-01],
[ 7.40142557e-01, 2.13863965e-01, 5.24215533e-01],
[ 7.44232102e-01, 2.18287899e-01, 5.20523766e-01],
[ 7.48288533e-01, 2.22710942e-01, 5.16834495e-01],
[ 7.52312321e-01, 2.27133187e-01, 5.13148963e-01],
[ 7.56303937e-01, 2.31554749e-01, 5.09468305e-01],
[ 7.60263849e-01, 2.35975765e-01, 5.05793543e-01],
[ 7.64192516e-01, 2.40396394e-01, 5.02125599e-01],
[ 7.68090391e-01, 2.44816813e-01, 4.98465290e-01],
[ 7.71957916e-01, 2.49237220e-01, 4.94813338e-01],
[ 7.75795522e-01, 2.53657797e-01, 4.91170517e-01],
[ 7.79603614e-01, 2.58078397e-01, 4.87539124e-01],
[ 7.83382636e-01, 2.62499662e-01, 4.83917732e-01],
[ 7.87132978e-01, 2.66921859e-01, 4.80306702e-01],
[ 7.90855015e-01, 2.71345267e-01, 4.76706319e-01],
[ 7.94549101e-01, 2.75770179e-01, 4.73116798e-01],
[ 7.98215577e-01, 2.80196901e-01, 4.69538286e-01],
[ 8.01854758e-01, 2.84625750e-01, 4.65970871e-01],
[ 8.05466945e-01, 2.89057057e-01, 4.62414580e-01],
[ 8.09052419e-01, 2.93491117e-01, 4.58869577e-01],
[ 8.12611506e-01, 2.97927865e-01, 4.55337565e-01],
[ 8.16144382e-01, 3.02368130e-01, 4.51816385e-01],
[ 8.19651255e-01, 3.06812282e-01, 4.48305861e-01],
[ 8.23132309e-01, 3.11260703e-01, 4.44805781e-01],
[ 8.26587706e-01, 3.15713782e-01, 4.41315901e-01],
[ 8.30017584e-01, 3.20171913e-01, 4.37835947e-01],
[ 8.33422053e-01, 3.24635499e-01, 4.34365616e-01],
[ 8.36801237e-01, 3.29104836e-01, 4.30905052e-01],
[ 8.40155276e-01, 3.33580106e-01, 4.27454836e-01],
[ 8.43484103e-01, 3.38062109e-01, 4.24013059e-01],
[ 8.46787726e-01, 3.42551272e-01, 4.20579333e-01],
[ 8.50066132e-01, 3.47048028e-01, 4.17153264e-01],
[ 8.53319279e-01, 3.51552815e-01, 4.13734445e-01],
[ 8.56547103e-01, 3.56066072e-01, 4.10322469e-01],
[ 8.59749520e-01, 3.60588229e-01, 4.06916975e-01],
[ 8.62926559e-01, 3.65119408e-01, 4.03518809e-01],
[ 8.66077920e-01, 3.69660446e-01, 4.00126027e-01],
[ 8.69203436e-01, 3.74211795e-01, 3.96738211e-01],
[ 8.72302917e-01, 3.78773910e-01, 3.93354947e-01],
[ 8.75376149e-01, 3.83347243e-01, 3.89975832e-01],
[ 8.78422895e-01, 3.87932249e-01, 3.86600468e-01],
[ 8.81442916e-01, 3.92529339e-01, 3.83228622e-01],
[ 8.84435982e-01, 3.97138877e-01, 3.79860246e-01],
[ 8.87401682e-01, 4.01761511e-01, 3.76494232e-01],
[ 8.90339687e-01, 4.06397694e-01, 3.73130228e-01],
[ 8.93249647e-01, 4.11047871e-01, 3.69767893e-01],
[ 8.96131191e-01, 4.15712489e-01, 3.66406907e-01],
[ 8.98983931e-01, 4.20391986e-01, 3.63046965e-01],
[ 9.01807455e-01, 4.25086807e-01, 3.59687758e-01],
[ 9.04601295e-01, 4.29797442e-01, 3.56328796e-01],
[ 9.07364995e-01, 4.34524335e-01, 3.52969777e-01],
[ 9.10098088e-01, 4.39267908e-01, 3.49610469e-01],
[ 9.12800095e-01, 4.44028574e-01, 3.46250656e-01],
[ 9.15470518e-01, 4.48806744e-01, 3.42890148e-01],
[ 9.18108848e-01, 4.53602818e-01, 3.39528771e-01],
[ 9.20714383e-01, 4.58417420e-01, 3.36165582e-01],
[ 9.23286660e-01, 4.63250828e-01, 3.32800827e-01],
[ 9.25825146e-01, 4.68103387e-01, 3.29434512e-01],
[ 9.28329275e-01, 4.72975465e-01, 3.26066550e-01],
[ 9.30798469e-01, 4.77867420e-01, 3.22696876e-01],
[ 9.33232140e-01, 4.82779603e-01, 3.19325444e-01],
[ 9.35629684e-01, 4.87712357e-01, 3.15952211e-01],
[ 9.37990034e-01, 4.92666544e-01, 3.12575440e-01],
[ 9.40312939e-01, 4.97642038e-01, 3.09196628e-01],
[ 9.42597771e-01, 5.02639147e-01, 3.05815824e-01],
[ 9.44843893e-01, 5.07658169e-01, 3.02433101e-01],
[ 9.47050662e-01, 5.12699390e-01, 2.99048555e-01],
[ 9.49217427e-01, 5.17763087e-01, 2.95662308e-01],
[ 9.51343530e-01, 5.22849522e-01, 2.92274506e-01],
[ 9.53427725e-01, 5.27959550e-01, 2.88883445e-01],
[ 9.55469640e-01, 5.33093083e-01, 2.85490391e-01],
[ 9.57468770e-01, 5.38250172e-01, 2.82096149e-01],
[ 9.59424430e-01, 5.43431038e-01, 2.78700990e-01],
[ 9.61335930e-01, 5.48635890e-01, 2.75305214e-01],
[ 9.63202573e-01, 5.53864931e-01, 2.71909159e-01],
[ 9.65023656e-01, 5.59118349e-01, 2.68513200e-01],
[ 9.66798470e-01, 5.64396327e-01, 2.65117752e-01],
[ 9.68525639e-01, 5.69699633e-01, 2.61721488e-01],
[ 9.70204593e-01, 5.75028270e-01, 2.58325424e-01],
[ 9.71835007e-01, 5.80382015e-01, 2.54931256e-01],
[ 9.73416145e-01, 5.85761012e-01, 2.51539615e-01],
[ 9.74947262e-01, 5.91165394e-01, 2.48151200e-01],
[ 9.76427606e-01, 5.96595287e-01, 2.44766775e-01],
[ 9.77856416e-01, 6.02050811e-01, 2.41387186e-01],
[ 9.79232922e-01, 6.07532077e-01, 2.38013359e-01],
[ 9.80556344e-01, 6.13039190e-01, 2.34646316e-01],
[ 9.81825890e-01, 6.18572250e-01, 2.31287178e-01],
[ 9.83040742e-01, 6.24131362e-01, 2.27937141e-01],
[ 9.84198924e-01, 6.29717516e-01, 2.24595006e-01],
[ 9.85300760e-01, 6.35329876e-01, 2.21264889e-01],
[ 9.86345421e-01, 6.40968508e-01, 2.17948456e-01],
[ 9.87332067e-01, 6.46633475e-01, 2.14647532e-01],
[ 9.88259846e-01, 6.52324832e-01, 2.11364122e-01],
[ 9.89127893e-01, 6.58042630e-01, 2.08100426e-01],
[ 9.89935328e-01, 6.63786914e-01, 2.04858855e-01],
[ 9.90681261e-01, 6.69557720e-01, 2.01642049e-01],
[ 9.91364787e-01, 6.75355082e-01, 1.98452900e-01],
[ 9.91984990e-01, 6.81179025e-01, 1.95294567e-01],
[ 9.92540939e-01, 6.87029567e-01, 1.92170500e-01],
[ 9.93031693e-01, 6.92906719e-01, 1.89084459e-01],
[ 9.93456302e-01, 6.98810484e-01, 1.86040537e-01],
[ 9.93813802e-01, 7.04740854e-01, 1.83043180e-01],
[ 9.94103226e-01, 7.10697814e-01, 1.80097207e-01],
[ 9.94323596e-01, 7.16681336e-01, 1.77207826e-01],
[ 9.94473934e-01, 7.22691379e-01, 1.74380656e-01],
[ 9.94553260e-01, 7.28727890e-01, 1.71621733e-01],
[ 9.94560594e-01, 7.34790799e-01, 1.68937522e-01],
[ 9.94494964e-01, 7.40880020e-01, 1.66334918e-01],
[ 9.94355411e-01, 7.46995448e-01, 1.63821243e-01],
[ 9.94140989e-01, 7.53136955e-01, 1.61404226e-01],
[ 9.93850778e-01, 7.59304390e-01, 1.59091984e-01],
[ 9.93482190e-01, 7.65498551e-01, 1.56890625e-01],
[ 9.93033251e-01, 7.71719833e-01, 1.54807583e-01],
[ 9.92505214e-01, 7.77966775e-01, 1.52854862e-01],
[ 9.91897270e-01, 7.84239120e-01, 1.51041581e-01],
[ 9.91208680e-01, 7.90536569e-01, 1.49376885e-01],
[ 9.90438793e-01, 7.96858775e-01, 1.47869810e-01],
[ 9.89587065e-01, 8.03205337e-01, 1.46529128e-01],
[ 9.88647741e-01, 8.09578605e-01, 1.45357284e-01],
[ 9.87620557e-01, 8.15977942e-01, 1.44362644e-01],
[ 9.86509366e-01, 8.22400620e-01, 1.43556679e-01],
[ 9.85314198e-01, 8.28845980e-01, 1.42945116e-01],
[ 9.84031139e-01, 8.35315360e-01, 1.42528388e-01],
[ 9.82652820e-01, 8.41811730e-01, 1.42302653e-01],
[ 9.81190389e-01, 8.48328902e-01, 1.42278607e-01],
[ 9.79643637e-01, 8.54866468e-01, 1.42453425e-01],
[ 9.77994918e-01, 8.61432314e-01, 1.42808191e-01],
[ 9.76264977e-01, 8.68015998e-01, 1.43350944e-01],
[ 9.74443038e-01, 8.74622194e-01, 1.44061156e-01],
[ 9.72530009e-01, 8.81250063e-01, 1.44922913e-01],
[ 9.70532932e-01, 8.87896125e-01, 1.45918663e-01],
[ 9.68443477e-01, 8.94563989e-01, 1.47014438e-01],
[ 9.66271225e-01, 9.01249365e-01, 1.48179639e-01],
[ 9.64021057e-01, 9.07950379e-01, 1.49370428e-01],
[ 9.61681481e-01, 9.14672479e-01, 1.50520343e-01],
[ 9.59275646e-01, 9.21406537e-01, 1.51566019e-01],
[ 9.56808068e-01, 9.28152065e-01, 1.52409489e-01],
[ 9.54286813e-01, 9.34907730e-01, 1.52921158e-01],
[ 9.51726083e-01, 9.41670605e-01, 1.52925363e-01],
[ 9.49150533e-01, 9.48434900e-01, 1.52177604e-01],
[ 9.46602270e-01, 9.55189860e-01, 1.50327944e-01],
[ 9.44151742e-01, 9.61916487e-01, 1.46860789e-01],
[ 9.41896120e-01, 9.68589814e-01, 1.40955606e-01],
[ 9.40015097e-01, 9.75158357e-01, 1.31325517e-01]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(test_cm)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
|
examples/resources.py | willvousden/clint | 1,230 | 4249 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
from clint import resources
resources.init('kennethreitz', 'clint')
lorem = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
print('%s created.' % resources.user.path)
resources.user.write('lorem.txt', lorem)
print('lorem.txt created')
assert resources.user.read('lorem.txt') == lorem
print('lorem.txt has correct contents')
resources.user.delete('lorem.txt')
print('lorem.txt deleted')
assert resources.user.read('lorem.txt') is None
print('lorem.txt deletion confirmed')
|
tests/components/airthings/test_config_flow.py | MrDelik/core | 30,023 | 4256 | <reponame>MrDelik/core<filename>tests/components/airthings/test_config_flow.py
"""Test the Airthings config flow."""
from unittest.mock import patch
import airthings
from homeassistant import config_entries
from homeassistant.components.airthings.const import CONF_ID, CONF_SECRET, DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM
from tests.common import MockConfigEntry
TEST_DATA = {
CONF_ID: "client_id",
CONF_SECRET: "secret",
}
async def test_form(hass: HomeAssistant) -> None:
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with patch("airthings.get_token", return_value="test_token",), patch(
"homeassistant.components.airthings.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == "Airthings"
assert result2["data"] == TEST_DATA
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass: HomeAssistant) -> None:
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"airthings.get_token",
side_effect=airthings.AirthingsAuthError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass: HomeAssistant) -> None:
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"airthings.get_token",
side_effect=airthings.AirthingsConnectionError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_error(hass: HomeAssistant) -> None:
"""Test we handle unknown error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"airthings.get_token",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_flow_entry_already_exists(hass: HomeAssistant) -> None:
"""Test user input for config_entry that already exists."""
first_entry = MockConfigEntry(
domain="airthings",
data=TEST_DATA,
unique_id=TEST_DATA[CONF_ID],
)
first_entry.add_to_hass(hass)
with patch("airthings.get_token", return_value="token"):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=TEST_DATA
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
|
src/oci/apm_traces/models/query_result_row_type_summary.py | Manny27nyc/oci-python-sdk | 249 | 4262 | <reponame>Manny27nyc/oci-python-sdk<filename>src/oci/apm_traces/models/query_result_row_type_summary.py
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class QueryResultRowTypeSummary(object):
"""
A summary of the datatype, unit and related metadata of an individual row element of a query result row that is returned.
"""
def __init__(self, **kwargs):
"""
Initializes a new QueryResultRowTypeSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param data_type:
The value to assign to the data_type property of this QueryResultRowTypeSummary.
:type data_type: str
:param unit:
The value to assign to the unit property of this QueryResultRowTypeSummary.
:type unit: str
:param display_name:
The value to assign to the display_name property of this QueryResultRowTypeSummary.
:type display_name: str
:param expression:
The value to assign to the expression property of this QueryResultRowTypeSummary.
:type expression: str
:param query_result_row_type_summaries:
The value to assign to the query_result_row_type_summaries property of this QueryResultRowTypeSummary.
:type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary]
"""
self.swagger_types = {
'data_type': 'str',
'unit': 'str',
'display_name': 'str',
'expression': 'str',
'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]'
}
self.attribute_map = {
'data_type': 'dataType',
'unit': 'unit',
'display_name': 'displayName',
'expression': 'expression',
'query_result_row_type_summaries': 'queryResultRowTypeSummaries'
}
self._data_type = None
self._unit = None
self._display_name = None
self._expression = None
self._query_result_row_type_summaries = None
@property
def data_type(self):
"""
Gets the data_type of this QueryResultRowTypeSummary.
Datatype of the query result row element.
:return: The data_type of this QueryResultRowTypeSummary.
:rtype: str
"""
return self._data_type
@data_type.setter
def data_type(self, data_type):
"""
Sets the data_type of this QueryResultRowTypeSummary.
Datatype of the query result row element.
:param data_type: The data_type of this QueryResultRowTypeSummary.
:type: str
"""
self._data_type = data_type
@property
def unit(self):
"""
Gets the unit of this QueryResultRowTypeSummary.
Granular unit in which the query result row element's data is represented.
:return: The unit of this QueryResultRowTypeSummary.
:rtype: str
"""
return self._unit
@unit.setter
def unit(self, unit):
"""
Sets the unit of this QueryResultRowTypeSummary.
Granular unit in which the query result row element's data is represented.
:param unit: The unit of this QueryResultRowTypeSummary.
:type: str
"""
self._unit = unit
@property
def display_name(self):
"""
Gets the display_name of this QueryResultRowTypeSummary.
Alias name if an alias is used for the query result row element or an assigned display name from the query language
in some default cases.
:return: The display_name of this QueryResultRowTypeSummary.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this QueryResultRowTypeSummary.
Alias name if an alias is used for the query result row element or an assigned display name from the query language
in some default cases.
:param display_name: The display_name of this QueryResultRowTypeSummary.
:type: str
"""
self._display_name = display_name
@property
def expression(self):
"""
Gets the expression of this QueryResultRowTypeSummary.
Actual show expression in the user typed query that produced this column.
:return: The expression of this QueryResultRowTypeSummary.
:rtype: str
"""
return self._expression
@expression.setter
def expression(self, expression):
"""
Sets the expression of this QueryResultRowTypeSummary.
Actual show expression in the user typed query that produced this column.
:param expression: The expression of this QueryResultRowTypeSummary.
:type: str
"""
self._expression = expression
@property
def query_result_row_type_summaries(self):
"""
Gets the query_result_row_type_summaries of this QueryResultRowTypeSummary.
A query result row type summary object that represents a nested table structure.
:return: The query_result_row_type_summaries of this QueryResultRowTypeSummary.
:rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary]
"""
return self._query_result_row_type_summaries
@query_result_row_type_summaries.setter
def query_result_row_type_summaries(self, query_result_row_type_summaries):
"""
Sets the query_result_row_type_summaries of this QueryResultRowTypeSummary.
A query result row type summary object that represents a nested table structure.
:param query_result_row_type_summaries: The query_result_row_type_summaries of this QueryResultRowTypeSummary.
:type: list[oci.apm_traces.models.QueryResultRowTypeSummary]
"""
self._query_result_row_type_summaries = query_result_row_type_summaries
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
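# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch of populating this model through its keyword arguments; the
# field values below are invented for demonstration only.
#
#   row_type = QueryResultRowTypeSummary(
#       data_type="String",
#       unit="none",
#       display_name="serviceName",
#       expression="serviceName",
#       query_result_row_type_summaries=[],
#   )
#   print(repr(row_type))  # rendered by formatted_flat_dict via __repr__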
|
jaxformer/hf/sample.py | salesforce/CodeGen | 105 | 4263 | <gh_stars>100-1000
# Copyright (c) 2022, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import os
import re
import time
import random
import argparse
import torch
from transformers import GPT2TokenizerFast
from jaxformer.hf.codegen.modeling_codegen import CodeGenForCausalLM
########################################################################
# util
class print_time:
def __init__(self, desc):
self.desc = desc
def __enter__(self):
print(self.desc)
self.t = time.time()
def __exit__(self, type, value, traceback):
print(f'{self.desc} took {time.time()-self.t:.02f}s')
def set_env():
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
def set_seed(seed, deterministic=True):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = deterministic
torch.backends.cudnn.benchmark = not deterministic
# torch.use_deterministic_algorithms(deterministic)
def cast(model, fp16=True):
if fp16:
model.half()
return model
########################################################################
# model
def create_model(ckpt, fp16=True):
if fp16:
return CodeGenForCausalLM.from_pretrained(ckpt, revision='float16', torch_dtype=torch.float16, low_cpu_mem_usage=True)
else:
return CodeGenForCausalLM.from_pretrained(ckpt)
def create_tokenizer():
t = GPT2TokenizerFast.from_pretrained('gpt2')
t.max_model_input_sizes['gpt2'] = 1e20
return t
def include_whitespace(t, n_min=2, n_max=20, as_special_tokens=False):
t.add_tokens([' ' * n for n in reversed(range(n_min, n_max))], special_tokens=as_special_tokens)
return t
def include_tabs(t, n_min=2, n_max=20, as_special_tokens=False):
t.add_tokens(['\t' * n for n in reversed(range(n_min, n_max))], special_tokens=as_special_tokens)
return t
def create_custom_gpt2_tokenizer():
t = create_tokenizer()
t = include_whitespace(t=t, n_min=2, n_max=32, as_special_tokens=False)
t = include_tabs(t=t, n_min=2, n_max=10, as_special_tokens=False)
return t
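# --- Illustrative note (not part of the original file) -----------------------
# create_custom_gpt2_tokenizer() extends GPT-2's vocabulary with runs of 2-31
# spaces and 2-9 tabs, so indented source code encodes to fewer tokens than
# with the stock GPT-2 tokenizer. A rough sketch of checking that, assuming the
# standard Hugging Face tokenizer call API:
#
#   plain = create_tokenizer()
#   custom = create_custom_gpt2_tokenizer()
#   code = "def f():\n        return 1"
#   assert len(custom(code).input_ids) <= len(plain(code).input_ids)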
########################################################################
# sample
def sample(
device,
model,
tokenizer,
context,
pad_token_id,
num_return_sequences=1,
temp=0.2,
top_p=0.95,
max_length_sample=128,
max_length=2048
):
input_ids = tokenizer(
context,
truncation=True,
padding=True,
max_length=max_length,
return_tensors='pt',
).input_ids
input_ids_len = input_ids.shape[1]
assert input_ids_len < max_length
with torch.no_grad():
input_ids = input_ids.to(device)
tokens = model.generate(
input_ids,
do_sample=True,
num_return_sequences=num_return_sequences,
temperature=temp,
max_length=input_ids_len + max_length_sample,
top_p=top_p,
pad_token_id=pad_token_id,
use_cache=True,
)
text = tokenizer.batch_decode(tokens[:, input_ids_len:, ...])
return text
def truncate(completion):
def find_re(string, pattern, start_pos):
m = pattern.search(string, start_pos)
return m.start() if m else -1
terminals = [
re.compile(r, re.MULTILINE)
for r in
[
'^#',
re.escape('<|endoftext|>'),
"^'''",
'^"""',
'\n\n\n'
]
]
prints = list(re.finditer('^print', completion, re.MULTILINE))
if len(prints) > 1:
completion = completion[:prints[1].start()]
defs = list(re.finditer('^def', completion, re.MULTILINE))
if len(defs) > 1:
completion = completion[:defs[1].start()]
start_pos = 0
terminals_pos = [pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1]
if len(terminals_pos) > 0:
return completion[:min(terminals_pos)]
else:
return completion
def test_truncate():
assert truncate('\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#') == '\nif len_a > len_b:\n result = a\nelse:\n result = b'
########################################################################
# main
def main():
# (0) constants
models_nl = ['codegen-350M-nl', 'codegen-2B-nl', 'codegen-6B-nl', 'codegen-16B-nl']
models_pl = ['codegen-350M-multi', 'codegen-2B-multi', 'codegen-6B-multi', 'codegen-16B-multi', 'codegen-350M-mono', 'codegen-2B-mono', 'codegen-6B-mono', 'codegen-16B-mono']
models = models_nl + models_pl
# (1) params
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, choices=models, default='codegen-350M-mono')
parser.add_argument('--device', type=str, default='cuda:0')
parser.add_argument('--rng-seed', type=int, default=42)
parser.add_argument('--rng-deterministic', type=bool, default=True)
parser.add_argument('--p', type=float, default=0.95)
parser.add_argument('--t', type=float, default=0.2)
parser.add_argument('--max-length', type=int, default=128)
parser.add_argument('--batch-size', type=int, default=1)
parser.add_argument('--no-fp16', action="store_false")
parser.add_argument('--pad', type=int, default=50256)
parser.add_argument('--context', type=str, default='def helloworld():')
args = parser.parse_args()
# (2) preamble
set_env()
set_seed(args.rng_seed, deterministic=args.rng_deterministic)
device = torch.device(args.device)
if device.type == "cpu":
args.no_fp16 = False
if args.model.startswith("codegen-16B"):
args.no_fp16 = True
ckpt = f'./checkpoints/{args.model}'
# (3) load
with print_time('loading parameters'):
model = create_model(ckpt=ckpt, fp16=args.no_fp16).to(device)
with print_time('loading tokenizer'):
if args.model in models_pl:
tokenizer = create_custom_gpt2_tokenizer()
else:
tokenizer = create_tokenizer()
tokenizer.padding_side = 'left'
tokenizer.pad_token = args.pad
# (4) sample
with print_time('sampling'):
completion = sample(device=device, model=model, tokenizer=tokenizer, context=args.context, pad_token_id=args.pad, num_return_sequences=args.batch_size, temp=args.t, top_p=args.p, max_length_sample=args.max_length)[0]
truncation = truncate(completion)
print('=' * 100)
print(completion)
print('=' * 100)
print(args.context+truncation)
print('=' * 100)
if __name__ == '__main__':
test_truncate()
main()
print('done.')
|
retrain_with_rotnet.py | ericdaat/self-label | 440 | 4265 | import argparse
import warnings
warnings.simplefilter("ignore", UserWarning)
import files
from tensorboardX import SummaryWriter
import os
import numpy as np
import time
import torch
import torch.optim
import torch.nn as nn
import torch.utils.data
import torchvision
import torchvision.transforms as tfs
from data import DataSet,return_model_loader
from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage
def RotationDataLoader(image_dir, is_validation=False,
batch_size=256, crop_size=224, num_workers=4,shuffle=True):
normalize = tfs.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transforms = tfs.Compose([
tfs.RandomResizedCrop(crop_size),
tfs.RandomGrayscale(p=0.2),
tfs.ColorJitter(0.4, 0.4, 0.4, 0.4),
tfs.RandomHorizontalFlip(),
tfs.Lambda(lambda img: torch.stack([normalize(tfs.ToTensor()(
tfs.functional.rotate(img, angle))) for angle in [0, 90, 180, 270]]
))
])
if is_validation:
dataset = DataSet(torchvision.datasets.ImageFolder(image_dir + '/val', transforms))
else:
dataset = DataSet(torchvision.datasets.ImageFolder(image_dir + '/train', transforms))
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=True,
drop_last=False
)
return loader
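# --- Illustrative note (not part of the original file) -----------------------
# Each item yielded by the transform above is a stack of the four rotations
# (0/90/180/270 degrees) of one crop, so a batch from this loader has shape
# (batch, 4, 3, crop_size, crop_size); it is flattened to (batch * 4, 3, ...)
# inside Optimizer.optimize_epoch. A minimal sketch, assuming an ImageNet-style
# directory layout at a hypothetical path:
#
#   loader = RotationDataLoader("/path/to/imagenet", batch_size=8)
#   data, label, selected = next(iter(loader))
#   assert data.shape[1:] == (4, 3, 224, 224)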
class Optimizer:
def __init__(self):
self.num_epochs = 30
self.lr = 0.05
self.lr_schedule = lambda epoch: (self.lr * (0.1 ** (epoch//args.lrdrop)))*(epoch<80) + (epoch>=80)*self.lr*(0.1**3)
self.momentum = 0.9
self.weight_decay = 10**(-5)
self.resume = True
self.checkpoint_dir = None
self.writer = None
self.K = args.ncl
self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.val_loader = RotationDataLoader(args.imagenet_path, is_validation=True,
batch_size=args.batch_size, num_workers=args.workers,shuffle=True)
def optimize_epoch(self, model, optimizer, loader, epoch, validation=False):
print(f"Starting epoch {epoch}, validation: {validation} " + "="*30)
loss_value = AverageMeter()
rotacc_value = AverageMeter()
# house keeping
if not validation:
model.train()
lr = self.lr_schedule(epoch)
for pg in optimizer.param_groups:
pg['lr'] = lr
else:
model.eval()
XE = torch.nn.CrossEntropyLoss().to(self.dev)
l_dl = 0 # len(loader)
now = time.time()
batch_time = MovingAverage(intertia=0.9)
for iter, (data, label, selected) in enumerate(loader):
now = time.time()
if not validation:
niter = epoch * len(loader.dataset) + iter*args.batch_size
data = data.to(self.dev)
mass = data.size(0)
where = np.arange(mass,dtype=int) * 4
data = data.view(mass * 4, 3, data.size(3), data.size(4))
rotlabel = torch.tensor(range(4)).view(-1, 1).repeat(mass, 1).view(-1).to(self.dev)
#################### train CNN ###########################################
if not validation:
final = model(data)
if args.onlyrot:
loss = torch.Tensor([0]).to(self.dev)
else:
if args.hc == 1:
loss = XE(final[0][where], self.L[selected])
else:
loss = torch.mean(torch.stack([XE(final[k][where], self.L[k, selected]) for k in range(args.hc)]))
rotloss = XE(final[-1], rotlabel)
pred = torch.argmax(final[-1], 1)
total_loss = loss + rotloss
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
correct = (pred == rotlabel).to(torch.float)
rotacc = correct.sum() / float(mass)
else:
final = model(data)
pred = torch.argmax(final[-1], 1)
correct = (pred == rotlabel.cuda()).to(torch.float)
rotacc = correct.sum() / float(mass)
total_loss = torch.Tensor([0])
loss = torch.Tensor([0])
rotloss = torch.Tensor([0])
rotacc_value.update(rotacc.item(), mass)
loss_value.update(total_loss.item(), mass)
batch_time.update(time.time() - now)
now = time.time()
print(
f"Loss: {loss_value.avg:03.3f}, RotAcc: {rotacc_value.avg:03.3f} | {epoch: 3}/{iter:05}/{l_dl:05} Freq: {mass / batch_time.avg:04.1f}Hz:",
end='\r', flush=True)
# every few iter logging
if (iter % args.logiter == 0):
if not validation:
print(niter, " Loss: {0:.3f}".format(loss.item()), flush=True)
with torch.no_grad():
if not args.onlyrot:
pred = torch.argmax(final[0][where], dim=1)
pseudoloss = XE(final[0][where], pred)
if not args.onlyrot:
self.writer.add_scalar('Pseudoloss', pseudoloss.item(), niter)
self.writer.add_scalar('lr', self.lr_schedule(epoch), niter)
self.writer.add_scalar('Loss', loss.item(), niter)
self.writer.add_scalar('RotLoss', rotloss.item(), niter)
self.writer.add_scalar('RotAcc', rotacc.item(), niter)
if iter > 0:
self.writer.add_scalar('Freq(Hz)', mass/(time.time() - now), niter)
# end of epoch logging
if self.writer and (epoch % self.log_interval == 0):
write_conv(self.writer, model, epoch)
if validation:
print('val Rot-Acc: ', rotacc_value.avg)
self.writer.add_scalar('val Rot-Acc', rotacc_value.avg, epoch)
files.save_checkpoint_all(self.checkpoint_dir, model, args.arch,
optimizer, self.L, epoch,lowest=False)
return {'loss': loss_value.avg}
def optimize(self, model, train_loader):
"""Perform full optimization."""
first_epoch = 0
model = model.to(self.dev)
self.optimize_times = [0]
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
weight_decay=self.weight_decay,
momentum=self.momentum,
lr=self.lr)
if self.checkpoint_dir is not None and self.resume:
self.L, first_epoch = files.load_checkpoint_all(self.checkpoint_dir, model=None, opt=None)
print('loaded from: ', self.checkpoint_dir,flush=True)
print('first five entries of L: ', self.L[:5], flush=True)
print('found first epoch to be', first_epoch, flush=True)
first_epoch = 0
self.optimize_times = [0]
self.L = self.L.cuda()
print("model.headcount ", model.headcount, flush=True)
#####################################################################################
# Perform optmization ###############################################################
lowest_loss = 1e9
epoch = first_epoch
while epoch < (self.num_epochs+1):
if not args.val_only:
m = self.optimize_epoch(model, optimizer, train_loader, epoch, validation=False)
if m['loss'] < lowest_loss:
lowest_loss = m['loss']
files.save_checkpoint_all(self.checkpoint_dir, model, args.arch,
optimizer, self.L, epoch, lowest=True)
else:
print('='*30 +' doing only validation ' + "="*30)
epoch = self.num_epochs
m = self.optimize_epoch(model, optimizer, self.val_loader, epoch, validation=True)
epoch += 1
print(f"Model optimization completed. Saving final model to {os.path.join(self.checkpoint_dir, 'model_final.pth.tar')}")
torch.save(model, os.path.join(self.checkpoint_dir, 'model_final.pth.tar'))
return model
def get_parser():
parser = argparse.ArgumentParser(description='Retrain with given labels combined with RotNet loss')
# optimizer
parser.add_argument('--epochs', default=90, type=int, metavar='N', help='number of epochs')
parser.add_argument('--batch-size', default=64, type=int, metavar='BS', help='batch size')
parser.add_argument('--lr', default=0.05, type=float, metavar='FLOAT', help='initial learning rate')
parser.add_argument('--lrdrop', default=30, type=int, metavar='INT', help='multiply LR by 0.1 every')
# architecture
parser.add_argument('--arch', default='alexnet', type=str, help='alexnet or resnet')
parser.add_argument('--archspec', default='big', type=str, help='big or small for alexnet ')
parser.add_argument('--ncl', default=1000, type=int, metavar='INT', help='number of clusters')
parser.add_argument('--hc', default=1, type=int, metavar='INT', help='number of heads')
parser.add_argument('--init', default=False, action='store_true', help='initialization of network to PyTorch 0.4')
# what we do in this code
parser.add_argument('--val-only', default=False, action='store_true', help='if we run only validation set')
parser.add_argument('--onlyrot', default=False, action='store_true', help='if train only RotNet')
# housekeeping
parser.add_argument('--data', default="Imagenet", type=str)
parser.add_argument('--device', default="0", type=str, metavar='N', help='GPU device')
parser.add_argument('--exp', default='./rot-retrain', metavar='DIR', help='path to result dirs')
    parser.add_argument('--workers', default=6, type=int, metavar='N', help='number of workers (default: 6)')
parser.add_argument('--imagenet-path', default='/home/ubuntu/data/imagenet', type=str, help='')
parser.add_argument('--comment', default='rot-retrain', type=str, help='comment for tensorboardX')
parser.add_argument('--log-interval', default=1, type=int, metavar='INT', help='save stuff every x epochs')
parser.add_argument('--logiter', default=200, type=int, metavar='INT', help='log every x-th batch')
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
name = "%s" % args.comment.replace('/', '_')
try:
args.device = [int(item) for item in args.device.split(',')]
except AttributeError:
args.device = [int(args.device)]
setup_runtime(seed=42, cuda_dev_id=args.device)
print(args, flush=True)
print()
print(name,flush=True)
writer = SummaryWriter('./runs/%s/%s'%(args.data,name))
writer.add_text('args', " \n".join(['%s %s' % (arg, getattr(args, arg)) for arg in vars(args)]))
# Setup model and train_loader
print('Commencing!', flush=True)
model, train_loader = return_model_loader(args)
train_loader = RotationDataLoader(args.imagenet_path, is_validation=False,
crop_size=224, batch_size=args.batch_size, num_workers=args.workers,
shuffle=True)
# add additional head to the network for RotNet loss.
if args.arch == 'alexnet':
if args.hc == 1:
model.__setattr__("top_layer0", nn.Linear(4096, args.ncl))
model.top_layer = None
model.headcount = args.hc+1
model.__setattr__("top_layer%s" % args.hc, nn.Linear(4096, 4))
else:
if args.hc == 1:
model.__setattr__("top_layer0", nn.Linear(2048*int(args.archspec), args.ncl))
model.top_layer = None
model.headcount = args.hc+1
model.__setattr__("top_layer%s" % args.hc, nn.Linear(2048*int(args.archspec), 4))
if args.init:
for mod in model.modules():
mod.apply(weight_init)
# Setup optimizer
o = Optimizer()
o.writer = writer
o.lr = args.lr
o.num_epochs = args.epochs
o.resume = True
o.log_interval = args.log_interval
o.checkpoint_dir = os.path.join(args.exp, 'checkpoints')
# Optimize
o.optimize(model, train_loader)
|
API-Reference-Code-Generator.py | sawyercade/Documentation | 116 | 4308 | import pathlib
import yaml
documentations = {"Our Platform": "QuantConnect-Platform-2.0.0.yaml",
"Alpha Streams": "QuantConnect-Alpha-0.8.yaml"}
def RequestTable(api_call, params):
writeUp = '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2"><code>{api_call}</code> Method</th>\n</tr>\n</thead>'
example = '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n{\n'
for item in params:
example_ = "/"
description_ = "Optional. " if "required" not in item or not item["required"] else ""
description_ += item["description"]
if description_[-1] != ".":
description_ += "."
if "type" in item["schema"]:
type_ = item["schema"]["type"]
else:
type_ = item["schema"]["$ref"].split("/")[-1]
if "minimum" in item["schema"]:
description_ += f' Minimum: {item["schema"]["minimum"]}'
example_ = item["schema"]["minimum"]
elif "maximum" in item["schema"]:
description_ += f' Maximum: {item["schema"]["maximum"]}'
example_ = item["schema"]["maximum"]
elif "default" in item["schema"]:
description_ += f' Default: {item["schema"]["default"]}'
example_ = item["schema"]["default"]
if type_ == "array":
array_obj = item["schema"]["items"]
if "$ref" in array_obj:
type_ = array_obj["$ref"].split("/")[-1] + " Array"
ref = array_obj["$ref"].split("/")[1:]
type_ = ref[-1] + " Array"
request_object_ = doc
for path in ref:
request_object_ = request_object_[path]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
example_, __, __ = ExampleWriting(request_object_properties_, [], 1)
if "type" in array_obj:
type_ = array_obj["type"] + " Array"
if "enum" in array_obj:
type_ = type_ + " Enum"
description_ += f' Options: {str(array_obj["enum"])}'
example_ = f'"{array_obj["enum"][0]}"'
if "Enum" not in type_:
if "string" in type_:
example_ = '"string"'
elif "number" in type_ or "integer" in type_:
example_ = '0'
elif "boolean" in type_:
example_ = 'true'
writeUp += f'\n<tr>\n<td width="20%">{item["name"]}</td> <td> <code>{type_}</code><br/>{description_}</td>\n</tr>'
example += f' "{item["name"]}": {example_},\n'
return writeUp + example + "\b}</pre>\n</div>\n</td>\n</tr>\n</table>"
def ResponseTable(requestBody):
writeUp = ""
array = False
order = 0
if "content" in requestBody:
component = requestBody["content"]["application/json"]["schema"]
if "$ref" in component:
component = component["$ref"].split("/")[1:]
elif "items" in component and "$ref" in component["items"]:
component = component["items"]["$ref"].split("/")[1:]
array = True
order += 1
else:
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2">{requestBody["description"]}</th>\n'
writeUp += '</tr>\n</thead>\n'
writeUp += f'<tr>\n<td width="20%">value</td> <td> <code>{component["items"]["type"]}</code> <br/>/</td>\n</tr>\n'
writeUp += '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n'
writeUp += f'[\n "{component["items"]["example"]}"\n]'
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
return writeUp
else:
component = requestBody["$ref"].split("/")[1:]
item_list = [component]
i = 0
while i < len(item_list):
request_object = doc
for item in item_list[i]:
request_object = request_object[item]
if "items" in request_object and "oneOf" in request_object["items"]:
prop = request_object["items"]["oneOf"]
example = '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n[\n ['
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2"><code>{item}</code> Model - {request_object["description"]}</th>\n'
writeUp += '</tr>\n</thead>'
for y in prop:
path = y["$ref"].split("/")[1:]
name = path[-1]
enum = ""
item_list.append(path)
request_object = doc
for item in path:
request_object = request_object[item]
if "enum" in request_object:
enum = " Options: " + str(request_object["enum"])
description_ = request_object["description"]
if description_[-1] != ".":
description_ += "."
writeUp += f'\n<tr>\n<td width="20%">{name}</td> <td> <code>{request_object["type"]}</code> <br/> {description_ + enum}</td>\n</tr>\n'
if "example" in request_object:
text = request_object["example"]
elif "enum" in request_object:
text = '"' + request_object["enum"][0] + '"'
example += f'\n {text},'
example += '\b\n ]\n]'
writeUp += example
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
i += 1
continue
elif "oneOf" in request_object:
for y in request_object["oneOf"]:
item_list.append(y["$ref"].split("/")[1:])
i += 1
continue
elif "properties" in request_object:
request_object_properties = request_object["properties"]
elif "content" in request_object:
item_list.append(request_object["content"]["application/json"]["schema"]["$ref"].split("/")[1:])
i += 1
continue
elif "type" in request_object and "properties" not in request_object:
request_object_properties = {item: request_object}
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
if "description" in request_object:
writeUp += f'<th colspan="2"><code>{item_list[i][-1]}</code> Model - {request_object["description"]}</th>\n'
else:
writeUp += f'<th colspan="2"><code>{item_list[i][-1]}</code> Model</th>\n'
writeUp += '</tr>\n</thead>\n'
example, html_property, item_list = ExampleWriting(request_object_properties, item_list, array, order)
if array:
array = False
order -= 1
for line in html_property:
writeUp += line
writeUp += '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n'
writeUp += example
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
i += 1
return writeUp
def ExampleWriting(request_object_properties, item_list, array=False, order=0):
tab = " " * order
if array:
example = "[\n {\n"
else:
example = "{\n"
line = []
for name, properties in request_object_properties.items():
type_ = properties["type"] if "type" in properties else "object"
description_ = properties["description"] if "description" in properties else "/"
if (example != "{\n" and not array) or (example != "[\n {\n" and array):
example += ",\n"
example_ = tab + f' "{name}": '
if type_ == "array":
example_ += '[\n'
if "type" in properties["items"]:
type_ = properties["items"]["type"] + " Array"
example_ += tab + f' "{properties["items"]["type"]}"'
elif "$ref" in properties["items"]:
ref = properties["items"]["$ref"].split("/")[1:]
type_ = ref[-1] + " Array"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+2)
example_ += tab + " " * 2 + write_up
elif type_ == "object":
if "additionalProperties" in properties:
add_prop = properties["additionalProperties"]
if "type" in add_prop:
prop_type = add_prop["type"]
if "format" in prop_type:
type_ = prop_type + f'$({prop_type["format"]})' + " object"
if prop_type["format"] == "date-time":
example_ += "2021-11-26T15:18:27.693Z"
else:
example_ += "0"
else:
type_ = prop_type + " object"
example_ += f'"{prop_type}"'
elif "$ref" in add_prop:
ref = add_prop["$ref"].split("/")[1:]
type_ = ref[-1] + " object"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+1)
example_ += write_up
elif "$ref" in properties:
ref = properties["$ref"].split("/")[1:]
type_ = ref[-1] + " object"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
description_ = request_object_["description"] if "description" in request_object_ else "/"
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+1)
example_ += write_up
elif "type" in request_object_:
properties = request_object_properties_ = request_object_
type_ = request_object_["type"]
description_ = request_object_["description"] if "description" in request_object_ else "/"
elif type_ == "integer" or type_ == "number":
example_ += "0"
elif type_ == "boolean":
example_ += "true"
elif type_ == "string":
if "format" in properties:
type_ += f'(${properties["format"]})'
example_ += "2021-11-26T15:18:27.693Z"
else:
example_ += '"string"'
if description_[-1] != ".":
description_ += "."
if "enum" in properties:
type_ += " Enum"
description_ += f' Options : {properties["enum"]}'
if "string" in type_:
example_ = tab + f' "{name}": "{properties["enum"][0]}"'
else:
example_ = tab + f' "{name}": {properties["enum"][0]}'
if "example" in properties:
eg = properties["example"]
type_ += f'<br/><i><sub>example: {eg}</sub></i>'
if isinstance(eg, str):
eg = '"' + eg + '"'
example_ = tab + f' "{name}": {eg}'
if "Array" in type_:
example_ += "\n" + tab + " ]"
if order == 0 or array:
line.append(f'<tr>\n<td width="20%">{name}</td> <td> <code>{type_}</code> <br/> {description_}</td>\n</tr>\n')
example += example_
if not array:
return example + "\n" + tab + "}", line, item_list
return example + "\n" + tab + "}\n" + " " * (order-1) + "]", line, item_list
for section, source in documentations.items():
yaml_file = open(source)
doc = yaml.load(yaml_file, Loader=yaml.Loader)
paths = doc["paths"]
for api_call, result in paths.items():
j = 1
content = result["post"] if "post" in result else result["get"]
# Create path if not exist
destination_folder = pathlib.Path("/".join(content["tags"]))
destination_folder.mkdir(parents=True, exist_ok=True)
# Create Introduction part
with open(destination_folder / f'{j:02} Introduction.html', "w") as html_file:
html_file.write("<p>\n")
html_file.write(f"{content['summary']}\n")
html_file.write("</p>\n")
j += 1
# Create Description part if having one
if "description" in content:
with open(destination_folder / f'{j:02} Description.html', "w") as html_file:
html_file.write('<p>\n')
html_file.write(f'{content["description"]}\n')
html_file.write('</p>\n')
j += 1
# Create Request part
with open(destination_folder / f'{j:02} Request.html', "w") as html_file:
description_ = ""
if "parameters" in content:
writeUp = RequestTable(api_call, content["parameters"])
elif "requestBody" in content:
if "description" in content["requestBody"]:
description_ = str(content["requestBody"]["description"])
if description_[-1] != ".":
description_ += "."
description_ += " "
writeUp = ResponseTable(content["requestBody"])
else:
writeUp = '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="1"><code>{api_call}</code> Method</th>\n</tr>\n</thead>\n'
writeUp += f'</tr>\n<td><code>{api_call}</code> method takes no parameters.</td>\n</tr>\n</table>'
description_ += f'The <code>{api_call}</code> API accepts requests in the following format:\n'
html_file.write("<p>\n" + description_ + "</p>\n")
html_file.write(writeUp)
j += 1
# Create Response part
with open(destination_folder / f'{j:02} Responses.html', "w") as html_file:
html_file.write('<p>\n')
html_file.write(f'The <code>{api_call}</code> API provides a response in the following format:\n')
html_file.write('</p>\n')
request_body = content["responses"]
for code, properties in request_body.items():
if code == "200":
html_file.write('<h4>200 Success</h4>\n')
elif code == "401":
html_file.write('<h4>401 Authentication Error</h4>\n<table class="table qc-table">\n<thead>\n<tr>\n')
html_file.write('<th colspan="2"><code>UnauthorizedError</code> Model - Unauthorized response from the API. Key is missing, invalid, or timestamp is too old for hash.</th>\n')
html_file.write('</tr>\n</thead>\n<tr>\n<td width="20%">www_authenticate</td> <td> <code>string</code> <br/> Header</td>\n</tr>\n</table>\n')
continue
elif code == "404":
html_file.write('<h4>404 Not Found Error</h4>\n')
html_file.write('<p>The requested item, index, page was not found.</p>\n')
continue
elif code == "default":
html_file.write('<h4>Default Generic Error</h4>\n')
writeUp = ResponseTable(properties)
html_file.write(writeUp)
print(f"Documentation of {section} is generated and inplace!") |
test/core/024-sc4-gridftp-http/Rosetta.py | ahnitz/pegasus | 127 | 4329 | <reponame>ahnitz/pegasus
#!/usr/bin/env python3
import logging
import sys
import subprocess
from pathlib import Path
from datetime import datetime
from Pegasus.api import *
logging.basicConfig(level=logging.DEBUG)
# --- Work Dir Setup -----------------------------------------------------------
RUN_ID = "024-sc4-gridftp-http-" + datetime.now().strftime("%s")
TOP_DIR = Path.cwd()
WORK_DIR = TOP_DIR / "work"
try:
Path.mkdir(WORK_DIR)
except FileExistsError:
pass
# --- Configuration ------------------------------------------------------------
print("Generating pegasus.properties at: {}".format(TOP_DIR / "pegasus.properties"))
props = Properties()
props["pegasus.dir.useTimestamp"] = "true"
props["pegasus.dir.storage.deep"] = "false"
props["pegasus.data.configuration"] = "nonsharedfs"
with (TOP_DIR / "pegasus.properties").open(mode="w") as f:
props.write(f)
# --- Sites --------------------------------------------------------------------
print("Generating site catalog at: sites.yml")
LOCAL = "local"
CONDOR_POOL = "condorpool"
STAGING_SITE = "staging_site"
try:
pegasus_config = subprocess.run(
["pegasus-config", "--bin"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
except FileNotFoundError as e:
print("Unable to find pegasus-config")
assert pegasus_config.returncode == 0
PEGASUS_BIN_DIR = pegasus_config.stdout.decode().strip()
sites = """
pegasus: "5.0"
sites:
-
name: "condor_pool"
arch: "x86_64"
os.type: "linux"
profiles:
condor:
universe: "vanilla"
pegasus:
style: "condor"
-
name: "staging_site"
arch: "x86_64"
os.type: "linux"
directories:
-
type: "sharedScratch"
path: "/lizard/scratch-90-days/http-scratch/ptesting"
fileServers:
-
operation: "get"
url: "http://workflow.isi.edu/shared-scratch/ptesting"
-
operation: "put"
url: "gsiftp://workflow.isi.edu/lizard/scratch-90-days/http-scratch/ptesting"
-
name: "local"
arch: "x86_64"
os.type: "linux"
os.release: "rhel"
os.version: "7"
directories:
-
type: "sharedScratch"
path: "{work_dir}/scratch"
fileServers:
-
operation: "all"
url: "file://{work_dir}/scratch"
-
type: "localStorage"
path: "{work_dir}/outputs"
fileServers:
-
operation: "all"
url: "file://{work_dir}/outputs"
profiles:
env:
PEGASUS_BIN_DIR: "{pegasus_bin_dir}"
""".format(
work_dir=str(WORK_DIR), pegasus_bin_dir=PEGASUS_BIN_DIR
)
with (TOP_DIR / "sites.yml").open(mode="w") as f:
f.write(sites)
# --- Transformations ----------------------------------------------------------
rosetta_exe = Transformation(
"rosetta.exe",
arch=Arch.X86_64,
os_type=OS.LINUX,
site="local",
pfn="file://" + str(TOP_DIR / "rosetta.exe"),
is_stageable=True,
).add_pegasus_profile(clusters_size=3)
tc = TransformationCatalog().add_transformations(rosetta_exe)
# --- Replicas & Workflow ------------------------------------------------------
rc = ReplicaCatalog()
# add all files in minirosetta_database
inputs = list()
def get_files(d: Path) -> None:
for p in d.iterdir():
if p.is_file():
f = File(str(p))
inputs.append(f)
rc.add_replica(LOCAL, str(p), str(p.resolve()))
else:
get_files(p)
get_files(Path("minirosetta_database"))
f1 = File("design.resfile")
inputs.append(f1)
rc.add_replica(LOCAL, f1, str(Path("design.resfile").resolve()))
f2 = File("repack.resfile")
inputs.append(f2)
rc.add_replica(LOCAL, f2, str(Path("repack.resfile").resolve()))
wf = Workflow("rosetta")
pdb_files = list(Path("pdbs").iterdir())
for i in range(10):
current_file = pdb_files[i]
if current_file.is_file():
job = (
Job(rosetta_exe, _id=current_file.name.replace(".pdb", ""))
.add_inputs(File(current_file.name), *inputs)
.add_outputs(File(current_file.name + ".score.sc"), register_replica=True)
.add_args(
"-in:file:s",
current_file.name,
"-out:prefix " + current_file.name + ".",
"-database ./minirosetta_database",
"-linmem_ig 10",
"-nstruct 1",
"-pert_num 2",
"-inner_num 1",
"-jd2::ntrials 1",
)
)
rc.add_replica("local", current_file.name, str(current_file.resolve()))
wf.add_jobs(job)
# write rc to separate file for registration jobs
with (TOP_DIR / "replicas.yml").open("w") as f:
rc.write(f)
wf.add_transformation_catalog(tc)
try:
wf.plan(
dir=str(WORK_DIR),
verbose=5,
sites=[CONDOR_POOL],
staging_sites={CONDOR_POOL: STAGING_SITE},
)
except PegasusClientError as e:
print(e.output)
|
ludwig/data/cache/manager.py | ludwig-ai/ludw | 970 | 4337 | import logging
import os
import re
import uuid
from pathlib import Path
from ludwig.constants import CHECKSUM, META, TEST, TRAINING, VALIDATION
from ludwig.data.cache.util import calculate_checksum
from ludwig.utils import data_utils
from ludwig.utils.fs_utils import delete, path_exists
logger = logging.getLogger(__name__)
def alphanum(v):
"""Filters a string to only its alphanumeric characters."""
return re.sub(r"\W+", "", v)
class DatasetCache:
def __init__(self, config, checksum, cache_map, dataset_manager):
self.config = config
self.checksum = checksum
self.cache_map = cache_map
self.dataset_manager = dataset_manager
def get(self):
training_set_metadata_fp = self.cache_map[META]
if not path_exists(training_set_metadata_fp):
return None
cache_training_set_metadata = data_utils.load_json(training_set_metadata_fp)
cached_training_set = self.cache_map[TRAINING] if path_exists(self.cache_map[TRAINING]) else None
cached_test_set = self.cache_map[TEST] if path_exists(self.cache_map[TEST]) else None
cached_validation_set = self.cache_map[VALIDATION] if path_exists(self.cache_map[VALIDATION]) else None
valid = self.checksum == cache_training_set_metadata.get(CHECKSUM) and cached_training_set is not None
return valid, cache_training_set_metadata, cached_training_set, cached_test_set, cached_validation_set
def put(self, training_set, test_set, validation_set, training_set_metadata):
logger.info("Writing preprocessed training set cache")
training_set = self.dataset_manager.save(
self.cache_map[TRAINING],
training_set,
self.config,
training_set_metadata,
TRAINING,
)
if test_set is not None:
logger.info("Writing preprocessed test set cache")
test_set = self.dataset_manager.save(
self.cache_map[TEST],
test_set,
self.config,
training_set_metadata,
TEST,
)
if validation_set is not None:
logger.info("Writing preprocessed validation set cache")
validation_set = self.dataset_manager.save(
self.cache_map[VALIDATION],
validation_set,
self.config,
training_set_metadata,
VALIDATION,
)
logger.info("Writing train set metadata")
data_utils.save_json(self.cache_map[META], training_set_metadata)
return training_set, test_set, validation_set, training_set_metadata
def delete(self):
for fname in self.cache_map.values():
if path_exists(fname):
delete(fname)
class CacheManager:
def __init__(self, dataset_manager, cache_dir=None):
self._dataset_manager = dataset_manager
self._cache_dir = cache_dir
def get_dataset_cache(self, config, dataset=None, training_set=None, test_set=None, validation_set=None):
if dataset is not None:
key = self.get_cache_key(dataset, config)
cache_map = {
META: self.get_cache_path(dataset, key, META, "json"),
TRAINING: self.get_cache_path(dataset, key, TRAINING),
TEST: self.get_cache_path(dataset, key, TEST),
VALIDATION: self.get_cache_path(dataset, key, VALIDATION),
}
return DatasetCache(config, key, cache_map, self._dataset_manager)
else:
key = self.get_cache_key(training_set, config)
cache_map = {
META: self.get_cache_path(training_set, key, META, "json"),
TRAINING: self.get_cache_path(training_set, key, TRAINING),
TEST: self.get_cache_path(test_set, key, TEST),
VALIDATION: self.get_cache_path(validation_set, key, VALIDATION),
}
return DatasetCache(config, key, cache_map, self._dataset_manager)
def get_cache_key(self, dataset, config):
if not isinstance(dataset, str):
# TODO(travis): could try hashing the in-memory dataset, but this is tricky for Dask
return str(uuid.uuid1())
return calculate_checksum(dataset, config)
def get_cache_path(self, dataset, key, tag, ext=None):
if not isinstance(dataset, str):
dataset = None
if self._cache_dir is None and dataset is not None:
# Use the input dataset filename (minus the extension) as the cache path
stem = Path(dataset).stem
else:
# To avoid collisions across different directories, we use the unique checksum
# as the cache path
stem = alphanum(key)
ext = ext or self.data_format
cache_fname = f"{stem}.{tag}.{ext}"
return os.path.join(self.get_cache_directory(dataset), cache_fname)
def get_cache_directory(self, input_fname):
if self._cache_dir is None:
if input_fname is not None:
return os.path.dirname(input_fname)
return "."
return self._cache_dir
def can_cache(self, skip_save_processed_input):
return self._dataset_manager.can_cache(skip_save_processed_input)
@property
def data_format(self):
return self._dataset_manager.data_format
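# A minimal sketch (not part of the module) of how the cache paths above are
# composed, assuming a dataset_manager whose data_format is "hdf5" and that the
# TRAINING constant equals "training":
#
#   manager = CacheManager(dataset_manager, cache_dir=None)
#   manager.get_cache_path("/data/train.csv", key="abc123", tag=TRAINING)
#   # -> "/data/train.training.hdf5" (cached next to the input dataset)
#
#   manager = CacheManager(dataset_manager, cache_dir="/tmp/ludwig_cache")
#   manager.get_cache_path("/data/train.csv", key="abc123", tag=TRAINING)
#   # -> "/tmp/ludwig_cache/abc123.training.hdf5" (checksum-based stem)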
|
guillotina/contrib/workflows/events.py | rboixaderg/guillotina | 173 | 4351 | from guillotina.contrib.workflows.interfaces import IWorkflowChangedEvent
from guillotina.events import ObjectEvent
from zope.interface import implementer
@implementer(IWorkflowChangedEvent)
class WorkflowChangedEvent(ObjectEvent):
"""An object has been moved"""
def __init__(self, object, workflow, action, comments):
ObjectEvent.__init__(self, object)
self.object = object
self.workflow = workflow
self.action = action
self.comments = comments
|
tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/html5test.py | google-ar/chromium | 2,151 | 4363 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from common.chrome_proxy_shared_page_state import ChromeProxySharedPageState
from telemetry.page import page as page_module
from telemetry import story
class HTML5TestPage(page_module.Page):
def __init__(self, url, page_set):
super(HTML5TestPage, self).__init__(url=url, page_set=page_set,
shared_page_state_class=ChromeProxySharedPageState)
class HTML5TestStorySet(story.StorySet):
""" Chrome proxy test page for traffic over https. """
def __init__(self):
super(HTML5TestStorySet, self).__init__()
urls_list = [
'http://html5test.com/',
]
for url in urls_list:
self.AddStory(HTML5TestPage(url, self))
|
video_encoding/fields.py | fossabot/django-video-encoding | 164 | 4406 | from django.db.models.fields.files import (FieldFile, ImageField,
ImageFileDescriptor)
from django.utils.translation import ugettext as _
from .backends import get_backend_class
from .files import VideoFile
class VideoFileDescriptor(ImageFileDescriptor):
pass
class VideoFieldFile(VideoFile, FieldFile):
def delete(self, save=True):
# Clear the video info cache
if hasattr(self, '_info_cache'):
del self._info_cache
super(VideoFieldFile, self).delete(save=save)
class VideoField(ImageField):
attr_class = VideoFieldFile
descriptor_class = VideoFileDescriptor
description = _("Video")
def __init__(self, verbose_name=None, name=None, duration_field=None,
**kwargs):
self.duration_field = duration_field
super(VideoField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(ImageField, self).check(**kwargs)
errors.extend(self._check_backend())
return errors
def _check_backend(self):
backend = get_backend_class()
return backend.check()
def to_python(self, data):
# use FileField method
return super(ImageField, self).to_python(data)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
_file = getattr(instance, self.attname)
# we need a real file
if not _file._committed:
return
# write `width` and `height`
super(VideoField, self).update_dimension_fields(instance, force,
*args, **kwargs)
if not self.duration_field:
return
# Nothing to update if we have no file and not being forced to update.
if not _file and not force:
return
if getattr(instance, self.duration_field) and not force:
return
# get duration if file is defined
duration = _file.duration if _file else None
# update duration
setattr(instance, self.duration_field, duration)
def formfield(self, **kwargs):
# use normal FileFieldWidget for now
return super(ImageField, self).formfield(**kwargs)
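# A minimal usage sketch (illustrative only, not shipped with this module),
# showing how `duration_field` is wired so update_dimension_fields() can store
# the detected duration on the model instance:
#
#   from django.db import models
#   from video_encoding.fields import VideoField
#
#   class Video(models.Model):
#       duration = models.FloatField(null=True, editable=False)
#       file = VideoField(upload_to='videos', duration_field='duration')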
|
dialogue-engine/test/programytest/config/brain/test_oob.py | cotobadesign/cotoba-agent-oss | 104 | 4415 | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.config.brain.oob import BrainOOBConfiguration
from programy.clients.events.console.config import ConsoleConfiguration
class BrainOOBConfigurationTests(unittest.TestCase):
def test_oob_with_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
oobs:
default:
classname: programy.oob.defaults.default.DefaultOutOfBandProcessor
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
oobs_config = yaml.get_section("oobs", brain_config)
self.assertIsNotNone(oobs_config)
oob_config = BrainOOBConfiguration("default")
oob_config.load_config_section(yaml, oobs_config, ".")
self.assertEqual("programy.oob.defaults.default.DefaultOutOfBandProcessor", oob_config.classname)
def test_default_without_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
oobs:
default:
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
oobs_config = yaml.get_section("oobs", brain_config)
self.assertIsNotNone(oobs_config)
oob_config = BrainOOBConfiguration("default")
oob_config.load_config_section(yaml, oobs_config, ".")
self.assertIsNone(oob_config.classname)
|
widgets/datepicker_ctrl/codegen.py | RSabet/wxGlade | 225 | 4438 | """\
Code generator functions for wxDatePickerCtrl objects
@copyright: 2002-2007 <NAME>
@copyright: 2014-2016 <NAME>
@copyright: 2016-2021 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import common, compat
import wcodegen
class PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter):
tmpl = '%(name)s = %(klass)s(%(parent)s, %(id)s%(style)s)\n'
# XXX the following needs to depend on the code generator when Phoenix is about to be supported fully:
if compat.IS_PHOENIX:
import_modules = ['import wx.adv\n']
if compat.IS_PHOENIX:
def cn(self, name):
# don't process already formatted items again
if name.startswith('wx.'):
return name
if name.startswith('wx'):
return 'wx.adv.' + name[2:]
elif name.startswith('EVT_'):
return 'wx.adv.' + name
return name
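        # Illustration of the translation above (Phoenix only):
        #   cn('wxDatePickerCtrl')      -> 'wx.adv.DatePickerCtrl'
        #   cn('EVT_DATE_CHANGED')      -> 'wx.adv.EVT_DATE_CHANGED'
        #   cn('wx.adv.DatePickerCtrl') is returned unchanged.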
def _prepare_tmpl_content(self, obj):
wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, obj)
self.has_setdefault = int(obj.properties.get('default', 0))
return
class CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter):
import_modules = ['<wx/datectrl.h>']
tmpl = '%(name)s = new %(klass)s(%(parent)s, %(id)s, ' \
'wxDefaultDateTime, wxDefaultPosition, wxDefaultSize, ' \
'%(style)s);\n'
prefix_style = False
set_default_style = True
def _prepare_tmpl_content(self, obj):
wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, obj)
self.has_setdefault = int(obj.properties.get('default', 0))
return
def xrc_code_generator(obj):
xrcgen = common.code_writers['XRC']
class DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject):
def write_property(self, name, val, output, tabs):
if name == 'label':
# translate & into _ as accelerator marker
val2 = val.replace('&', '_')
if val.count('&&') > 0:
while True:
index = val.find('&&')
if index < 0:
break
val = val2[:index] + '&&' + val2[index+2:]
else:
val = val2
xrcgen.DefaultXrcObject.write_property(self, name, val, output, tabs)
return DatePickerCtrlXrcObject(obj)
def initialize():
klass = 'wxDatePickerCtrl'
common.class_names['EditDatePickerCtrl'] = klass
common.register('python', klass, PythonDatePickerCtrlGenerator(klass))
common.register('C++', klass, CppDatePickerCtrlGenerator(klass))
common.register('XRC', klass, xrc_code_generator)
|
tests/checks/run_performance_tests.py | stjordanis/mljar-supervised | 1,882 | 4442 | import os
import sys
import unittest
from tests.tests_bin_class.test_performance import *
if __name__ == "__main__":
unittest.main()
|
tests/unit/small_text/integrations/pytorch/test_strategies.py | chschroeder/small-text | 218 | 4451 | <reponame>chschroeder/small-text<filename>tests/unit/small_text/integrations/pytorch/test_strategies.py
import unittest
import pytest
from small_text.integrations.pytorch.exceptions import PytorchNotFoundError
try:
from small_text.integrations.pytorch.query_strategies import (
BADGE,
ExpectedGradientLength,
ExpectedGradientLengthMaxWord)
except PytorchNotFoundError:
pass
@pytest.mark.pytorch
class BADGETest(unittest.TestCase):
def test_init_default(self):
strategy = BADGE(2)
self.assertEqual(2, strategy.num_classes)
def test_init(self):
strategy = BADGE(4)
self.assertEqual(4, strategy.num_classes)
def test_badge_str(self):
strategy = BADGE(2)
expected_str = 'BADGE(num_classes=2)'
self.assertEqual(expected_str, str(strategy))
@pytest.mark.pytorch
class ExpectedGradientLengthTest(unittest.TestCase):
def test_init_default(self):
strategy = ExpectedGradientLength(2)
self.assertEqual(2, strategy.num_classes)
self.assertEqual(50, strategy.batch_size)
self.assertEqual('cuda', strategy.device)
def test_init(self):
strategy = ExpectedGradientLength(4, batch_size=100, device='cpu')
self.assertEqual(4, strategy.num_classes)
self.assertEqual(100, strategy.batch_size)
self.assertEqual('cpu', strategy.device)
def test_expected_gradient_length_str(self):
strategy = ExpectedGradientLength(2)
expected_str = 'ExpectedGradientLength()'
self.assertEqual(expected_str, str(strategy))
@pytest.mark.pytorch
class ExpectedGradientLengthMaxWordTest(unittest.TestCase):
def test_init_default(self):
strategy = ExpectedGradientLengthMaxWord(2, 'embedding')
self.assertEqual(2, strategy.num_classes)
self.assertEqual(50, strategy.batch_size)
self.assertEqual('cuda', strategy.device)
self.assertEqual('embedding', strategy.layer_name)
def test_init(self):
strategy = ExpectedGradientLengthMaxWord(4, 'embedding', batch_size=100, device='cpu')
self.assertEqual(4, strategy.num_classes)
self.assertEqual(100, strategy.batch_size)
self.assertEqual('cpu', strategy.device)
self.assertEqual('embedding', strategy.layer_name)
|
pymterm/colour/tango.py | stonewell/pymterm | 102 | 4452 | <gh_stars>100-1000
TANGO_PALLETE = [
'2e2e34343636',
'cccc00000000',
'4e4e9a9a0606',
'c4c4a0a00000',
'34346565a4a4',
'757550507b7b',
'060698989a9a',
'd3d3d7d7cfcf',
'555557575353',
'efef29292929',
'8a8ae2e23434',
'fcfce9e94f4f',
'72729f9fcfcf',
'adad7f7fa8a8',
'3434e2e2e2e2',
'eeeeeeeeecec',
]
def parse_tango_color(c):
r = int(c[:4][:2], 16)
g = int(c[4:8][:2], 16)
b = int(c[8:][:2], 16)
return [r, g, b, 0xFF]
def apply_color(cfg, color_table):
cfg.default_foreground_color = parse_tango_color('eeeeeeeeecec')
cfg.default_background_color = parse_tango_color('323232323232')
cfg.default_cursor_color = cfg.default_foreground_color
for i in range(len(TANGO_PALLETE)):
if i < len(color_table):
color_table[i] = parse_tango_color(TANGO_PALLETE[i])
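# Illustration of the palette format handled above: each entry packs the three
# channels as 16-bit hex (rrrrggggbbbb); parse_tango_color() keeps the high
# byte of each channel and appends full alpha, e.g.
#   parse_tango_color('2e2e34343636') -> [0x2E, 0x34, 0x36, 0xFF] == [46, 52, 54, 255]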
|
examples/django_mongoengine/bike/models.py | pfrantz/graphene-mongo | 260 | 4456 | <filename>examples/django_mongoengine/bike/models.py
from mongoengine import Document
from mongoengine.fields import (
FloatField,
StringField,
ListField,
URLField,
ObjectIdField,
)
class Shop(Document):
meta = {"collection": "shop"}
ID = ObjectIdField()
name = StringField()
address = StringField()
website = URLField()
class Bike(Document):
meta = {"collection": "bike"}
ID = ObjectIdField()
name = StringField()
brand = StringField()
year = StringField()
size = ListField(StringField())
wheel_size = FloatField()
type = StringField()
|
pyxley/charts/plotly/base.py | snowind/pyxley | 2,536 | 4463 |
from ..charts import Chart
from flask import jsonify, request
_BASE_CONFIG = {
"showLink": False,
"displaylogo": False,
"modeBarButtonsToRemove": ["sendDataToCloud"]
}
class PlotlyAPI(Chart):
""" Base class for Plotly.js API
        This class is used to create charts using the plotly.js API.
        To keep this general, this chart does not have a default
        method of transmitting data. Instead, the user must supply
        a route_func method.
"""
def __init__(self, chart_id, url, route_func, init_params={}):
options = {
"chartid": chart_id,
"url": url,
"params": init_params
}
super(PlotlyAPI, self).__init__("PlotlyAPI", options, route_func)
@staticmethod
def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG):
""" basic line plot
dataframe to json for a line plot
Args:
df (pandas.DataFrame): input dataframe
xypairs (list): list of tuples containing column names
mode (str): plotly.js mode (e.g. lines)
layout (dict): layout parameters
config (dict): config parameters
"""
if df.empty:
return {
"x": [],
"y": [],
"mode": mode
}
_data = []
for x, y in xypairs:
if (x in df.columns) and (y in df.columns):
_data.append(
{
"x": df[x].values.tolist(),
"y": df[y].values.tolist(),
"mode": mode
}
)
return {
"data": _data,
"layout": layout,
"config": config
}
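# A minimal usage sketch (assumes pandas is installed; not part of this module):
#
#   import pandas as pd
#   df = pd.DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]})
#   payload = PlotlyAPI.line_plot(df, xypairs=[("x", "y")], mode="lines")
#   # payload["data"] == [{"x": [1, 2, 3], "y": [2, 4, 6], "mode": "lines"}]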
|
Lib/test/test_runpy.py | arvindm95/unladen-swallow | 2,293 | 4467 | # Test the runpy module
import unittest
import os
import os.path
import sys
import tempfile
from test.test_support import verbose, run_unittest, forget
from runpy import _run_code, _run_module_code, run_module
# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path
# Set up the test code and expected results
class RunModuleCodeTest(unittest.TestCase):
expected_result = ["Top level assignment", "Lower level reference"]
test_source = (
"# Check basic code execution\n"
"result = ['Top level assignment']\n"
"def f():\n"
" result.append('Lower level reference')\n"
"f()\n"
"# Check the sys module\n"
"import sys\n"
"run_argv0 = sys.argv[0]\n"
"run_name_in_sys_modules = __name__ in sys.modules\n"
"if run_name_in_sys_modules:\n"
" module_in_sys_modules = globals() is sys.modules[__name__].__dict__\n"
"# Check nested operation\n"
"import runpy\n"
"nested = runpy._run_module_code('x=1\\n', mod_name='<run>')\n"
)
def test_run_code(self):
saved_argv0 = sys.argv[0]
d = _run_code(self.test_source, {})
self.failUnless(d["result"] == self.expected_result)
self.failUnless(d["__name__"] is None)
self.failUnless(d["__file__"] is None)
self.failUnless(d["__loader__"] is None)
self.failUnless(d["__package__"] is None)
self.failUnless(d["run_argv0"] is saved_argv0)
self.failUnless("run_name" not in d)
self.failUnless(sys.argv[0] is saved_argv0)
def test_run_module_code(self):
initial = object()
name = "<Nonsense>"
file = "Some other nonsense"
loader = "Now you're just being silly"
package = '' # Treat as a top level module
d1 = dict(initial=initial)
saved_argv0 = sys.argv[0]
d2 = _run_module_code(self.test_source,
d1,
name,
file,
loader,
package)
self.failUnless("result" not in d1)
self.failUnless(d2["initial"] is initial)
self.failUnless(d2["result"] == self.expected_result)
self.failUnless(d2["nested"]["x"] == 1)
self.failUnless(d2["__name__"] is name)
self.failUnless(d2["run_name_in_sys_modules"])
self.failUnless(d2["module_in_sys_modules"])
self.failUnless(d2["__file__"] is file)
self.failUnless(d2["run_argv0"] is file)
self.failUnless(d2["__loader__"] is loader)
self.failUnless(d2["__package__"] is package)
self.failUnless(sys.argv[0] is saved_argv0)
self.failUnless(name not in sys.modules)
class RunModuleTest(unittest.TestCase):
def expect_import_error(self, mod_name):
try:
run_module(mod_name)
except ImportError:
pass
else:
self.fail("Expected import error for " + mod_name)
def test_invalid_names(self):
# Builtin module
self.expect_import_error("sys")
# Non-existent modules
self.expect_import_error("sys.imp.eric")
self.expect_import_error("os.path.half")
self.expect_import_error("a.bee")
self.expect_import_error(".howard")
self.expect_import_error("..eaten")
# Package
self.expect_import_error("logging")
def test_library_module(self):
run_module("runpy")
def _add_pkg_dir(self, pkg_dir):
os.mkdir(pkg_dir)
pkg_fname = os.path.join(pkg_dir, "__init__"+os.extsep+"py")
pkg_file = open(pkg_fname, "w")
pkg_file.close()
return pkg_fname
def _make_pkg(self, source, depth):
pkg_name = "__runpy_pkg__"
test_fname = "runpy_test"+os.extsep+"py"
pkg_dir = sub_dir = tempfile.mkdtemp()
if verbose: print " Package tree in:", sub_dir
sys.path.insert(0, pkg_dir)
if verbose: print " Updated sys.path:", sys.path[0]
for i in range(depth):
sub_dir = os.path.join(sub_dir, pkg_name)
pkg_fname = self._add_pkg_dir(sub_dir)
if verbose: print " Next level in:", sub_dir
if verbose: print " Created:", pkg_fname
mod_fname = os.path.join(sub_dir, test_fname)
mod_file = open(mod_fname, "w")
mod_file.write(source)
mod_file.close()
if verbose: print " Created:", mod_fname
mod_name = (pkg_name+".")*depth + "runpy_test"
return pkg_dir, mod_fname, mod_name
def _del_pkg(self, top, depth, mod_name):
for entry in list(sys.modules):
if entry.startswith("__runpy_pkg__"):
del sys.modules[entry]
if verbose: print " Removed sys.modules entries"
del sys.path[0]
if verbose: print " Removed sys.path entry"
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
try:
os.remove(os.path.join(root, name))
except OSError, ex:
if verbose: print ex # Persist with cleaning up
for name in dirs:
fullname = os.path.join(root, name)
try:
os.rmdir(fullname)
except OSError, ex:
if verbose: print ex # Persist with cleaning up
try:
os.rmdir(top)
if verbose: print " Removed package tree"
except OSError, ex:
if verbose: print ex # Persist with cleaning up
def _check_module(self, depth):
pkg_dir, mod_fname, mod_name = (
self._make_pkg("x=1\n", depth))
forget(mod_name)
try:
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name) # Read from source
self.failUnless("x" in d1)
self.failUnless(d1["x"] == 1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name) # Read from bytecode
self.failUnless("x" in d2)
self.failUnless(d2["x"] == 1)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def _add_relative_modules(self, base_dir, source, depth):
if depth <= 1:
raise ValueError("Relative module test needs depth > 1")
pkg_name = "__runpy_pkg__"
module_dir = base_dir
for i in range(depth):
parent_dir = module_dir
module_dir = os.path.join(module_dir, pkg_name)
# Add sibling module
sibling_fname = os.path.join(module_dir, "sibling"+os.extsep+"py")
sibling_file = open(sibling_fname, "w")
sibling_file.close()
if verbose: print " Added sibling module:", sibling_fname
# Add nephew module
uncle_dir = os.path.join(parent_dir, "uncle")
self._add_pkg_dir(uncle_dir)
if verbose: print " Added uncle package:", uncle_dir
cousin_dir = os.path.join(uncle_dir, "cousin")
self._add_pkg_dir(cousin_dir)
if verbose: print " Added cousin package:", cousin_dir
nephew_fname = os.path.join(cousin_dir, "nephew"+os.extsep+"py")
nephew_file = open(nephew_fname, "w")
nephew_file.close()
if verbose: print " Added nephew module:", nephew_fname
def _check_relative_imports(self, depth, run_name=None):
contents = r"""\
from __future__ import absolute_import
from . import sibling
from ..uncle.cousin import nephew
"""
pkg_dir, mod_fname, mod_name = (
self._make_pkg(contents, depth))
try:
self._add_relative_modules(pkg_dir, contents, depth)
pkg_name = mod_name.rpartition('.')[0]
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name, run_name=run_name) # Read from source
self.failUnless("__package__" in d1)
self.failUnless(d1["__package__"] == pkg_name)
self.failUnless("sibling" in d1)
self.failUnless("nephew" in d1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
self.failUnless("__package__" in d2)
self.failUnless(d2["__package__"] == pkg_name)
self.failUnless("sibling" in d2)
self.failUnless("nephew" in d2)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def test_run_module(self):
for depth in range(4):
if verbose: print "Testing package depth:", depth
self._check_module(depth)
def test_explicit_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing relative imports at depth:", depth
self._check_relative_imports(depth)
def test_main_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing main relative imports at depth:", depth
self._check_relative_imports(depth, "__main__")
def test_main():
run_unittest(RunModuleCodeTest)
run_unittest(RunModuleTest)
if __name__ == "__main__":
test_main()
|
mmdet/ops/dcn/__init__.py | TJUsym/TJU_Advanced_CV_Homework | 1,158 | 4471 | <reponame>TJUsym/TJU_Advanced_CV_Homework
from .functions.deform_conv import deform_conv, modulated_deform_conv
from .functions.deform_pool import deform_roi_pooling
from .modules.deform_conv import (DeformConv, ModulatedDeformConv,
DeformConvPack, ModulatedDeformConvPack)
from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack,
ModulatedDeformRoIPoolingPack)
__all__ = [
'DeformConv', 'DeformConvPack', 'ModulatedDeformConv',
'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv',
'deform_roi_pooling'
]
|
.modules/.theHarvester/discovery/twittersearch.py | termux-one/EasY_HaCk | 1,103 | 4486 | import string
import requests
import sys
import myparser
import re
class search_twitter:
def __init__(self, word, limit):
self.word = word.replace(' ', '%20')
self.results = ""
self.totalresults = ""
self.server = "www.google.com"
self.hostname = "www.google.com"
self.userAgent = "(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100116 Firefox/3.7"
self.quantity = "100"
self.limit = int(limit)
self.counter = 0
def do_search(self):
try:
urly="https://"+ self.server + "/search?num=100&start=" + str(self.counter) + "&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20" + self.word
except Exception, e:
print e
headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'}
try:
r=requests.get(urly,headers=headers)
except Exception,e:
print e
self.results = r.content
self.totalresults += self.results
def get_people(self):
rawres = myparser.parser(self.totalresults, self.word)
return rawres.people_twitter()
def process(self):
while (self.counter < self.limit):
self.do_search()
self.counter += 100
print "\tSearching " + str(self.counter) + " results.."
|
tests/basics/generator_pend_throw.py | iotctl/pycopy | 663 | 4493 | def gen():
i = 0
while 1:
yield i
i += 1
g = gen()
try:
g.pend_throw
except AttributeError:
print("SKIP")
raise SystemExit
print(next(g))
print(next(g))
g.pend_throw(ValueError())
v = None
try:
v = next(g)
except Exception as e:
print("raised", repr(e))
print("ret was:", v)
# It's legal to pend exception in a just-started generator, just the same
# as it's legal to .throw() into it.
g = gen()
g.pend_throw(ValueError())
try:
next(g)
except ValueError:
print("ValueError from just-started gen")
|
lib/roi_data/loader.py | BarneyQiao/pcl.pytorch | 233 | 4511 | import math
import numpy as np
import numpy.random as npr
import torch
import torch.utils.data as data
import torch.utils.data.sampler as torch_sampler
from torch.utils.data.dataloader import default_collate
from torch._six import int_classes as _int_classes
from core.config import cfg
from roi_data.minibatch import get_minibatch
import utils.blob as blob_utils
# from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes
class RoiDataLoader(data.Dataset):
def __init__(self, roidb, num_classes, training=True):
self._roidb = roidb
self._num_classes = num_classes
self.training = training
self.DATA_SIZE = len(self._roidb)
def __getitem__(self, index_tuple):
index, ratio = index_tuple
single_db = [self._roidb[index]]
blobs, valid = get_minibatch(single_db, self._num_classes)
#TODO: Check if minibatch is valid ? If not, abandon it.
# Need to change _worker_loop in torch.utils.data.dataloader.py.
# Squeeze batch dim
# for key in blobs:
# if key != 'roidb':
# blobs[key] = blobs[key].squeeze(axis=0)
blobs['data'] = blobs['data'].squeeze(axis=0)
return blobs
def __len__(self):
return self.DATA_SIZE
def cal_minibatch_ratio(ratio_list):
"""Given the ratio_list, we want to make the RATIO same for each minibatch on each GPU.
Note: this only work for 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob`
and 2) cfg.TRAIN.SCALES containing SINGLE scale.
Since all prepared images will have same min side length of cfg.TRAIN.SCALES[0], we can
pad and batch images base on that.
"""
DATA_SIZE = len(ratio_list)
ratio_list_minibatch = np.empty((DATA_SIZE,))
num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers
for i in range(num_minibatch):
left_idx = i * cfg.TRAIN.IMS_PER_BATCH
right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1)
if ratio_list[right_idx] < 1:
# for ratio < 1, we preserve the leftmost in each batch.
target_ratio = ratio_list[left_idx]
elif ratio_list[left_idx] > 1:
# for ratio > 1, we preserve the rightmost in each batch.
target_ratio = ratio_list[right_idx]
else:
# for ratio cross 1, we make it to be 1.
target_ratio = 1
ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio
return ratio_list_minibatch
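# Worked example (illustrative): with cfg.TRAIN.IMS_PER_BATCH == 2 and a sorted
# ratio_list of [0.5, 0.8, 1.2, 1.5], the minibatches become [0.5, 0.8] with
# target ratio 0.5 (all < 1, keep leftmost) and [1.2, 1.5] with target ratio
# 1.5 (all > 1, keep rightmost), so cal_minibatch_ratio() returns
# [0.5, 0.5, 1.5, 1.5].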
class MinibatchSampler(torch_sampler.Sampler):
def __init__(self, ratio_list, ratio_index):
self.ratio_list = ratio_list
self.ratio_index = ratio_index
self.num_data = len(ratio_list)
def __iter__(self):
rand_perm = npr.permutation(self.num_data)
ratio_list = self.ratio_list[rand_perm]
ratio_index = self.ratio_index[rand_perm]
# re-calculate minibatch ratio list
ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
def __len__(self):
return self.num_data
class BatchSampler(torch_sampler.BatchSampler):
r"""Wraps another sampler to yield a mini-batch of indices.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
>>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(self, sampler, batch_size, drop_last):
if not isinstance(sampler, torch_sampler.Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integeral value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx) # Difference: batch.append(int(idx))
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
def collate_minibatch(list_of_blobs):
"""Stack samples seperately and return a list of minibatches
A batch contains NUM_GPUS minibatches and image size in different minibatch may be different.
Hence, we need to stack smaples from each minibatch seperately.
"""
Batch = {key: [] for key in list_of_blobs[0]}
    # Because roidb consists of entries of variable length, it can't be batched into a tensor.
    # So we keep roidb as a list of ndarrays.
lists = []
for blobs in list_of_blobs:
lists.append({'data' : blobs.pop('data'),
'rois' : blobs.pop('rois'),
'labels' : blobs.pop('labels')})
for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH):
mini_list = lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)]
minibatch = default_collate(mini_list)
for key in minibatch:
Batch[key].append(minibatch[key])
return Batch
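# Illustration (assuming cfg.TRAIN.IMS_PER_BATCH == 2 and four samples): the
# returned Batch maps each key to a list of two collated minibatches, e.g.
# Batch['data'] == [tensor_for_samples_0_1, tensor_for_samples_2_3], so each
# GPU receives one minibatch whose images share the same padded size.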
|
Bindings/Python/examples/Moco/examplePredictAndTrack.py | mcx/opensim-core | 532 | 4518 | <reponame>mcx/opensim-core
# -------------------------------------------------------------------------- #
# OpenSim Moco: examplePredictAndTrack.py #
# -------------------------------------------------------------------------- #
# Copyright (c) 2018 Stanford University and the Authors #
# #
# Author(s): <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain a #
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# -------------------------------------------------------------------------- #
import os
import math
import opensim as osim
"""
This file solves the following problems using a
double pendulum model:
1. predict an optimal trajectory (and controls),
2. track the states from the optimal trajectory, and
3. track the marker trajectories from the optimal trajectory.
"""
visualize = True
# The following environment variable is set during automated testing.
if os.getenv('OPENSIM_USE_VISUALIZER') == '0':
visualize = False
# Create a model of a double pendulum.
# ------------------------------------
def createDoublePendulumModel():
model = osim.Model()
model.setName("double_pendulum")
# Create two links, each with a mass of 1 kg, center of mass at the body's
# origin, and moments and products of inertia of zero.
b0 = osim.Body("b0", 1, osim.Vec3(0), osim.Inertia(1))
model.addBody(b0)
b1 = osim.Body("b1", 1, osim.Vec3(0), osim.Inertia(1))
model.addBody(b1)
# Add markers to body origin locations.
m0 = osim.Marker("m0", b0, osim.Vec3(0))
m1 = osim.Marker("m1", b1, osim.Vec3(0))
model.addMarker(m0)
model.addMarker(m1)
# Connect the bodies with pin joints. Assume each body is 1 m long.
j0 = osim.PinJoint("j0", model.getGround(), osim.Vec3(0), osim.Vec3(0),
b0, osim.Vec3(-1, 0, 0), osim.Vec3(0))
q0 = j0.updCoordinate()
q0.setName("q0")
j1 = osim.PinJoint("j1",
b0, osim.Vec3(0), osim.Vec3(0), b1, osim.Vec3(-1, 0, 0), osim.Vec3(0))
q1 = j1.updCoordinate()
q1.setName("q1")
model.addJoint(j0)
model.addJoint(j1)
tau0 = osim.CoordinateActuator()
tau0.setCoordinate(j0.updCoordinate())
tau0.setName("tau0")
tau0.setOptimalForce(1)
model.addComponent(tau0)
tau1 = osim.CoordinateActuator()
tau1.setCoordinate(j1.updCoordinate())
tau1.setName("tau1")
tau1.setOptimalForce(1)
model.addComponent(tau1)
# Add display geometry.
bodyGeometry = osim.Ellipsoid(0.5, 0.1, 0.1)
transform = osim.Transform(osim.Vec3(-0.5, 0, 0))
b0Center = osim.PhysicalOffsetFrame("b0_center", b0, transform)
b0.addComponent(b0Center)
b0Center.attachGeometry(bodyGeometry.clone())
b1Center = osim.PhysicalOffsetFrame("b1_center", b1, transform)
b1.addComponent(b1Center)
b1Center.attachGeometry(bodyGeometry.clone())
model.finalizeConnections()
model.printToXML("double_pendulum.osim")
return model
def solvePrediction():
# Predict the optimal trajectory for a minimum time swing-up.
# In the diagram below, + represents the origin, and ---o represents a link
# in the double pendulum.
#
# o
# |
# o
# |
# +---o---o +
#
    #   initial pose   final pose
#
study = osim.MocoStudy()
study.setName("double_pendulum_predict")
problem = study.updProblem()
# Model (dynamics).
problem.setModel(createDoublePendulumModel())
# Bounds.
problem.setTimeBounds(0, [0, 5])
# Arguments are name, [lower bound, upper bound],
# initial [lower bound, upper bound],
# final [lower bound, upper bound].
problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0, 0)
problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0, 0)
problem.setControlInfo("/tau0", [-100, 100])
problem.setControlInfo("/tau1", [-100, 100])
# Cost: minimize final time and error from desired
# end effector position.
ftCost = osim.MocoFinalTimeGoal()
ftCost.setWeight(0.001)
problem.addGoal(ftCost)
finalCost = osim.MocoMarkerFinalGoal()
finalCost.setName("final")
finalCost.setWeight(1000.0)
finalCost.setPointName("/markerset/m1")
finalCost.setReferenceLocation(osim.Vec3(0, 2, 0))
problem.addGoal(finalCost)
# Configure the solver.
solver = study.initTropterSolver()
solver.set_num_mesh_intervals(100)
solver.set_verbosity(2)
solver.set_optim_solver("ipopt")
guess = solver.createGuess()
guess.setNumTimes(2)
guess.setTime([0, 1])
guess.setState("/jointset/j0/q0/value", [0, -math.pi])
guess.setState("/jointset/j1/q1/value", [0, 2*math.pi])
guess.setState("/jointset/j0/q0/speed", [0, 0])
guess.setState("/jointset/j1/q1/speed", [0, 0])
guess.setControl("/tau0", [0, 0])
guess.setControl("/tau1", [0, 0])
guess.resampleWithNumTimes(10)
solver.setGuess(guess)
# Save the problem to a setup file for reference.
study.printToXML("examplePredictAndTrack_predict.omoco")
# Solve the problem.
solution = study.solve()
solution.write("examplePredictAndTrack_predict_solution.sto")
if visualize:
study.visualize(solution)
return solution
def computeMarkersReference(predictedSolution):
model = createDoublePendulumModel()
model.initSystem()
states = predictedSolution.exportToStatesTable()
statesTraj = osim.StatesTrajectory.createFromStatesTable(model, states)
markerTrajectories = osim.TimeSeriesTableVec3()
markerTrajectories.setColumnLabels(["/markerset/m0", "/markerset/m1"])
for state in statesTraj:
model.realizePosition(state)
m0 = model.getComponent("markerset/m0")
m1 = model.getComponent("markerset/m1")
markerTrajectories.appendRow(state.getTime(),
osim.RowVectorVec3([m0.getLocationInGround(state),
m1.getLocationInGround(state)]))
# Assign a weight to each marker.
markerWeights = osim.SetMarkerWeights()
markerWeights.cloneAndAppend(osim.MarkerWeight("/markerset/m0", 1))
markerWeights.cloneAndAppend(osim.MarkerWeight("/markerset/m1", 5))
return osim.MarkersReference(markerTrajectories, markerWeights)
def solveStateTracking(stateRef):
    # Track the state trajectory from the minimum-time swing-up prediction.
study = osim.MocoStudy()
study.setName("double_pendulum_track")
problem = study.updProblem()
# Model (dynamics).
problem.setModel(createDoublePendulumModel())
# Bounds.
# Arguments are name, [lower bound, upper bound],
# initial [lower bound, upper bound],
# final [lower bound, upper bound].
finalTime = stateRef.getIndependentColumn()[-1]
problem.setTimeBounds(0, finalTime)
problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0)
problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0)
problem.setControlInfo("/tau0", [-150, 150])
problem.setControlInfo("/tau1", [-150, 150])
# Cost: track provided state data.
stateTracking = osim.MocoStateTrackingGoal()
stateTracking.setReference(osim.TableProcessor(stateRef))
problem.addGoal(stateTracking)
effort = osim.MocoControlGoal()
effort.setName("effort")
effort.setWeight(0.001)
# TODO problem.addGoal(effort)
# Configure the solver.
solver = study.initTropterSolver()
solver.set_num_mesh_intervals(50)
solver.set_verbosity(2)
solver.set_optim_solver("ipopt")
solver.set_optim_jacobian_approximation("exact")
solver.set_optim_hessian_approximation("exact")
solver.set_exact_hessian_block_sparsity_mode("dense")
# Save the problem to a setup file for reference.
study.printToXML("examplePredictAndTrack_track_states.omoco")
# Solve the problem.
solution = study.solve()
solution.write("examplePredictAndTrack_track_states_solution.sto")
if visualize:
study.visualize(solution)
return solution
def solveMarkerTracking(markersRef, guess):
    # Track the marker trajectories from the minimum-time swing-up prediction.
study = osim.MocoStudy()
study.setName("double_pendulum_track")
problem = study.updProblem()
# Model (dynamics).
problem.setModel(createDoublePendulumModel())
# Bounds.
# Arguments are name, [lower bound, upper bound],
# initial [lower bound, upper bound],
# final [lower bound, upper bound].
finalTime = markersRef.getMarkerTable().getIndependentColumn()[-1]
problem.setTimeBounds(0, finalTime)
problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0)
problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0)
problem.setControlInfo("/tau0", [-100, 100])
problem.setControlInfo("/tau1", [-100, 100])
# Cost: track provided marker data.
markerTracking = osim.MocoMarkerTrackingGoal()
markerTracking.setMarkersReference(markersRef)
problem.addGoal(markerTracking)
effort = osim.MocoControlGoal()
effort.setName("effort")
effort.setWeight(0.0001)
# problem.addGoal(effort)
# Configure the solver.
solver = study.initTropterSolver()
solver.set_num_mesh_intervals(50)
solver.set_verbosity(2)
solver.set_optim_solver("ipopt")
solver.set_optim_jacobian_approximation("exact")
solver.set_optim_hessian_approximation("exact")
solver.set_exact_hessian_block_sparsity_mode("dense")
solver.setGuess(guess)
# Save the problem to a setup file for reference.
study.printToXML("examplePredictAndTrack_track_markers.omoco")
# Solve the problem.
solution = study.solve()
solution.write("examplePredictAndTrack_track_markers_solution.sto")
if visualize:
study.visualize(solution)
return solution
optimalTrajectory = solvePrediction()
markersRef = computeMarkersReference(optimalTrajectory)
trackedSolution = solveStateTracking(optimalTrajectory.exportToStatesTable())
trackedSolution2 = solveMarkerTracking(markersRef, trackedSolution)
|
tests/syntax/missing_in_with_for.py | matan-h/friendly | 287 | 4529 | <filename>tests/syntax/missing_in_with_for.py
for x range(4):
print(x)
|
tests/factories.py | luzik/waliki | 324 | 4535 | <reponame>luzik/waliki<filename>tests/factories.py
import factory
from django.contrib.auth.models import User, Group, Permission
from waliki.models import ACLRule, Page, Redirect
class UserFactory(factory.django.DjangoModelFactory):
username = factory.Sequence(lambda n: u'user{0}'.format(n))
password = factory.PostGenerationMethodCall('set_password', '<PASSWORD>')
    email = factory.LazyAttribute(lambda o: '<EMAIL>' % o.username)
class Meta:
model = User
@factory.post_generation
def groups(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for group in extracted:
self.groups.add(group)
class GroupFactory(factory.django.DjangoModelFactory):
class Meta:
model = Group
name = factory.Sequence(lambda n: "Group #%s" % n)
@factory.post_generation
def users(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for user in extracted:
self.user_set.add(user)
class ACLRuleFactory(factory.django.DjangoModelFactory):
class Meta:
model = ACLRule
name = factory.Sequence(lambda n: u'Rule {0}'.format(n))
slug = factory.Sequence(lambda n: u'page{0}'.format(n))
@factory.post_generation
def permissions(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for perm in extracted:
if not isinstance(perm, Permission):
perm = Permission.objects.get(content_type__app_label='waliki', codename=perm)
self.permissions.add(perm)
@factory.post_generation
def users(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for user in extracted:
self.users.add(user)
@factory.post_generation
def groups(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for group in extracted:
self.groups.add(group)
class PageFactory(factory.django.DjangoModelFactory):
title = factory.Sequence(lambda n: u'Page {0}'.format(n))
slug = factory.Sequence(lambda n: u'page{0}'.format(n))
@factory.post_generation
def raw(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
self.raw = extracted
class Meta:
model = Page
class RedirectFactory(factory.django.DjangoModelFactory):
old_slug = factory.Sequence(lambda n: u'old-page{0}'.format(n))
new_slug = factory.Sequence(lambda n: u'new-page{0}'.format(n))
class Meta:
model = Redirect
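# A minimal usage sketch (illustrative; the permission codename is assumed):
#
#   group = GroupFactory()
#   user = UserFactory(groups=[group])     # post_generation hook adds the group
#   page = PageFactory(raw='Hello world')  # post_generation hook sets page.raw
#   rule = ACLRuleFactory(permissions=['view_page'], users=[user], groups=[group])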
|
nxt_editor/commands.py | dalteocraft/nxt_editor | 131 | 4536 | # Built-in
import copy
import logging
import time
# External
from Qt.QtWidgets import QUndoCommand
# Internal
from nxt_editor import colors
from nxt_editor import user_dir
from nxt import nxt_path
from nxt.nxt_layer import LAYERS, SAVE_KEY
from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict,
list_merger)
from nxt import nxt_io
from nxt import GRID_SIZE
import nxt_editor
logger = logging.getLogger(nxt_editor.LOGGER_NAME)
def processing(func):
def wrapper(self):
self.model.processing.emit(True)
func(self)
self.model.processing.emit(False)
return wrapper
class NxtCommand(QUndoCommand):
def __init__(self, model):
super(NxtCommand, self).__init__()
self.model = model
self.model.layer_saved.connect(self.reset_layer_effected)
self._layers_effected_by_me = {}
def _get_effects(self, layer_path):
"""Gets the effected state for a given layer with context to this
command. Since a single command can effect layers in different ways.
:param layer_path: string of layer real path
:return: (bool, bool) | (first_effected_by_undo, first_effected_by_redo)
"""
first_eff_by_undo = False
first_eff_by_redo = False
try:
first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo']
except KeyError:
pass
try:
first_eff_by_redo = self._layers_effected_by_me[layer_path]['redo']
except KeyError:
pass
return first_eff_by_undo, first_eff_by_redo
def reset_layer_effected(self, layer_just_saved):
"""When the model marks a layer as saved we reset the class attr
`_first_effected_by_redo` to False. This makes sure the layer is
properly marked as unsaved even if we undo an action after saving it.
:param layer_just_saved: string of layer real path
:return: None
"""
eff_by_undo, eff_by_redo = self._get_effects(layer_just_saved)
where_were_at = self.model.undo_stack.index()
cur_cmd = self.model.undo_stack.command(max(0, where_were_at - 1))
if cur_cmd is self:
return
if layer_just_saved in self._layers_effected_by_me:
if eff_by_undo:
# This command has already been marked as undo effects the
# layer, meaning the layer has been saved and the undo queue
# was moved to an index before this command and the same
# layer was saved again.
eff_by_redo = True
eff_by_undo = False
else:
# Now the undo of this command effects the layer not the redo
eff_by_redo = False
eff_by_undo = True
self._layers_effected_by_me[layer_just_saved] = {'undo': eff_by_undo,
'redo': eff_by_redo}
def redo_effected_layer(self, layer_path):
"""Adds layer to the model's set of effected (unsaved) layers. If
this command was the first to effect the layer we mark it as such
by setting the class attr `_first_effected_by_redo` to True.
:param layer_path: string of layer real path
:return: None
"""
layer_unsaved = layer_path in self.model.effected_layers
eff_by_undo, eff_by_redo = self._get_effects(layer_path)
if not eff_by_undo and layer_unsaved:
return
if not eff_by_undo:
self._layers_effected_by_me[layer_path] = {'undo': False,
'redo': True}
self.model.effected_layers.add(layer_path)
else:
# Layer was saved and then undo was called, thus this redo has a
# net zero effect on the layer
try:
self.model.effected_layers.remove(layer_path)
except KeyError: # Removed by a save action
pass
def undo_effected_layer(self, layer_path):
"""Removes layer from the model's set of effected (unsaved) layers.
If the layer is not marked as effected in the model we mark it as
effected. This case happens when undo is called after a layer is saved.
:param layer_path: string of layer real path
:return: None
"""
eff_by_undo, eff_by_redo = self._get_effects(layer_path)
layer_saved = layer_path not in self.model.effected_layers
if layer_saved:
eff_by_undo = True
# Set redo to False since now its been saved & the undo effects it
eff_by_redo = False
self.model.effected_layers.add(layer_path)
elif eff_by_redo:
try:
self.model.effected_layers.remove(layer_path)
except KeyError: # Removed by a save action
pass
self._layers_effected_by_me[layer_path] = {'undo': eff_by_undo,
'redo': eff_by_redo}
class AddNode(NxtCommand):
"""Add a node to the graph"""
def __init__(self, name, data, parent_path, pos, model, layer_path):
super(AddNode, self).__init__(model)
self.name = name
self.data = data
self.parent_path = parent_path
self.layer_path = layer_path
self.stage = model.stage
# command data
self.pos = pos or [0.0, 0.0]
self.prev_selection = self.model.selection
# resulting node
self.node_path = None
self.created_node_paths = []
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
dirty_nodes = []
# delete any created nodes
for node_path in self.created_node_paths:
node = layer.lookup(node_path)
if node is not None:
_, dirty = self.stage.delete_node(node, layer,
remove_layer_data=False)
dirty_nodes += dirty
node = layer.lookup(self.node_path)
source_layer = self.stage.get_node_source_layer(node)
if source_layer.layer_idx() > 0:
rm_layer_data = True
else:
rm_layer_data = False
comp_layer = self.model.comp_layer
if node is not None:
# delete node
_, dirty = self.stage.delete_node(node, layer,
comp_layer=comp_layer,
remove_layer_data=rm_layer_data)
dirty_nodes += dirty
dirty_nodes += self.created_node_paths
dirty_nodes += [self.node_path]
self.undo_effected_layer(self.layer_path)
self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
self.model.selection = self.prev_selection
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
self.created_node_paths = []
dirty_nodes = []
nodes, dirty = self.stage.add_node(name=self.name, data=self.data,
parent=self.parent_path,
layer=layer.layer_idx(),
comp_layer=self.model.comp_layer)
dirty_nodes += dirty
self.node_path = layer.get_node_path(nodes[0])
self.model._set_node_pos(node_path=self.node_path, pos=self.pos,
layer=layer)
self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
self.model.selection = [self.node_path]
self.redo_effected_layer(layer.real_path)
self.setText('Added node: {}'.format(self.node_path))
class DeleteNode(NxtCommand):
def __init__(self, node_path, model, layer_path, other_removed_nodes):
"""Delete node from the layer at the layer path and the comp layer.
It is important to note that the other_removed_nodes
list must be shared by other DeleteNode commands in a command macro.
The list will be mutated by the stage as it deletes node, this
behavior is depended upon!
:param node_path: String of node path
:param model: StageModel
:param layer_path: String of layer realpath
:param other_removed_nodes: list of node paths that will be deleted
in this event loop.
"""
super(DeleteNode, self).__init__(model)
self.layer_path = layer_path
self.stage = model.stage
# get undo data
self.prev_selection = self.model.selection
self.prev_starts = []
self.prev_breaks = {}
self.node_path = node_path
self.node_data = {}
self.others = other_removed_nodes
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
comp_layer = self.model.comp_layer
parent = self.node_data['parent']
# We don't want to fix names because we know this node should be
# named what it was named when it was deleted
new_nodes, dirty = self.stage.add_node(name=self.node_data['name'],
data=self.node_data['save_dict'],
parent=parent,
layer=layer.layer_idx(),
comp_layer=comp_layer,
fix_names=False)
if self.node_data['break']:
self.model._add_breakpoint(self.node_path, layer)
self.model._add_breakpoint(self.node_path, self.stage.top_layer)
if self.node_data['start']:
self.model._add_start_node(self.node_path, layer)
# restore layer data
pos = self.node_data.get('pos')
if pos:
self.model.top_layer.positions[self.node_path] = pos
# This might be a bug? We don't touch the top layer in redo...
self.undo_effected_layer(self.stage.top_layer.real_path)
attr_display = self.node_data.get('attr_display')
if attr_display is not None:
self.model._set_attr_display_state(self.node_path, attr_display)
user_dir.breakpoints = self.prev_breaks
ancestor_tuple = self.node_data.get('ancestor_child_order')
if ancestor_tuple:
ancestor_path, ancestor_child_order = ancestor_tuple
ancestor = layer.lookup(ancestor_path)
if ancestor:
setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER,
ancestor_child_order)
self.model.selection = self.prev_selection
# Fixme: Does not account for rebuilding proxy nodes for the dirty nodes
dirty_set = tuple(set(dirty))
self.undo_effected_layer(self.layer_path)
if dirty_set != (self.node_path,):
self.model.update_comp_layer(rebuild=True)
else:
self.model.nodes_changed.emit(dirty_set)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
comp_layer = self.model.comp_layer
self.node_data = {}
self.prev_starts = self.model.get_start_nodes(layer)
self.prev_breaks = user_dir.breakpoints
dirty_nodes = []
node = layer.lookup(self.node_path)
# get node info
parent = getattr(node, INTERNAL_ATTRS.PARENT_PATH)
name = getattr(node, INTERNAL_ATTRS.NAME)
is_break = self.model.get_is_node_breakpoint(self.node_path, layer)
self.node_data = {'parent': parent, 'name': name,
'pos': self.model.get_node_pos(self.node_path),
'break': is_break}
closest_ancestor = layer.ancestors(self.node_path)
if closest_ancestor:
closest_ancestor = closest_ancestor[0]
else:
closest_ancestor = None
closest_ancestor_path = layer.get_node_path(closest_ancestor)
if closest_ancestor_path:
ancestor_child_order = getattr(closest_ancestor,
INTERNAL_ATTRS.CHILD_ORDER)
self.node_data['ancestor_child_order'] = (closest_ancestor_path,
ancestor_child_order[:])
# Attr display data
attr_display = self.model.get_attr_display_state(self.node_path)
if attr_display is not None:
self.node_data['attr_display'] = attr_display
# get layer data
is_start = self.model.get_is_node_start(self.node_path, layer)
self.node_data['start'] = is_start
self.node_data['save_dict'] = get_node_as_dict(node)
if self.node_data['break']:
self.model._remove_breakpoint(self.node_path, layer)
self.model._remove_breakpoint(self.node_path, self.stage.top_layer)
if self.node_data['start']:
self.model._remove_start_node(self.node_path, layer)
node = layer.lookup(self.node_path)
source_layer = self.stage.get_node_source_layer(node)
if source_layer.layer_idx() > 0:
rm_layer_data = True
else:
rm_layer_data = False
for p in self.others[:]:
self.others += comp_layer.get_node_dirties(p)
_, dirty = self.stage.delete_node(node, layer,
comp_layer=comp_layer,
remove_layer_data=rm_layer_data,
other_removed_nodes=self.others)
dirty_nodes += dirty + [self.node_path]
if self.node_path in self.model.selection:
fix_selection = self.model.selection[:]
fix_selection.remove(self.node_path)
self.model.selection = fix_selection
self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
self.redo_effected_layer(layer.real_path)
self.setText("Delete node: {}".format(self.node_path))
class SetNodeAttributeData(NxtCommand):
"""Set attribute value"""
def __init__(self, node_path, attr_name, data, model, layer_path):
super(SetNodeAttributeData, self).__init__(model)
self.node_path = node_path
self.nice_attr_name = attr_name
self.attr_name = attr_name
self.data = data
self.stage = model.stage
self.layer_path = layer_path
self.created_node_paths = []
self.remove_attr = False
self.prev_data = {}
self.recomp = attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP
self.return_value = None
self.prev_selection = model.selection
@processing
def undo(self):
start = time.time()
layer = self.model.lookup_layer(self.layer_path)
self.undo_effected_layer(layer.real_path)
comp = self.model.comp_layer
dirties = [self.node_path]
# delete any created nodes
for node_path in self.created_node_paths:
n = layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, layer=layer, comp_layer=comp,
remove_layer_data=False)
n = layer.lookup(self.node_path)
if n is not None:
if self.remove_attr:
self.stage.delete_node_attr(n, self.attr_name)
dirties += comp.get_node_dirties(self.node_path)
else:
result = self.stage.node_setattr_data(node=n,
attr=self.attr_name,
layer=layer, create=False,
comp_layer=comp,
**self.prev_data)
if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH:
dirties += result
if self.attr_name in INTERNAL_ATTRS.ALL:
dirties += comp.get_node_dirties(self.node_path)
changed_attrs = ()
for dirty in dirties:
attr_path = nxt_path.make_attr_path(dirty, self.attr_name)
changed_attrs += (attr_path,)
if self.recomp:
self.model.update_comp_layer(rebuild=self.recomp)
else:
if (self.remove_attr or self.created_node_paths or
self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH,
INTERNAL_ATTRS.PARENT_PATH)):
self.model.nodes_changed.emit(dirties)
else:
self.model.attrs_changed.emit(changed_attrs)
if not self.recomp:
changed = tuple([self.node_path] + self.created_node_paths)
self.model.nodes_changed.emit(changed)
self.model.selection = self.prev_selection
# undo_debug(self, start)
@processing
def redo(self):
start = time.time()
created_node = False
self.prev_selection = self.model.selection
layer = self.model.lookup_layer(self.layer_path)
self.redo_effected_layer(layer.real_path)
comp = self.model.comp_layer
self.remove_attr = False
self.created_node_paths = []
# get the node
node = layer.lookup(self.node_path)
dirties = [self.node_path]
if node is None:
parent_path = nxt_path.get_parent_path(self.node_path)
name = nxt_path.node_name_from_node_path(self.node_path)
if self.attr_name in INTERNAL_ATTRS.ALL:
self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name)
attr_data = {self.return_value: self.data.get(META_ATTRS.VALUE)}
else:
attr_data = {nxt_io.SAVE_KEY.ATTRS: {self.attr_name: self.data}}
self.return_value = self.attr_name
_, dirties = self.stage.add_node(name=name, data=attr_data,
parent=parent_path,
layer=layer.layer_idx(),
comp_layer=comp,
fix_names=False)
# Fixme: Targeted parenting would avoid the need for a recomp
if layer.descendants(self.node_path):
self.recomp = True
created_node = True
self.created_node_paths += [self.node_path]
node = layer.lookup(self.node_path)
self.prev_data = self.stage.get_node_attr_data(node, self.attr_name,
layer, quiet=True)
if self.prev_data:
self.prev_data = copy.deepcopy(self.prev_data)
# set attribute value this also adds the attribute if it does not exist
if not self.stage.node_attr_exists(node, self.attr_name):
self.remove_attr = True
if not created_node:
self.return_value = self.stage.node_setattr_data(node,
self.attr_name,
layer=layer,
create=True,
comp_layer=comp,
**self.data)
if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH:
dirties += self.return_value
if self.attr_name in INTERNAL_ATTRS.ALL:
dirties += comp.get_node_dirties(self.node_path)
if self.recomp:
self.model.update_comp_layer(rebuild=self.recomp)
else:
if (self.remove_attr or self.created_node_paths or
self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH,
INTERNAL_ATTRS.PARENT_PATH)):
self.model.nodes_changed.emit(dirties)
else:
changed_attrs = ()
for dirty in dirties:
attr_path = nxt_path.make_attr_path(dirty, self.attr_name)
changed_attrs += (attr_path,)
self.model.attrs_changed.emit(changed_attrs)
attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name)
val = str(self.data.get(META_ATTRS.VALUE))
self.setText("Set {} to {}".format(attr_path, val))
# redo_debug(self, start)
class SetNodeAttributeValue(SetNodeAttributeData):
def __init__(self, node_path, attr_name, value, model, layer_path):
data = {META_ATTRS.VALUE: value}
super(SetNodeAttributeValue, self).__init__(node_path, attr_name, data,
model, layer_path)
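# --- Illustrative only: a hedged sketch of how a command like
# SetNodeAttributeValue might be pushed onto a QUndoStack. This assumes the
# commands in this module follow the QUndoCommand protocol (as the
# QUndoCommand-based classes here do), where QUndoStack.push() calls redo()
# immediately and later replays undo()/redo() as the user steps through
# history. The `undo_stack` and `model` arguments, the node path and the
# attribute name below are placeholders, not values from this module.
def _example_push_set_attr_command(undo_stack, model):
    cmd = SetNodeAttributeValue(node_path='/node', attr_name='my_attr',
                                value=1, model=model,
                                layer_path=model.top_layer.real_path)
    undo_stack.push(cmd)  # push() runs cmd.redo() for us
    return cmd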
class RenameNode(SetNodeAttributeValue):
"""Rename node"""
def __init__(self, node_path, name, model, layer_path):
self.old_node_path = node_path
layer = model.lookup_layer(layer_path)
parent_path = nxt_path.get_parent_path(node_path)
new_name = model.stage.get_unique_node_name(name=name, layer=layer,
parent_path=parent_path,
layer_only=True)
super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME,
new_name, model, layer_path)
def undo(self):
self.model.about_to_rename.emit()
self.prev_data['force'] = True
super(RenameNode, self).undo()
self.node_path = self.old_node_path
self.model.selection = [self.node_path]
def redo(self):
self.model.about_to_rename.emit()
super(RenameNode, self).redo()
self.node_path = self.return_value
self.model.selection = [self.node_path]
if self.model.get_is_node_start(self.node_path, self.model.comp_layer):
self.model.starts_changed.emit(self.model.get_start_nodes())
self.setText("{} renamed to {}".format(self.old_node_path,
self.return_value))
class DuplicateNodes(NxtCommand):
"""Duplicate nodes on this graph"""
def __init__(self, node_paths, descendants, model, source_layer_path,
target_layer_path):
# TODO: We should make another base command class that can be used to
# set multiple attr's data. That way duplicate can just be a
# setattr. The way it works now we can only set one attr's data at a
# time and duplicate needs to get local + INTERNAL number of attrs.
super(DuplicateNodes, self).__init__(model)
self.node_paths = node_paths
self.descendants = descendants
self.source_layer_path = source_layer_path
self.target_layer_path = target_layer_path
self.stage = model.stage
# get undo data
self.prev_selection = self.model.selection
# resulting nodes
self.new_node_paths = []
@processing
def undo(self):
target_layer = self.model.lookup_layer(self.target_layer_path)
# delete duplicated nodes
for node_path in self.new_node_paths:
n = target_layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, target_layer,
remove_layer_data=True)
self.model.selection = self.prev_selection
self.model.update_comp_layer(rebuild=True)
self.undo_effected_layer(target_layer.real_path)
@processing
def redo(self):
new_selection = []
self.new_node_paths = []
source_layer = self.model.lookup_layer(self.source_layer_path)
target_layer = self.model.lookup_layer(self.target_layer_path)
self.redo_effected_layer(target_layer.real_path)
for node_path in self.node_paths:
node = source_layer.lookup(node_path)
# duplicate node
new, dirty = self.stage.duplicate_node(node=node,
layer=target_layer,
descendants=self.descendants)
new_selection.append(target_layer.get_node_path(new[0]))
# process new nodes
for new_node in new:
# add new node path to the list and emit model signal
new_node_path = target_layer.get_node_path(new_node)
self.new_node_paths += [new_node_path]
# self.model.node_added.emit(new_node_path)
# set position
has_parent = self.model.node_has_parent(new_node_path,
target_layer)
if not has_parent and new_node_path != node_path:
pos = self.model.get_node_pos(node_path)
pos = [pos[0] + 20, pos[1] + 20]
self.model._set_node_pos(new_node_path, pos,
layer=target_layer)
self.model.selection = new_selection
self.model.update_comp_layer(rebuild=True)
if len(self.node_paths) == 1:
nodes_str = self.node_paths[0]
else:
nodes_str = 'nodes'
self.setText('Duplicated {}'.format(nodes_str))
class InstanceNode(SetNodeAttributeValue):
"""Instance nodes on this graph"""
def __init__(self, node_path, model, source_layer_path, target_layer_path):
src_name = nxt_path.node_name_from_node_path(node_path)
parent_path = nxt_path.get_parent_path(node_path)
new_name = model.stage.get_unique_node_name(src_name,
model.comp_layer,
parent_path=parent_path)
new_path = nxt_path.join_node_paths(parent_path, new_name)
self.new_path = new_path
super(InstanceNode, self).__init__(new_path,
INTERNAL_ATTRS.INSTANCE_PATH,
node_path, model, target_layer_path)
def redo(self):
node_path = self.data.get(META_ATTRS.VALUE)
layer = self.model.lookup_layer(self.layer_path)
new_pos = self.model.get_pos_offset(node_path, (GRID_SIZE * 16, 0),
layer)
self.model._set_node_pos(self.new_path, new_pos, layer)
super(InstanceNode, self).redo()
self.return_value = self.new_path
self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE)))
class SetNodesPosition(NxtCommand):
"""Move nodes"""
def __init__(self, node_positions, model, layer_path):
super(SetNodesPosition, self).__init__(model)
self.model = model
self.layer_path = layer_path
self.new_positions = node_positions
self.old_positions = {}
for path in self.new_positions.keys():
self.old_positions[path] = model.get_node_pos(path)
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
for node_path, old_pos in self.old_positions.items():
self.model._set_node_pos(node_path=node_path,
pos=old_pos, layer=layer)
self.undo_effected_layer(self.layer_path)
@processing
def redo(self):
delta_str = None
layer = self.model.lookup_layer(self.layer_path)
for node_path, new_pos in self.new_positions.items():
self.model._set_node_pos(node_path=node_path,
pos=new_pos, layer=layer)
if not delta_str:
pos = new_pos
prev_pos = self.old_positions[node_path]
# Only letting it set text once, relying on consistent delta.
x_delta = pos[0] - prev_pos[0]
y_delta = pos[1] - prev_pos[1]
delta_str = '{}, {}'.format(x_delta, y_delta)
if len(self.new_positions) == 1:
nodes_str = node_path
else:
nodes_str = 'nodes'
self.setText('Move {} {}'.format(nodes_str, delta_str))
self.redo_effected_layer(layer.real_path)
class SetSelection(QUndoCommand):
"""Select Nodes and Connections"""
def __init__(self, paths, model):
super(SetSelection, self).__init__()
self.new_paths = paths
self.model = model
self.prev_paths = self.model.selection
def undo(self):
self.model.selection = self.prev_paths
def redo(self):
self.model.selection = self.new_paths
self.setText('Set selection: {}'.format(str(self.new_paths)))
class AddSelection(SetSelection):
def __init__(self, paths, model):
self.added_paths = paths
curr_selection = model.selection
new_paths = curr_selection + paths
super(AddSelection, self).__init__(new_paths, model)
def redo(self):
super(AddSelection, self).redo()
self.setText('Add {} to selection'.format(self.added_paths))
class RemoveFromSelection(SetSelection):
def __init__(self, paths, model):
self.rem_paths = paths
new_selection = model.selection[:]
for path in paths:
try:
new_selection.remove(path)
except ValueError:
continue
super(RemoveFromSelection, self).__init__(new_selection, model)
def redo(self):
super(RemoveFromSelection, self).redo()
self.setText('Remove {} from selection'.format(self.rem_paths))
class LocalizeNodes(NxtCommand):
"""Localize nodes"""
def __init__(self, node_paths, model):
super(LocalizeNodes, self).__init__(model)
self.node_paths = node_paths
self.model = model
self.stage = model.stage
self.prev_selection = self.model.selection
self.prev_node_data = {}
self.created_node_paths = []
@processing
def undo(self):
for node_path in self.created_node_paths:
n = self.model.target_layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, layer=self.model.target_layer,
remove_layer_data=False)
layers = [self.model.target_layer]
for node_path, all_data in self.prev_node_data.items():
apply_data = {}
node = self.model.target_layer.lookup(node_path)
if not node:
continue
data = all_data['data']
child_order = all_data['data'].get('child_order', [])
apply_data['child_order'] = child_order
apply_data['attributes'] = data.get('attributes', {})
attrs_to_keep = apply_data['attributes'].keys()
apply_data['enabled'] = data.get('enabled')
if data.get('instance'):
apply_data['instance'] = data['instance']
self.stage.transfer_node_data(node, self.model.target_layer,
apply_data, self.model.comp_layer)
local_attrs = self.stage.get_node_local_attr_names(node_path,
layers)
for attr in local_attrs:
if attr not in attrs_to_keep:
self.stage.delete_node_attr(node=node, attr_name=attr)
self.model.update_comp_layer(rebuild=True)
self.undo_effected_layer(layers[0].real_path)
self.model.selection = self.prev_selection
@processing
def redo(self):
self.prev_node_data = {}
self.created_node_paths = []
layer = self.model.target_layer
for node_path in self.node_paths:
node_data = {}
display_node = self.model.comp_layer.lookup(node_path)
if not display_node:
continue
# add node if it doesn't exist on the target layer
target_node = self.model.target_layer.lookup(node_path)
if not target_node:
new_nodes, new_paths, dirty = _add_node_hierarchy(node_path,
self.model,
layer)
target_node = new_nodes[-1]
self.created_node_paths += new_paths
# self.model.node_added.emit(node_path)
# preserve original data
node_data['data'] = get_node_as_dict(target_node)
# localize source node
self.stage.transfer_node_data(target_node, self.model.target_layer,
display_node,
self.model.comp_layer)
self.prev_node_data[node_path] = node_data
self.model.update_comp_layer(rebuild=bool(self.created_node_paths))
self.redo_effected_layer(layer.real_path)
self.model.selection = self.prev_selection
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
self.setText('Localize {}'.format(str(path_str)))
class LocalizeUserAttr(SetNodeAttributeData):
"""Localize nodes"""
def __init__(self, node_path, attr_name, model, layer_path):
node = model.comp_layer.lookup(node_path)
data = model.stage.get_node_attr_data(node, attr_name,
model.comp_layer)
if META_ATTRS.SOURCE in data:
data.pop(META_ATTRS.SOURCE)
super(LocalizeUserAttr, self).__init__(node_path, attr_name, data,
model, layer_path)
class LocalizeCompute(SetNodeAttributeValue):
"""Localize nodes"""
def __init__(self, node_path, model, layer_path):
comp_layer = model.comp_layer
display_node = comp_layer.lookup(node_path)
code_lines = model.stage.get_node_code_lines(display_node, comp_layer)
super(LocalizeCompute, self).__init__(node_path,
INTERNAL_ATTRS.COMPUTE,
code_lines, model, layer_path)
def redo(self):
super(LocalizeCompute, self).redo()
self.setText("Localize compute on {}".format(self.node_path))
class LocalizeInstancePath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
inst_path = model.get_node_instance_path(node_path, model.comp_layer,
expand=False)
super(LocalizeInstancePath, self).__init__(node_path,
INTERNAL_ATTRS.INSTANCE_PATH,
inst_path, model, layer_path)
def redo(self):
super(LocalizeInstancePath, self).redo()
self.setText("Localize instance path to {}".format(self.node_path))
class RevertInstancePath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
super(RevertInstancePath, self).__init__(node_path,
INTERNAL_ATTRS.INSTANCE_PATH,
None, model, layer_path)
def redo(self):
super(RevertInstancePath, self).redo()
self.setText("Revert instance path on {}".format(self.node_path))
class LocalizeExecPath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
exec_path = model.get_node_exec_in(node_path)
super(LocalizeExecPath, self).__init__(node_path,
INTERNAL_ATTRS.EXECUTE_IN,
exec_path, model, layer_path)
def redo(self):
super(LocalizeExecPath, self).redo()
self.setText("Localize exec input on {}".format(self.node_path))
class RevertExecPath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
super(RevertExecPath, self).__init__(node_path,
INTERNAL_ATTRS.EXECUTE_IN, None,
model, layer_path)
    def redo(self):
        super(RevertExecPath, self).redo()
        self.setText("Revert exec input on {}".format(self.node_path))
class RevertNode(DeleteNode):
"""Localize nodes"""
def __init__(self, node_path, model, layer_path, others):
super(RevertNode, self).__init__(node_path, model, layer_path, others)
self.rebuild = False # Tells the delete command not to re-comp
self.created_node_paths = []
self.node_path = node_path
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
# Remove our created empty nodes
for node_path in self.created_node_paths:
n = layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, layer, remove_layer_data=False)
super(RevertNode, self).undo()
self.model.update_comp_layer(rebuild=True)
self.model.selection = self.prev_selection
def redo(self):
self.created_node_paths = []
super(RevertNode, self).redo()
layer = self.model.lookup_layer(self.layer_path)
# Re-create the node as an empty node
new_nodes, new_paths, dirty = _add_node_hierarchy(self.node_path,
self.model, layer)
self.created_node_paths += new_paths
self.model.update_comp_layer(rebuild=bool(self.created_node_paths))
self.model.selection = self.prev_selection
self.setText('Revert {}'.format(self.node_path))
class ParentNodes(NxtCommand):
"""Parent Nodes"""
def __init__(self, node_paths, parent_node_path, model):
super(ParentNodes, self).__init__(model)
self.parent_node_path = parent_node_path
self.parent_node = None
self.model = model
self.stage = model.stage
self.node_paths = node_paths
# resulting nodes
self.node_path_data = {}
self.new_node_paths = []
self.created_node_paths = []
# get node selection for undo
self.prev_selection = self.model.selection
# get previous node data for all child nodes for undo
self.prev_node_data = {}
@processing
def undo(self):
layer = self.model.target_layer
self.undo_effected_layer(layer.real_path)
# undo parent
common_parent_nodes = {}
for old_path, node_data in self.prev_node_data.items():
prev_parent_path = node_data['parent']
prev_parent_node = layer.lookup(prev_parent_path)
new_path = self.node_path_data[old_path]
node = layer.lookup(new_path)
if prev_parent_path not in list(common_parent_nodes.keys()):
common_parent_nodes[prev_parent_path] = {node: old_path}
else:
common_parent_nodes[prev_parent_path][node] = old_path
child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER)
if child_order_tuple:
ancestor_path, child_order = child_order_tuple
ancestor = layer.lookup(ancestor_path)
if ancestor:
self.stage.set_node_child_order(ancestor, child_order,
layer)
if new_path in list(self.model.top_layer.positions.keys()):
source_layer = self.stage.get_node_source_layer(node)
source_layer.positions.pop(new_path)
for parent_path, nodes_dict in common_parent_nodes.items():
self.stage.parent_nodes(nodes=list(nodes_dict.keys()),
parent_path=parent_path,
layer=layer)
for parent_path, nodes_dict in common_parent_nodes.items():
for node, old_path in nodes_dict.items():
node_data = self.prev_node_data[old_path]
# restore name
prev_name = node_data['name']
name = getattr(node, INTERNAL_ATTRS.NAME)
if name != prev_name:
self.stage.set_node_name(node, name=prev_name,
layer=layer, force=True)
# restore position
if self.parent_node_path != nxt_path.WORLD:
prev_pos = node_data['pos']
source_layer = self.stage.get_node_source_layer(node)
self.model._set_node_pos(old_path, prev_pos,
layer=source_layer)
# delete any created nodes
for node_path in self.created_node_paths:
node = layer.lookup(node_path)
if node is not None:
self.stage.delete_node(node, layer)
idx = 0
for old_node_path in self.node_paths:
new_node_path = self.new_node_paths[idx]
attr_state = self.model.remove_attr_display_state(new_node_path)
if attr_state is not None:
self.model._set_attr_display_state(old_node_path, attr_state)
idx += 1
self.model.update_comp_layer(rebuild=True)
self.model.selection = self.prev_selection
@processing
def redo(self):
self.prev_node_data = {}
self.node_path_data = {}
self.new_node_paths = []
self.created_node_paths = []
nodes = []
layer = self.model.target_layer
self.redo_effected_layer(layer.real_path)
for node_path in self.node_paths:
node = layer.lookup(node_path)
name = getattr(node, INTERNAL_ATTRS.NAME)
parent_path = getattr(node, INTERNAL_ATTRS.PARENT_PATH)
            node_data = self.stage.get_node_data(node, layer)
node_data['pos'] = self.model.get_node_pos(node_path)
node_data['name'] = name
node_data['parent'] = parent_path
parent_node = layer.lookup(parent_path)
ancestor_path = parent_path
child_order = []
if parent_node:
child_order = getattr(parent_node,
INTERNAL_ATTRS.CHILD_ORDER)
else:
ancestors = layer.ancestors(node_path)
if ancestors:
ancestor = ancestors[0]
ancestor_path = layer.get_node_path(ancestor)
child_order = self.stage.get_node_child_order(ancestor)
node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path,
child_order]
self.prev_node_data[node_path] = node_data
nodes += [node]
# get current node hierarchy information for each node. each node
# path is placed in a list of descendants for each top node so when
        # they are un-parented each node can be placed visually beside its
# original top node.
node_hierarchy_data = {}
        if self.parent_node_path == nxt_path.WORLD:
for node_path in self.node_paths:
node = layer.lookup(node_path)
top_node = self.stage.get_top_node(node,
self.model.target_layer)
if top_node is None:
top_node = node
top_node_path = layer.get_node_path(top_node)
                top_node_descendant_list = node_hierarchy_data.get(top_node_path, [])
top_node_descendant_list += [node]
node_hierarchy_data[top_node_path] = top_node_descendant_list
if not node_hierarchy_data:
return
# parent
self.node_path_data = self.stage.parent_nodes(nodes,
self.parent_node_path,
layer)
self.new_node_paths = list(self.node_path_data.values())
idx = 0
for new_node_path in self.new_node_paths:
old_node_path = self.node_paths[idx]
attr_state = self.model.remove_attr_display_state(old_node_path)
if attr_state is not None:
self.model._set_attr_display_state(new_node_path, attr_state)
# set position for un-parent
if self.parent_node_path == nxt_path.WORLD:
old_root = nxt_path.get_root_path(old_node_path)
new_pos = self.model.get_pos_offset(old_root, (GRID_SIZE * 14,
GRID_SIZE),
self.model.top_layer)
self.model._set_node_pos(new_node_path, new_pos, layer)
idx += 1
self.model.update_comp_layer(rebuild=True)
self.model.selection = list(self.node_path_data.values())
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
self.setText("Parent {} to {}".format(path_str, self.parent_node_path))
class AddAttribute(SetNodeAttributeData):
"""Add an attribute to a node."""
def __init__(self, node_path, attr_name, value, model, layer_path):
data = {META_ATTRS.VALUE: value}
super(AddAttribute, self).__init__(node_path, attr_name, data,
model, layer_path)
def redo(self):
super(AddAttribute, self).redo()
self.remove_attr = True
self.setText("Add {} attr to {}".format(self.attr_name,
self.node_path))
class DeleteAttribute(AddAttribute):
"""Delete attribute on a node"""
def __init__(self, node_path, attr_name, model, layer_path):
super(DeleteAttribute, self).__init__(node_path, attr_name, None,
model, layer_path)
# Get the data to be set if undo is called
layer = self.model.lookup_layer(self.layer_path)
node = layer.lookup(self.node_path)
self.data = self.stage.get_node_attr_data(node, self.attr_name, layer)
def undo(self):
super(DeleteAttribute, self).redo()
layer = self.model.lookup_layer(self.layer_path)
self.undo_effected_layer(layer.real_path)
def redo(self):
        # Overload remove_attr here to ensure the attr is deleted
self.remove_attr = True
super(DeleteAttribute, self).undo()
layer = self.model.lookup_layer(self.layer_path)
self.redo_effected_layer(layer.real_path)
self.setText("Remove {} attr from {}".format(self.attr_name,
self.node_path))
class RevertCompute(SetNodeAttributeValue):
"""Revert compute"""
def __init__(self, node_path, model, layer_path):
super(RevertCompute, self).__init__(node_path,
INTERNAL_ATTRS.COMPUTE, [], model,
layer_path)
def redo(self):
super(RevertCompute, self).redo()
self.setText("Revert compute on {}".format(self.node_path))
class RenameAttribute(NxtCommand):
"""Rename attribute"""
def __init__(self, node_path, attr_name, new_attr_name, model, layer_path):
super(RenameAttribute, self).__init__(model)
self.node_path = node_path
self.attr_name = attr_name
self.new_attr_name = new_attr_name
self.model = model
self.stage = model.stage
self.layer_path = layer_path
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
self.rename_attribute(layer, self.new_attr_name, self.attr_name)
self.undo_effected_layer(layer.real_path)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
self.rename_attribute(layer, self.attr_name, self.new_attr_name)
self.redo_effected_layer(layer.real_path)
def rename_attribute(self, layer, attr_name, new_attr_name):
node = layer.lookup(self.node_path)
self.stage.rename_node_attr(node, attr_name, new_attr_name, layer)
self.model.update_comp_layer()
old_name = nxt_path.make_attr_path(self.node_path, attr_name)
new_name = nxt_path.make_attr_path(self.node_path, new_attr_name)
self.setText("Rename {} to {}".format(old_name, new_name))
class SetAttributeComment(SetNodeAttributeData):
"""Set attribute comment"""
def __init__(self, node_path, attr_name, comment, model, layer_path):
data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment}
super(SetAttributeComment, self).__init__(node_path, attr_name, data,
model, layer_path)
def redo(self):
super(SetAttributeComment, self).redo()
attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name)
self.setText("Changed comment on {}".format(attr_path))
class SetCompute(SetNodeAttributeValue):
"""Set node code value"""
def __init__(self, node_path, code_lines, model, layer_path):
super(SetCompute, self).__init__(node_path,
INTERNAL_ATTRS.COMPUTE,
code_lines, model, layer_path)
def redo(self):
super(SetCompute, self).redo()
self.setText("Changed compute on {}".format(self.node_path))
class SetNodeComment(SetNodeAttributeValue):
"""Set node comment"""
def __init__(self, node_path, comment, model, layer_path):
super(SetNodeComment, self).__init__(node_path,
INTERNAL_ATTRS.COMMENT,
comment, model, layer_path)
def redo(self):
super(SetNodeComment, self).redo()
self.setText("Changed comment on {}".format(self.node_path))
class SetNodeInstance(SetNodeAttributeValue):
"""Set node instance"""
def __init__(self, node_path, instance_path, model, layer_path):
super(SetNodeInstance, self).__init__(node_path,
INTERNAL_ATTRS.INSTANCE_PATH,
instance_path, model, layer_path)
def redo(self):
super(SetNodeInstance, self).redo()
txt = ("Set inst path on "
"{} to {}".format(self.node_path,
self.data.get(META_ATTRS.VALUE)))
self.setText(txt)
class SetNodeEnabledState(SetNodeAttributeValue):
"""Set node enabled state"""
def __init__(self, node_path, value, model, layer_path):
super(SetNodeEnabledState, self).__init__(node_path,
INTERNAL_ATTRS.ENABLED,
value, model, layer_path)
def redo(self):
super(SetNodeEnabledState, self).redo()
if self.data.get(META_ATTRS.VALUE):
self.setText("Enabled {}".format(self.node_path))
else:
self.setText("Disabled {}".format(self.node_path))
class SetNodeCollapse(NxtCommand):
"""Set the node collapse state"""
def __init__(self, node_paths, value,
model, layer_path):
super(SetNodeCollapse, self).__init__(model)
self.node_paths = node_paths
self.value = value
self.model = model
self.stage = model.stage
self.layer_path = layer_path
self.prev_values = {}
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
self.undo_effected_layer(layer.real_path)
for node_path, prev_value in self.prev_values.items():
layer.collapse[node_path] = prev_value
self.model.comp_layer.collapse[node_path] = prev_value
self.model.collapse_changed.emit(list(self.prev_values.keys()))
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
self.redo_effected_layer(layer.real_path)
self.prev_values = {}
for np in self.node_paths:
self.prev_values[np] = self.model.get_node_collapse(np, layer)
for node_path in self.node_paths:
layer.collapse[node_path] = self.value
self.model.comp_layer.collapse[node_path] = self.value
self.model.collapse_changed.emit(list(self.prev_values.keys()))
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
if self.value:
self.setText("Collapsed {}".format(path_str))
else:
self.setText("Expanded {}".format(path_str))
class SetNodeExecuteSources(SetNodeAttributeValue):
"""Set node execute sources"""
def __init__(self, node_path, exec_source, model, layer_path):
super(SetNodeExecuteSources, self).__init__(node_path,
INTERNAL_ATTRS.EXECUTE_IN,
exec_source, model,
layer_path)
def redo(self):
super(SetNodeExecuteSources, self).redo()
val = self.data.get(META_ATTRS.VALUE)
if val is None:
self.setText("Removed exec input for {}".format(self.node_path))
return
self.setText("Set {} exec input to {}".format(self.node_path, val))
class SetNodeBreakPoint(QUndoCommand):
"""Set node as a break point"""
def __init__(self, node_paths, value, model, layer_path):
super(SetNodeBreakPoint, self).__init__()
self.node_paths = node_paths
self.value = value
self.model = model
self.layer_path = layer_path
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
if not self.value:
func = self.model._add_breakpoint
else:
func = self.model._remove_breakpoint
for node_path in self.node_paths:
func(node_path, layer)
self.model.nodes_changed.emit(tuple(self.node_paths))
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
if self.value:
func = self.model._add_breakpoint
else:
func = self.model._remove_breakpoint
for node_path in self.node_paths:
func(node_path, layer)
self.model.nodes_changed.emit(tuple(self.node_paths))
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
if self.value:
self.setText("Add breakpoint to {}".format(path_str))
else:
self.setText("Remove breakpoint from {}".format(path_str))
class ClearBreakpoints(QUndoCommand):
"""Clear all the breakpoints for a given layer"""
def __init__(self, model, layer_path):
super(ClearBreakpoints, self).__init__()
self.model = model
self.layer_path = layer_path
self.prev_breaks = []
@processing
def undo(self):
user_dir.breakpoints[self.layer_path] = self.prev_breaks
self.model.nodes_changed.emit(tuple(self.prev_breaks))
@processing
def redo(self):
self.prev_breaks = user_dir.breakpoints.get(self.layer_path, [])
if self.layer_path in list(user_dir.breakpoints.keys()):
user_dir.breakpoints.pop(self.layer_path)
self.model.nodes_changed.emit(tuple(self.prev_breaks))
self.setText("Clear all breakpoints")
class SetNodeStartPoint(SetNodeAttributeValue):
"""Set this node as the execution start point"""
def __init__(self, node_path, value, model, layer_path):
super(SetNodeStartPoint, self).__init__(node_path,
INTERNAL_ATTRS.START_POINT,
value, model, layer_path)
class SetNodeChildOrder(SetNodeAttributeValue):
"""Set node child order"""
def __init__(self, node_path, child_order, model, layer_path):
super(SetNodeChildOrder, self).__init__(node_path,
INTERNAL_ATTRS.CHILD_ORDER,
child_order, model, layer_path)
def redo(self):
super(SetNodeChildOrder, self).redo()
self.setText("Change child order on {}".format(self.node_path))
class SetLayerAlias(NxtCommand):
"""Set Layer Alias"""
def __init__(self, alias, layer_path, model):
super(SetLayerAlias, self).__init__(model)
self.layer_path = layer_path
self.alias = alias
self.old_alias = ''
self.model = model
self.stage = model.stage
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
layer.set_alias(self.old_alias)
else:
layer.set_alias_over(self.old_alias)
self.undo_effected_layer(self.model.top_layer.real_path)
self.model.layer_alias_changed.emit(self.layer_path)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
self.old_alias = layer.get_alias(local=True)
layer.set_alias(self.alias)
else:
self.old_alias = layer.get_alias(fallback_to_local=False)
layer.set_alias_over(self.alias)
self.redo_effected_layer(self.model.top_layer.real_path)
self.model.layer_alias_changed.emit(self.layer_path)
self.setText("Set {} alias to {}".format(layer.filepath, self.alias))
class NewLayer(NxtCommand):
"""Add new layer"""
def __init__(self, file_path, file_name, idx, model, chdir):
super(NewLayer, self).__init__(model)
self.new_layer_path = None
self.model = model
self.stage = model.stage
self.insert_idx = idx
self.file_path = file_path
self.file_name = file_name
self.chdir = chdir
@processing
def undo(self):
new_layer = self.model.lookup_layer(self.new_layer_path)
if new_layer in self.stage._sub_layers:
self.undo_effected_layer(new_layer.parent_layer.real_path)
self.stage.remove_sublayer(new_layer)
self.model.update_comp_layer(rebuild=True)
self.model.set_target_layer(LAYERS.TOP)
self.undo_effected_layer(self.new_layer_path)
self.model.layer_removed.emit(self.new_layer_path)
@processing
def redo(self):
sub_layer_count = len(self.stage._sub_layers)
if 0 < self.insert_idx <= sub_layer_count:
parent_layer = self.stage._sub_layers[self.insert_idx - 1]
self.redo_effected_layer(parent_layer.real_path)
else:
parent_layer = None
layer_color_index = [str(k.name()) for k in colors.LAYER_COLORS]
open_layer_colors = []
for layer in self.stage._sub_layers:
color = layer.color
if color:
color = color.lower()
open_layer_colors += [color]
layer_color = layer_color_index[0]
for c in layer_color_index:
if c not in open_layer_colors:
layer_color = c
break
real_path = nxt_path.full_file_expand(self.file_path, start=self.chdir)
layer_data = {"parent_layer": parent_layer,
SAVE_KEY.FILEPATH: self.file_path,
SAVE_KEY.REAL_PATH: real_path,
SAVE_KEY.COLOR: layer_color,
SAVE_KEY.ALIAS: self.file_name
}
new_layer = self.stage.new_sublayer(layer_data=layer_data,
idx=self.insert_idx)
self.new_layer_path = new_layer.real_path
self.redo_effected_layer(new_layer.real_path)
        # Fixme: The next 2 lines each trigger a comp layer build (so it builds twice)
self.model.update_comp_layer(rebuild=True)
self.model.set_target_layer(self.new_layer_path)
self.model.layer_added.emit(self.new_layer_path)
self.setText("New layer {}".format(self.new_layer_path))
class ReferenceLayer(NxtCommand):
"""Refernce existing layer"""
def __init__(self, file_path, idx, model, chdir):
super(ReferenceLayer, self).__init__(model)
self.model = model
self.stage = model.stage
self.insert_idx = idx
self.file_path = file_path
self.real_path = nxt_path.full_file_expand(self.file_path, chdir)
@processing
def undo(self):
new_layer = self.model.lookup_layer(self.real_path)
if new_layer in self.stage._sub_layers:
self.undo_effected_layer(new_layer.parent_layer.real_path)
self.stage.remove_sublayer(new_layer)
self.model.set_target_layer(LAYERS.TOP)
self.model.update_comp_layer(rebuild=True)
self.model.layer_removed.emit(self.real_path)
@processing
def redo(self):
sub_layer_count = len(self.stage._sub_layers)
if 0 < self.insert_idx <= sub_layer_count:
parent_layer = self.stage._sub_layers[self.insert_idx - 1]
self.redo_effected_layer(parent_layer.real_path)
else:
parent_layer = None
layer_data = nxt_io.load_file_data(self.real_path)
extra_data = {"parent_layer": parent_layer,
"filepath": self.file_path,
"real_path": self.real_path,
"alias": layer_data['name']
}
layer_data.update(extra_data)
self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx)
        # Fixme: The next 2 lines each trigger a comp layer build (so it builds twice)
self.model.update_comp_layer(rebuild=True)
self.model.set_target_layer(self.real_path)
self.model.layer_added.emit(self.real_path)
self.setText("Added reference to {}".format(self.real_path))
class RemoveLayer(ReferenceLayer):
"""Remove existing layer"""
def __init__(self, layer_path, model):
idx = model.lookup_layer(layer_path).layer_idx()
super(RemoveLayer, self).__init__(layer_path, idx, model, None)
self.text = "Removed reference to {}".format(layer_path)
@processing
def undo(self):
super(RemoveLayer, self).redo()
self.setText(self.text)
@processing
def redo(self):
super(RemoveLayer, self).undo()
self.setText(self.text)
class MuteToggleLayer(NxtCommand):
"""Toggles muting an existing layer"""
def __init__(self, layer_path, model):
super(MuteToggleLayer, self).__init__(model)
self.layer_path = layer_path
self.model = model
self.layer_paths = []
def undo(self):
self.toggle_state()
for layer_path in self.layer_paths:
self.undo_effected_layer(layer_path)
def redo(self):
self.layer_paths = []
self.toggle_state()
for layer_path in self.layer_paths:
self.redo_effected_layer(layer_path)
@processing
def toggle_state(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
state = not layer.get_muted(local=True)
layer.set_muted(state)
self.layer_paths.append(layer.real_path)
else:
state = not layer.get_muted(local=False)
self.model.top_layer.set_mute_over(layer.filepath, state)
self.layer_paths.append(self.model.top_layer.real_path)
self.model.update_comp_layer(rebuild=True)
self.model.layer_mute_changed.emit((self.layer_path,))
self.setText("Toggle {} muted.".format(layer.get_alias()))
class SoloToggleLayer(NxtCommand):
"""Toggles soloing an existing layer"""
def __init__(self, layer_path, model):
super(SoloToggleLayer, self).__init__(model)
self.layer_path = layer_path
self.model = model
self.layer_paths = []
def undo(self):
self.toggle_state()
for layer_path in self.layer_paths:
self.undo_effected_layer(layer_path)
def redo(self):
self.layer_paths = []
self.toggle_state()
for layer_path in self.layer_paths:
self.redo_effected_layer(layer_path)
@processing
def toggle_state(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
state = not layer.get_soloed(local=True)
layer.set_soloed(state)
self.layer_paths.append(layer.real_path)
else:
state = not layer.get_soloed(local=False)
self.model.top_layer.set_solo_over(layer.filepath, state)
self.layer_paths.append(self.model.top_layer.real_path)
self.model.update_comp_layer(rebuild=True)
self.model.layer_solo_changed.emit((self.layer_path,))
self.setText("Toggle {} soloed.".format(layer.get_alias()))
class SetLayerColor(NxtCommand):
def __init__(self, color, layer_path, model):
"""Sets the color for a given layer, if the layer is not a top layer
the top layer store an overrides.
:param color: string of new layer alias (name)
:param layer_path: real path of layer
:param model: StageModel
"""
super(SetLayerColor, self).__init__(model)
self.layer_path = layer_path
self.color = color
self.old_color = ''
self.model = model
self.stage = model.stage
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
layer.color = self.old_color
else:
layer.set_color_over(self.old_color)
self.undo_effected_layer(self.model.top_layer.real_path)
self.model.layer_color_changed.emit(self.layer_path)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
self.old_color = layer.get_color(local=True)
layer.color = self.color
else:
self.old_color = layer.get_color(fallback_to_local=False)
layer.set_color_over(self.color)
self.redo_effected_layer(self.model.top_layer.real_path)
self.model.layer_color_changed.emit(self.layer_path)
self.setText("Set {} color to {}".format(layer.filepath, self.color))
def _add_node_hierarchy(base_node_path, model, layer):
stage = model.stage
comp_layer = model.comp_layer
new_node_paths = []
new_nodes = []
node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path)
new_node_table, dirty = stage.add_node_hierarchy(node_hierarchy,
parent=None, layer=layer,
comp_layer=comp_layer)
for nn_p, n in new_node_table:
display_node = comp_layer.lookup(nn_p)
if display_node is not None:
display_child_order = getattr(display_node,
INTERNAL_ATTRS.CHILD_ORDER)
old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER)
new_child_order = list_merger(display_child_order,
old_child_order)
setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order)
new_node_paths += [nn_p]
new_nodes += [n]
return new_nodes, new_node_paths, dirty
def undo_debug(cmd, start):
update_time = str(int(round((time.time() - start) * 1000)))
logger.debug("Undo " + cmd.text() + " | " + update_time + "ms")
def redo_debug(cmd, start):
update_time = str(int(round((time.time() - start) * 1000)))
logger.debug(cmd.text() + " | " + update_time + "ms")
|
premium/backend/src/baserow_premium/api/admin/dashboard/views.py | cjh0613/baserow | 839 | 4553 | <reponame>cjh0613/baserow
from datetime import timedelta
from django.contrib.auth import get_user_model
from drf_spectacular.utils import extend_schema
from rest_framework.response import Response
from rest_framework.permissions import IsAdminUser
from rest_framework.views import APIView
from baserow.api.decorators import accept_timezone
from baserow.core.models import Group, Application
from baserow_premium.admin.dashboard.handler import AdminDashboardHandler
from .serializers import AdminDashboardSerializer
User = get_user_model()
class AdminDashboardView(APIView):
permission_classes = (IsAdminUser,)
@extend_schema(
tags=["Admin"],
operation_id="admin_dashboard",
description="Returns the new and active users for the last 24 hours, 7 days and"
" 30 days. The `previous_` values are the values of the period before, so for "
"example `previous_new_users_last_24_hours` are the new users that signed up "
"from 48 to 24 hours ago. It can be used to calculate an increase or decrease "
"in the amount of signups. A list of the new and active users for every day "
"for the last 30 days is also included.\n\nThis is a **premium** feature.",
responses={
200: AdminDashboardSerializer,
401: None,
},
)
@accept_timezone()
def get(self, request, now):
"""
Returns the new and active users for the last 24 hours, 7 days and 30 days.
The `previous_` values are the values of the period before, so for example
`previous_new_users_last_24_hours` are the new users that signed up from 48
to 24 hours ago. It can be used to calculate an increase or decrease in the
amount of signups. A list of the new and active users for every day for the
last 30 days is also included.
"""
handler = AdminDashboardHandler()
total_users = User.objects.filter(is_active=True).count()
total_groups = Group.objects.all().count()
total_applications = Application.objects.all().count()
new_users = handler.get_new_user_counts(
{
"new_users_last_24_hours": timedelta(hours=24),
"new_users_last_7_days": timedelta(days=7),
"new_users_last_30_days": timedelta(days=30),
},
include_previous=True,
)
active_users = handler.get_active_user_count(
{
"active_users_last_24_hours": timedelta(hours=24),
"active_users_last_7_days": timedelta(days=7),
"active_users_last_30_days": timedelta(days=30),
},
include_previous=True,
)
new_users_per_day = handler.get_new_user_count_per_day(
timedelta(days=30), now=now
)
active_users_per_day = handler.get_active_user_count_per_day(
timedelta(days=30), now=now
)
serializer = AdminDashboardSerializer(
{
"total_users": total_users,
"total_groups": total_groups,
"total_applications": total_applications,
"new_users_per_day": new_users_per_day,
"active_users_per_day": active_users_per_day,
**new_users,
**active_users,
}
)
return Response(serializer.data)
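# --- Illustrative only: a minimal, self-contained sketch of how the
# "previous_" counters described in the docstring above line up with their
# periods. For a window of `delta` ending at `now`, the current range is
# [now - delta, now] and the previous range is [now - 2 * delta, now - delta].
# This helper is not part of Baserow; it only documents the convention.
def _example_period_windows(now, delta):
    current = (now - delta, now)
    previous = (now - 2 * delta, now - delta)
    return current, previous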
|
lib/networks/Resnet50_train.py | yangxue0827/TF_Deformable_Net | 193 | 4570 | <reponame>yangxue0827/TF_Deformable_Net
# --------------------------------------------------------
# TFFRCNN - Resnet50
# Copyright (c) 2016
# Licensed under The MIT License [see LICENSE for details]
# Written by miraclebiu
# --------------------------------------------------------
import tensorflow as tf
from .network import Network
from ..fast_rcnn.config import cfg
class Resnet50_train(Network):
def __init__(self, trainable=True):
self.inputs = []
self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='data')
self.im_info = tf.placeholder(tf.float32, shape=[None, 3], name='im_info')
self.gt_boxes = tf.placeholder(tf.float32, shape=[None, 5], name='gt_boxes')
self.gt_ishard = tf.placeholder(tf.int32, shape=[None], name='gt_ishard')
self.dontcare_areas = tf.placeholder(tf.float32, shape=[None, 4], name='dontcare_areas')
self.keep_prob = tf.placeholder(tf.float32)
self.layers = dict({'data':self.data, 'im_info':self.im_info, 'gt_boxes':self.gt_boxes,\
'gt_ishard': self.gt_ishard, 'dontcare_areas': self.dontcare_areas})
self.trainable = trainable
self.setup()
def setup(self):
n_classes = cfg.NCLASSES
# anchor_scales = [8, 16, 32]
anchor_scales = cfg.ANCHOR_SCALES
_feat_stride = [16, ]
(self.feed('data')
.conv(7, 7, 64, 2, 2, relu=False, name='conv1')
.batch_normalization(relu=True, name='bn_conv1', is_training=False)
.max_pool(3, 3, 2, 2, padding='VALID',name='pool1')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1')
.batch_normalization(name='bn2a_branch1',is_training=False,relu=False))
(self.feed('pool1')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a')
.batch_normalization(relu=True, name='bn2a_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b')
.batch_normalization(relu=True, name='bn2a_branch2b',is_training=False)
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c')
.batch_normalization(name='bn2a_branch2c',is_training=False,relu=False))
(self.feed('bn2a_branch1',
'bn2a_branch2c')
.add(name='res2a')
.relu(name='res2a_relu')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a')
.batch_normalization(relu=True, name='bn2b_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b')
.batch_normalization(relu=True, name='bn2b_branch2b',is_training=False)
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c')
.batch_normalization(name='bn2b_branch2c',is_training=False,relu=False))
(self.feed('res2a_relu',
'bn2b_branch2c')
.add(name='res2b')
.relu(name='res2b_relu')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a')
.batch_normalization(relu=True, name='bn2c_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b')
.batch_normalization(relu=True, name='bn2c_branch2b',is_training=False)
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c')
.batch_normalization(name='bn2c_branch2c',is_training=False,relu=False))
(self.feed('res2b_relu',
'bn2c_branch2c')
.add(name='res2c')
.relu(name='res2c_relu')
.conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res3a_branch1', padding='VALID')
.batch_normalization(name='bn3a_branch1',is_training=False,relu=False))
(self.feed('res2c_relu')
.conv(1, 1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a', padding='VALID')
.batch_normalization(relu=True, name='bn3a_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b')
.batch_normalization(relu=True, name='bn3a_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c')
.batch_normalization(name='bn3a_branch2c',is_training=False,relu=False))
(self.feed('bn3a_branch1',
'bn3a_branch2c')
.add(name='res3a')
.relu(name='res3a_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b_branch2a')
.batch_normalization(relu=True, name='bn3b_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b_branch2b')
.batch_normalization(relu=True, name='bn3b_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b_branch2c')
.batch_normalization(name='bn3b_branch2c',is_training=False,relu=False))
(self.feed('res3a_relu',
'bn3b_branch2c')
.add(name='res3b')
.relu(name='res3b_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3c_branch2a')
.batch_normalization(relu=True, name='bn3c_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3c_branch2b')
.batch_normalization(relu=True, name='bn3c_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3c_branch2c')
.batch_normalization(name='bn3c_branch2c',is_training=False,relu=False))
(self.feed('res3b_relu',
'bn3c_branch2c')
.add(name='res3c')
.relu(name='res3c_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3d_branch2a')
.batch_normalization(relu=True, name='bn3d_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3d_branch2b')
.batch_normalization(relu=True, name='bn3d_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3d_branch2c')
.batch_normalization(name='bn3d_branch2c',is_training=False,relu=False))
(self.feed('res3c_relu',
'bn3d_branch2c')
.add(name='res3d')
.relu(name='res3d_relu')
.conv(1, 1, 1024, 2, 2, biased=False, relu=False, name='res4a_branch1', padding='VALID')
.batch_normalization(name='bn4a_branch1',is_training=False,relu=False))
(self.feed('res3d_relu')
.conv(1, 1, 256, 2, 2, biased=False, relu=False, name='res4a_branch2a', padding='VALID')
.batch_normalization(relu=True, name='bn4a_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4a_branch2b')
.batch_normalization(relu=True, name='bn4a_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c')
.batch_normalization(name='bn4a_branch2c',is_training=False,relu=False))
(self.feed('bn4a_branch1',
'bn4a_branch2c')
.add(name='res4a')
.relu(name='res4a_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b_branch2a')
.batch_normalization(relu=True, name='bn4b_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b_branch2b')
.batch_normalization(relu=True, name='bn4b_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b_branch2c')
.batch_normalization(name='bn4b_branch2c',is_training=False,relu=False))
(self.feed('res4a_relu',
'bn4b_branch2c')
.add(name='res4b')
.relu(name='res4b_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4c_branch2a')
.batch_normalization(relu=True, name='bn4c_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4c_branch2b')
.batch_normalization(relu=True, name='bn4c_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4c_branch2c')
.batch_normalization(name='bn4c_branch2c',is_training=False,relu=False))
(self.feed('res4b_relu',
'bn4c_branch2c')
.add(name='res4c')
.relu(name='res4c_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4d_branch2a')
.batch_normalization(relu=True, name='bn4d_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4d_branch2b')
.batch_normalization(relu=True, name='bn4d_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4d_branch2c')
.batch_normalization(name='bn4d_branch2c',is_training=False,relu=False))
(self.feed('res4c_relu',
'bn4d_branch2c')
.add(name='res4d')
.relu(name='res4d_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4e_branch2a')
.batch_normalization(relu=True, name='bn4e_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4e_branch2b')
.batch_normalization(relu=True, name='bn4e_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4e_branch2c')
.batch_normalization(name='bn4e_branch2c',is_training=False,relu=False))
(self.feed('res4d_relu',
'bn4e_branch2c')
.add(name='res4e')
.relu(name='res4e_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4f_branch2a')
.batch_normalization(relu=True, name='bn4f_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4f_branch2b')
.batch_normalization(relu=True, name='bn4f_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4f_branch2c')
.batch_normalization(name='bn4f_branch2c',is_training=False,relu=False))
(self.feed('res4e_relu',
'bn4f_branch2c')
.add(name='res4f')
.relu(name='res4f_relu'))
#========= RPN ============
(self.feed('res4f_relu')
.conv(3,3,512,1,1,name='rpn_conv/3x3')
.conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu = False, name='rpn_cls_score'))
(self.feed('rpn_cls_score', 'gt_boxes', 'gt_ishard', 'dontcare_areas', 'im_info')
.anchor_target_layer(_feat_stride, anchor_scales, name = 'rpn-data' ))
# Loss of rpn_cls & rpn_boxes
(self.feed('rpn_conv/3x3')
.conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu = False, name='rpn_bbox_pred'))
#========= RoI Proposal ============
(self.feed('rpn_cls_score')
.spatial_reshape_layer(2, name = 'rpn_cls_score_reshape')
.spatial_softmax(name='rpn_cls_prob'))
(self.feed('rpn_cls_prob')
.spatial_reshape_layer(len(anchor_scales)*3*2, name = 'rpn_cls_prob_reshape'))
(self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info')
.proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name = 'rpn_rois'))
(self.feed('rpn_rois','gt_boxes', 'gt_ishard', 'dontcare_areas')
.proposal_target_layer(n_classes,name = 'roi-data'))
#========= RCNN ============
(self.feed('res4f_relu')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch1', padding='VALID')
.batch_normalization(relu=False, name='bn5a_branch1'))
(self.feed('res4f_relu')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5a_branch2a', padding='VALID')
.batch_normalization(relu=False, name='bn5a_branch2a')
.relu(name='res5a_branch2a_relu')
.conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False, name='res5a_branch2b_offset', padding='SAME', initializer='zeros'))
(self.feed('res5a_branch2a_relu', 'res5a_branch2b_offset')
.deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5a_branch2b')
.batch_normalization(relu=False, name='bn5a_branch2b')
.relu(name='res5a_branch2b_relu')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c', padding='VALID')
.batch_normalization(relu=False, name='bn5a_branch2c'))
(self.feed('bn5a_branch1', 'bn5a_branch2c')
.add(name='res5a')
.relu(name='res5a_relu')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a', padding='VALID')
.batch_normalization(relu=False, name='bn5b_branch2a')
.relu(name='res5b_branch2a_relu')
.conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False, name='res5b_branch2b_offset', padding='SAME', initializer='zeros'))
(self.feed('res5b_branch2a_relu', 'res5b_branch2b_offset')
.deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5b_branch2b')
.batch_normalization(relu=False, name='bn5b_branch2b')
.relu(name='res5b_branch2b_relu')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c', padding='VALID')
.batch_normalization(relu=False, name='bn5b_branch2c'))
(self.feed('res5a_relu', 'bn5b_branch2c')
.add(name='res5b')
.relu(name='res5b_relu')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a', padding='VALID')
.batch_normalization(relu=False, name='bn5c_branch2a')
.relu(name='res5c_branch2a_relu')
.conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False, name='res5c_branch2b_offset', padding='SAME', initializer='zeros') )
(self.feed('res5c_branch2a_relu', 'res5c_branch2b_offset')
.deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5c_branch2b')
.batch_normalization(relu=False, name='bn5c_branch2b')
.relu(name='res5c_branch2b_relu')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c', padding='VALID')
.batch_normalization(relu=False, name='bn5c_branch2c'))
(self.feed('res5b_relu', 'bn5c_branch2c')
.add(name='res5c')
.relu(name='res5c_relu')
.conv(1, 1, 256, 1, 1, relu=False, name='conv_new_1')
.relu(name='conv_new_1_relu'))
(self.feed('conv_new_1_relu', 'roi-data')
.deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=True, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='offset_t')
# .flatten_data(name='offset_flatten')
.fc(num_out=7 * 7 * 2, name='offset', relu=False)
.reshape(shape=(-1,2,7,7), name='offset_reshape'))
(self.feed('conv_new_1_relu', 'roi-data', 'offset_reshape')
.deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='deformable_roi_pool')
.fc(num_out=1024, name='fc_new_1')
.fc(num_out=1024, name='fc_new_2'))
(self.feed('fc_new_2')
.fc(num_out=n_classes, name='cls_score', relu=False)
.softmax(name='cls_prob'))
(self.feed('fc_new_2')
.fc(num_out=4*n_classes, name='bbox_pred', relu=False))
# (self.feed('res4f_relu','roi-data')
# .roi_pool(7,7,1.0/16,name='res5a_branch2a_roipooling')
# .conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res5a_branch2a', padding='VALID')
# .batch_normalization(relu=True, name='bn5a_branch2a',is_training=False)
# .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5a_branch2b')
# .batch_normalization(relu=True, name='bn5a_branch2b',is_training=False)
# .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c')
# .batch_normalization(name='bn5a_branch2c',is_training=False,relu=False))
# (self.feed('res5a_branch2a_roipooling')
# .conv(1,1,2048,2,2,biased=False, relu=False, name='res5a_branch1', padding='VALID')
# .batch_normalization(name='bn5a_branch1',is_training=False,relu=False))
# (self.feed('bn5a_branch2c','bn5a_branch1')
# .add(name='res5a')
# .relu(name='res5a_relu')
# .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a')
# .batch_normalization(relu=True, name='bn5b_branch2a',is_training=False)
# .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5b_branch2b')
# .batch_normalization(relu=True, name='bn5b_branch2b',is_training=False)
# .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c')
# .batch_normalization(name='bn5b_branch2c',is_training=False,relu=False))
# #pdb.set_trace()
# (self.feed('res5a_relu',
# 'bn5b_branch2c')
# .add(name='res5b')
# .relu(name='res5b_relu')
# .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a')
# .batch_normalization(relu=True, name='bn5c_branch2a',is_training=False)
# .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5c_branch2b')
# .batch_normalization(relu=True, name='bn5c_branch2b',is_training=False)
# .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c')
# .batch_normalization(name='bn5c_branch2c',is_training=False,relu=False))
# #pdb.set_trace()
# (self.feed('res5b_relu',
# 'bn5c_branch2c')
# .add(name='res5c')
# .relu(name='res5c_relu')
# .fc(n_classes, relu=False, name='cls_score')
# .softmax(name='cls_prob'))
# (self.feed('res5c_relu')
# .fc(n_classes*4, relu=False, name='bbox_pred'))
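# --- Illustrative only: a hedged sketch of building the training graph.
# Assumptions: TensorFlow 1.x graph mode, cfg.NCLASSES / cfg.ANCHOR_SCALES
# already configured by the caller, and a get_output() accessor provided by
# the Network base class as in TFFRCNN.
def _example_build_resnet50_train():
    net = Resnet50_train(trainable=True)
    cls_prob = net.get_output('cls_prob')    # per-RoI class probabilities
    bbox_pred = net.get_output('bbox_pred')  # per-class box regression deltas
    return net, cls_prob, bbox_pred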
|
lib/aws_sso_lib/assignments.py | vdesjardins/aws-sso-util | 330 | 4571 | import re
import numbers
import collections
import logging
from collections.abc import Iterable
import itertools
import aws_error_utils
from .lookup import Ids, lookup_accounts_for_ou
from .format import format_account_id
LOGGER = logging.getLogger(__name__)
_Context = collections.namedtuple("_Context", [
"session",
"ids",
"principal",
"principal_filter",
"permission_set",
"permission_set_filter",
"target",
"target_filter",
"get_principal_names",
"get_permission_set_names",
"get_target_names",
"ou_recursive",
"cache",
"filter_cache"
])
def _filter(filter_cache, key, func, args):
if not func:
return True
if key not in filter_cache:
filter_cache[key] = func(*args)
return filter_cache[key]
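# --- Illustrative only: a hedged sketch of how _filter memoizes a
# user-supplied predicate per key, so each account/principal/permission set
# is evaluated at most once per listing run. The predicate and the account
# values below are made up for the example.
def _example_filter_usage():
    cache = {}
    only_prod = lambda type_, id_, name: name.startswith("prod-")
    first = _filter(cache, "123456789012", only_prod,
                    ("AWS_ACCOUNT", "123456789012", "prod-data"))
    second = _filter(cache, "123456789012", only_prod,
                     ("AWS_ACCOUNT", "123456789012", "prod-data"))
    return first, second, cache  # both True; the predicate ran only once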
def _flatten(list_of_lists):
return list(itertools.chain(*list_of_lists))
def _is_principal_tuple(principal):
try:
return all([
len(principal) == 2,
isinstance(principal[0], str),
principal[0] in ["GROUP", "USER"],
isinstance(principal[1], str),
])
except:
return False
def _process_principal(principal):
if not principal:
return None
if isinstance(principal, str):
return [(None, principal)]
if _is_principal_tuple(principal):
return [tuple(principal)]
else:
return _flatten(_process_principal(p) for p in principal)
def _process_permission_set(ids, permission_set):
if not permission_set:
return None
if not isinstance(permission_set, str) and isinstance(permission_set, Iterable):
return _flatten(_process_permission_set(ids, ps) for ps in permission_set)
if permission_set.startswith("arn"):
permission_set_arn = permission_set
elif permission_set.startswith("ssoins-") or permission_set.startswith("ins-"):
permission_set_arn = f"arn:aws:sso:::permissionSet/{permission_set}"
elif permission_set.startswith("ps-"):
permission_set_arn = f"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}"
else:
raise TypeError(f"Invalid permission set id {permission_set}")
return [permission_set_arn]
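# --- Illustrative only: _process_permission_set accepts a full permission
# set ARN, an instance-prefixed id, or a bare "ps-" id and normalizes all of
# them to ARNs. The FakeIds stand-in and the ids below are made up; the real
# Ids object comes from .lookup.
def _example_process_permission_set_usage():
    fake_ids = collections.namedtuple("FakeIds", ["instance_id"])("ssoins-1234567890abcdef")
    by_arn = _process_permission_set(
        fake_ids, "arn:aws:sso:::permissionSet/ssoins-1234567890abcdef/ps-abc123")
    by_id = _process_permission_set(fake_ids, "ps-abc123")
    return by_arn, by_id  # both normalize to the same full permission set ARN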
def _is_target_tuple(target):
try:
return all([
len(target) == 2,
isinstance(target[0], str),
target[0] in ["AWS_OU", "AWS_ACCOUNT"],
isinstance(target[1], str),
])
except:
return False
def _process_target(target):
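    # Accepts an account id (number or digit string), a root/OU id ("r-..." / "ou-..."),
    # an explicit ("AWS_ACCOUNT" | "AWS_OU", id) tuple, or an iterable of any of these,
    # and normalizes everything to a list of (target_type, target_id) tuples.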
if not target:
return None
if isinstance(target, numbers.Number):
return [("AWS_ACCOUNT", format_account_id(target))]
if isinstance(target, str):
if re.match(r"^\d+$", target):
return [("AWS_ACCOUNT", format_account_id(target))]
elif re.match(r"^r-[a-z0-9]{4,32}$", target) or re.match(r"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$", target):
return [("AWS_OU", target)]
else:
raise TypeError(f"Invalid target {target}")
elif _is_target_tuple(target):
target_type, target_id = target
if target_type not in ["AWS_ACCOUNT", "AWS_OU"]:
raise TypeError(f"Invalid target type {target_type}")
return [(target_type, target_id)]
else:
value = _flatten(_process_target(t) for t in target)
return value
def _get_account_iterator(target, context: _Context):
def target_iterator():
target_name = None
if context.get_target_names:
organizations_client = context.session.client("organizations")
account = organizations_client.describe_account(AccountId=target[1])["Account"]
if account.get("Name"):
target_name = account["Name"]
value = (*target, target_name)
if not _filter(context.filter_cache, value[1], context.target_filter, value):
LOGGER.debug(f"Account is filtered: {value}")
else:
LOGGER.debug(f"Visiting single account: {value}")
yield value
return target_iterator
def _get_ou_iterator(target, context: _Context):
def target_iterator():
target_name = None
# if context.get_target_names:
# organizations_client = context.session.client("organizations")
# ou = organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])["OrganizationalUnit"]
# if ou.get("Name"):
        #         target_name = ou["Name"]
value = (*target, target_name)
accounts = lookup_accounts_for_ou(context.session, value[1], recursive=context.ou_recursive)
for account in accounts:
yield "AWS_ACCOUNT", account["Id"], account["Name"]
return target_iterator
def _get_single_target_iterator(target, context: _Context):
target_type = target[0]
if target_type == "AWS_ACCOUNT":
return _get_account_iterator(target, context)
elif target_type == "AWS_OU":
return _get_ou_iterator(target, context)
else:
raise TypeError(f"Invalid target type {target_type}")
def _get_all_accounts_iterator(context: _Context):
def target_iterator():
organizations_client = context.session.client("organizations")
accounts_paginator = organizations_client.get_paginator("list_accounts")
for response in accounts_paginator.paginate():
LOGGER.debug(f"ListAccounts page: {response}")
for account in response["Accounts"]:
account_id = account["Id"]
account_name = account["Name"]
value = ("AWS_ACCOUNT", account_id, account_name)
if not _filter(context.filter_cache, account_id, context.target_filter, value):
LOGGER.debug(f"Account is filtered: {value}")
continue
LOGGER.debug(f"Visiting account: {value}")
yield value
return target_iterator
def _get_target_iterator(context: _Context):
if context.target:
iterables = [_get_single_target_iterator(t, context) for t in context.target]
def target_iterator():
return itertools.chain(*[it() for it in iterables])
return target_iterator
else:
LOGGER.debug(f"Iterating for all accounts")
return _get_all_accounts_iterator(context)
def _get_single_permission_set_iterator(permission_set, context: _Context):
permission_set_arn = permission_set
permission_set_id = permission_set_arn.split("/")[-1]
def permission_set_iterator(target_type, target_id, target_name):
if not context.get_permission_set_names:
permission_set_name = None
else:
sso_admin_client = context.session.client("sso-admin")
response = sso_admin_client.describe_permission_set(
InstanceArn=context.ids.instance_arn,
PermissionSetArn=permission_set_arn
)
LOGGER.debug(f"DescribePermissionSet response: {response}")
permission_set_name = response["PermissionSet"]["Name"]
if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)):
LOGGER.debug(f"Single permission set is filtered: {(permission_set_id, permission_set_name)}")
else:
LOGGER.debug(f"Visiting single permission set {(permission_set_id, permission_set_name)}")
yield permission_set_arn, permission_set_id, permission_set_name
return permission_set_iterator
def _get_all_permission_sets_iterator(context: _Context):
def permission_set_iterator(target_type, target_id, target_name):
if target_type != "AWS_ACCOUNT":
raise TypeError(f"Unsupported target type {target_type}")
sso_admin_client = context.session.client("sso-admin")
permission_sets_paginator = sso_admin_client.get_paginator("list_permission_sets_provisioned_to_account")
for response in permission_sets_paginator.paginate(
InstanceArn=context.ids.instance_arn,
AccountId=target_id):
LOGGER.debug(f"ListPermissionSetsProvisionedToAccount {target_id} page: {response}")
if "PermissionSets" not in response:
continue
for permission_set_arn in response["PermissionSets"]:
permission_set_id = permission_set_arn.split("/", 2)[-1]
if not context.get_permission_set_names:
permission_set_name = None
else:
if permission_set_arn not in context.cache:
response = sso_admin_client.describe_permission_set(
InstanceArn=context.ids.instance_arn,
PermissionSetArn=permission_set_arn
)
LOGGER.debug(f"DescribePermissionSet response: {response}")
context.cache[permission_set_arn] = response["PermissionSet"]["Name"]
permission_set_name = context.cache[permission_set_arn]
if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)):
LOGGER.debug(f"Permission set is filtered: {(permission_set_id, permission_set_name)}")
continue
LOGGER.debug(f"Visiting permission set: {(permission_set_id, permission_set_name)}")
yield permission_set_arn, permission_set_id, permission_set_name
return permission_set_iterator
def _get_permission_set_iterator(context: _Context):
if context.permission_set:
iterables = [_get_single_permission_set_iterator(ps, context) for ps in context.permission_set]
def permission_set_iterator(target_type, target_id, target_name):
return itertools.chain(*[it(target_type, target_id, target_name) for it in iterables])
return permission_set_iterator
else:
LOGGER.debug("Iterating for all permission sets")
return _get_all_permission_sets_iterator(context)
def _get_principal_iterator(context: _Context):
def principal_iterator(
target_type, target_id, target_name,
permission_set_arn, permission_set_id, permission_set_name):
if target_type != "AWS_ACCOUNT":
raise TypeError(f"Unsupported target type {target_type}")
sso_admin_client = context.session.client("sso-admin")
identity_store_client = context.session.client("identitystore")
assignments_paginator = sso_admin_client.get_paginator("list_account_assignments")
for response in assignments_paginator.paginate(
InstanceArn=context.ids.instance_arn,
AccountId=target_id,
PermissionSetArn=permission_set_arn):
LOGGER.debug(f"ListAccountAssignments for {target_id} {permission_set_arn.split('/')[-1]} page: {response}")
            if not response["AccountAssignments"] and "NextToken" not in response:
LOGGER.debug(f"No assignments for {target_id} {permission_set_arn.split('/')[-1]}")
for assignment in response["AccountAssignments"]:
principal_type = assignment["PrincipalType"]
principal_id = assignment["PrincipalId"]
LOGGER.debug(f"Visiting principal {principal_type}:{principal_id}")
if context.principal:
for principal in context.principal:
                        type_matches = (principal[0] is None or principal[0] == principal_type)
if type_matches and principal[1] == principal_id:
LOGGER.debug(f"Found principal {principal_type}:{principal_id}")
break
else:
LOGGER.debug(f"Principal {principal_type}:{principal_id} does not match principals")
continue
principal_key = (principal_type, principal_id)
if not context.get_principal_names:
principal_name = None
else:
if principal_key not in context.cache:
if principal_type == "GROUP":
try:
response = identity_store_client.describe_group(
IdentityStoreId=context.ids.identity_store_id,
GroupId=principal_id
)
LOGGER.debug(f"DescribeGroup response: {response}")
context.cache[principal_key] = response["DisplayName"]
except aws_error_utils.catch_aws_error("ResourceNotFoundException"):
context.cache[principal_key] = None
elif principal_type == "USER":
try:
response = identity_store_client.describe_user(
IdentityStoreId=context.ids.identity_store_id,
UserId=principal_id
)
LOGGER.debug(f"DescribeUser response: {response}")
context.cache[principal_key] = response["UserName"]
except aws_error_utils.catch_aws_error("ResourceNotFoundException"):
context.cache[principal_key] = None
else:
raise ValueError(f"Unknown principal type {principal_type}")
principal_name = context.cache[principal_key]
if not _filter(context.filter_cache, principal_key, context.principal_filter, (principal_type, principal_id, principal_name)):
                    LOGGER.debug(f"Principal is filtered: {principal_type}:{principal_id}")
continue
LOGGER.debug(f"Visiting principal: {principal_type}:{principal_id}")
yield principal_type, principal_id, principal_name
return principal_iterator
Assignment = collections.namedtuple("Assignment", [
"instance_arn",
"principal_type",
"principal_id",
"principal_name",
"permission_set_arn",
"permission_set_name",
"target_type",
"target_id",
"target_name",
])
def list_assignments(
session,
instance_arn=None,
identity_store_id=None,
principal=None,
principal_filter=None,
permission_set=None,
permission_set_filter=None,
target=None,
target_filter=None,
get_principal_names=False,
get_permission_set_names=False,
get_target_names=False,
ou_recursive=False):
"""Iterate over AWS SSO assignments.
Args:
session (boto3.Session): boto3 session to use
instance_arn (str): The SSO instance to use, or it will be looked up using ListInstances
identity_store_id (str): The identity store to use if principal names are being retrieved
or it will be looked up using ListInstances
principal: A principal specification or list of principal specifications.
A principal specification is a principal id or a 2-tuple of principal type and id.
principal_filter: A callable taking principal type, principal id, and principal name
(which may be None), and returning True if the principal should be included.
permission_set: A permission set arn or id, or a list of the same.
permission_set_filter: A callable taking permission set arn and name (name may be None),
returning True if the permission set should be included.
target: A target specification or list of target specifications.
A target specification is an account or OU id, or a 2-tuple of target type, which
is either AWS_ACCOUNT or AWS_OU, and target id.
target_filter: A callable taking target type, target id, and target name
(which may be None), and returning True if the target should be included.
get_principal_names (bool): Retrieve names for principals in assignments.
get_permission_set_names (bool): Retrieve names for permission sets in assignments.
get_target_names (bool): Retrieve names for targets in assignments.
ou_recursive (bool): Set to True if an OU is provided as a target to get all accounts
including those in child OUs.
Returns:
An iterator over Assignment namedtuples
"""
ids = Ids(lambda: session, instance_arn, identity_store_id)
return _list_assignments(
session,
ids,
principal=principal,
principal_filter=principal_filter,
permission_set=permission_set,
permission_set_filter=permission_set_filter,
target=target,
target_filter=target_filter,
get_principal_names=get_principal_names,
get_permission_set_names=get_permission_set_names,
get_target_names=get_target_names,
ou_recursive=ou_recursive,
)
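# Illustrative usage sketch (not part of the library itself): the profile name,
# account id, and output handling below are placeholders.
#
#   import boto3
#   session = boto3.Session(profile_name="sso-admin")  # hypothetical profile
#   for a in list_assignments(session, target="123456789012", get_principal_names=True):
#       print(a.target_id, a.permission_set_arn, a.principal_type, a.principal_name)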
def _list_assignments(
session,
ids,
principal=None,
principal_filter=None,
permission_set=None,
permission_set_filter=None,
target=None,
target_filter=None,
get_principal_names=False,
get_permission_set_names=False,
get_target_names=False,
ou_recursive=False):
principal = _process_principal(principal)
permission_set = _process_permission_set(ids, permission_set)
target = _process_target(target)
cache = {}
filter_cache = {}
context = _Context(
session = session,
ids=ids,
principal=principal,
principal_filter=principal_filter,
permission_set=permission_set,
permission_set_filter=permission_set_filter,
target=target,
target_filter=target_filter,
get_principal_names=get_principal_names,
get_permission_set_names=get_permission_set_names,
get_target_names=get_target_names,
ou_recursive=ou_recursive,
cache=cache,
filter_cache=filter_cache,
)
target_iterator = _get_target_iterator(context)
permission_set_iterator = _get_permission_set_iterator(context)
principal_iterator = _get_principal_iterator(context)
for target_type, target_id, target_name in target_iterator():
        for permission_set_arn, permission_set_id, permission_set_name in permission_set_iterator(target_type, target_id, target_name):
for principal_type, principal_id, principal_name in principal_iterator(
target_type, target_id, target_name,
permission_set_arn, permission_set_id, permission_set_name):
assignment = Assignment(
ids.instance_arn,
principal_type,
principal_id,
principal_name,
permission_set_arn,
permission_set_name,
target_type,
target_id,
target_name,
)
LOGGER.debug(f"Visiting assignment: {assignment}")
yield assignment
if __name__ == "__main__":
import boto3
import sys
import json
logging.basicConfig(level=logging.INFO)
kwargs = {}
for v in sys.argv[1:]:
if hasattr(logging, v):
LOGGER.setLevel(getattr(logging, v))
else:
kwargs = json.loads(v)
def fil(*args):
print(args)
return True
kwargs["target_filter"] = fil
try:
session = boto3.Session()
print(",".join(Assignment._fields))
for value in list_assignments(session, **kwargs):
print(",".join(v or "" for v in value))
except KeyboardInterrupt:
pass
|
tensorhive/config.py | roscisz/TensorHive | 129 | 4588 | from pathlib import PosixPath
import configparser
from typing import Dict, Optional, Any, List
from inspect import cleandoc
import shutil
import tensorhive
import os
import logging
log = logging.getLogger(__name__)
class CONFIG_FILES:
# Where to copy files
# (TensorHive tries to load these by default)
config_dir = PosixPath.home() / '.config/TensorHive'
MAIN_CONFIG_PATH = str(config_dir / 'main_config.ini')
HOSTS_CONFIG_PATH = str(config_dir / 'hosts_config.ini')
MAILBOT_CONFIG_PATH = str(config_dir / 'mailbot_config.ini')
# Where to get file templates from
# (Clone file when it's not found in config directory)
tensorhive_package_dir = PosixPath(__file__).parent
MAIN_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'main_config.ini')
HOSTS_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'hosts_config.ini')
MAILBOT_TEMPLATE_CONFIG_PATH = str(tensorhive_package_dir / 'mailbot_config.ini')
ALEMBIC_CONFIG_PATH = str(tensorhive_package_dir / 'alembic.ini')
MIGRATIONS_CONFIG_PATH = str(tensorhive_package_dir / 'migrations')
class ConfigInitilizer:
'''Makes sure that all default config files exist'''
def __init__(self):
# 1. Check if all config files exist
all_exist = PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists() and \
PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists() and \
PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists()
        if not all_exist:
            log.warning('[•] Detected missing default config file(s), recreating...')
            self.recreate_default_configuration_files()
        else:
            log.info('[•] All configs already exist, skipping...')
def recreate_default_configuration_files(self) -> None:
try:
            # 1. Create directory for storing config files
CONFIG_FILES.config_dir.mkdir(parents=True, exist_ok=True)
# 2. Clone templates safely from `tensorhive` package
self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.MAIN_CONFIG_PATH)
self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.HOSTS_CONFIG_PATH)
self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH, dst=CONFIG_FILES.MAILBOT_CONFIG_PATH)
# 3. Change config files permission
rw_owner_only = 0o600
os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH, rw_owner_only)
os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH, rw_owner_only)
os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH, rw_owner_only)
except Exception:
log.error('[✘] Unable to recreate configuration files.')
def safe_copy(self, src: str, dst: str) -> None:
'''Safe means that it won't override existing configuration'''
if PosixPath(dst).exists():
log.info('Skipping, file already exists: {}'.format(dst))
else:
shutil.copy(src, dst)
log.info('Copied {} to {}'.format(src, dst))
class ConfigLoader:
@staticmethod
def load(path, displayed_title=''):
import configparser
config = configparser.ConfigParser(strict=False)
full_path = PosixPath(path).expanduser()
if config.read(str(full_path)):
log.info('[•] Reading {} config from {}'.format(displayed_title, full_path))
else:
log.warning('[✘] Configuration file not found ({})'.format(full_path))
log.info('Using default {} settings from config.py'.format(displayed_title))
return config
ConfigInitilizer()
config = ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main')
def display_config(cls):
'''
Displays all uppercase class atributes (class must be defined first)
Example usage: display_config(API_SERVER)
'''
print('[{class_name}]'.format(class_name=cls.__name__))
for key, value in cls.__dict__.items():
if key.isupper():
print('{} = {}'.format(key, value))
def check_env_var(name: str):
'''Makes sure that env variable is declared'''
if not os.getenv(name):
msg = cleandoc(
'''
{env} - undeclared environment variable!
Try this: `export {env}="..."`
''').format(env=name).split('\n')
log.warning(msg[0])
log.warning(msg[1])
class SSH:
section = 'ssh'
HOSTS_CONFIG_FILE = config.get(section, 'hosts_config_file', fallback=CONFIG_FILES.HOSTS_CONFIG_PATH)
TEST_ON_STARTUP = config.getboolean(section, 'test_on_startup', fallback=True)
TIMEOUT = config.getfloat(section, 'timeout', fallback=10.0)
NUM_RETRIES = config.getint(section, 'number_of_retries', fallback=1)
KEY_FILE = config.get(section, 'key_file', fallback='~/.config/TensorHive/ssh_key')
def hosts_config_to_dict(path: str) -> Dict: # type: ignore
'''Parses sections containing hostnames'''
hosts_config = ConfigLoader.load(path, displayed_title='hosts')
result = {}
for section in hosts_config.sections():
# We want to parse only sections which describe target hosts
if section == 'proxy_tunneling':
continue
hostname = section
result[hostname] = {
'user': hosts_config.get(hostname, 'user'),
'port': hosts_config.getint(hostname, 'port', fallback=22)
}
return result
def proxy_config_to_dict(path: str) -> Optional[Dict]: # type: ignore
'''Parses [proxy_tunneling] section'''
config = ConfigLoader.load(path, displayed_title='proxy')
section = 'proxy_tunneling'
# Check if section is present and if yes, check if tunneling is enabled
if config.has_section(section) and config.getboolean(section, 'enabled', fallback=False):
return {
'proxy_host': config.get(section, 'proxy_host'),
'proxy_user': config.get(section, 'proxy_user'),
'proxy_port': config.getint(section, 'proxy_port', fallback=22)
}
else:
return None
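    # Illustrative hosts_config.ini layout consumed by the two parsers above
    # (hostnames and values below are placeholders):
    #
    #   [gpu-node-1.example.com]
    #   user = someuser
    #   port = 22
    #
    #   [proxy_tunneling]
    #   enabled = False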
AVAILABLE_NODES = hosts_config_to_dict(HOSTS_CONFIG_FILE)
PROXY = proxy_config_to_dict(HOSTS_CONFIG_FILE)
class DB:
section = 'database'
default_path = '~/.config/TensorHive/database.sqlite'
def uri_for_path(path: str) -> str: # type: ignore
return 'sqlite:///{}'.format(PosixPath(path).expanduser())
SQLALCHEMY_DATABASE_URI = uri_for_path(config.get(section, 'path', fallback=default_path))
TEST_DATABASE_URI = 'sqlite://' # Use in-memory (before: sqlite:///test_database.sqlite)
class API:
section = 'api'
TITLE = config.get(section, 'title', fallback='TensorHive API')
URL_HOSTNAME = config.get(section, 'url_hostname', fallback='0.0.0.0')
URL_PREFIX = config.get(section, 'url_prefix', fallback='api')
SPEC_FILE = config.get(section, 'spec_file', fallback='api_specification.yml')
IMPL_LOCATION = config.get(section, 'impl_location', fallback='tensorhive.api.controllers')
import yaml
    responses_file_path = str(PosixPath(__file__).parent / 'controllers/responses.yml')
    with open(responses_file_path, 'r') as file:
RESPONSES = yaml.safe_load(file)
class APP_SERVER:
section = 'web_app.server'
BACKEND = config.get(section, 'backend', fallback='gunicorn')
HOST = config.get(section, 'host', fallback='0.0.0.0')
PORT = config.getint(section, 'port', fallback=5000)
WORKERS = config.getint(section, 'workers', fallback=4)
LOG_LEVEL = config.get(section, 'loglevel', fallback='warning')
class API_SERVER:
section = 'api.server'
BACKEND = config.get(section, 'backend', fallback='gevent')
HOST = config.get(section, 'host', fallback='0.0.0.0')
PORT = config.getint(section, 'port', fallback=1111)
DEBUG = config.getboolean(section, 'debug', fallback=False)
class MONITORING_SERVICE:
section = 'monitoring_service'
ENABLED = config.getboolean(section, 'enabled', fallback=True)
ENABLE_GPU_MONITOR = config.getboolean(section, 'enable_gpu_monitor', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0)
class PROTECTION_SERVICE:
section = 'protection_service'
ENABLED = config.getboolean(section, 'enabled', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0)
NOTIFY_ON_PTY = config.getboolean(section, 'notify_on_pty', fallback=True)
NOTIFY_VIA_EMAIL = config.getboolean(section, 'notify_via_email', fallback=False)
class MAILBOT:
mailbot_config = ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH, displayed_title='mailbot')
section = 'general'
INTERVAL = mailbot_config.getfloat(section, 'interval', fallback=10.0)
MAX_EMAILS_PER_PROTECTION_INTERVAL = mailbot_config.getint(section,
'max_emails_per_protection_interval', fallback=50)
NOTIFY_INTRUDER = mailbot_config.getboolean(section, 'notify_intruder', fallback=True)
NOTIFY_ADMIN = mailbot_config.getboolean(section, 'notify_admin', fallback=False)
ADMIN_EMAIL = mailbot_config.get(section, 'admin_email', fallback=None)
section = 'smtp'
SMTP_LOGIN = mailbot_config.get(section, 'email', fallback=None)
SMTP_PASSWORD = mailbot_config.get(section, 'password', fallback=None)
SMTP_SERVER = mailbot_config.get(section, 'smtp_server', fallback=None)
SMTP_PORT = mailbot_config.getint(section, 'smtp_port', fallback=587)
section = 'template/intruder'
INTRUDER_SUBJECT = mailbot_config.get(section, 'subject')
INTRUDER_BODY_TEMPLATE = mailbot_config.get(section, 'html_body')
section = 'template/admin'
ADMIN_SUBJECT = mailbot_config.get(section, 'subject')
ADMIN_BODY_TEMPLATE = mailbot_config.get(section, 'html_body')
class USAGE_LOGGING_SERVICE:
section = 'usage_logging_service'
default_path = '~/.config/TensorHive/logs/'
def full_path(path: str) -> str: # type: ignore
return str(PosixPath(path).expanduser())
ENABLED = config.getboolean(section, 'enabled', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0)
LOG_DIR = full_path(config.get(section, 'log_dir', fallback=default_path))
LOG_CLEANUP_ACTION = config.getint(section, 'log_cleanup_action', fallback=2)
class JOB_SCHEDULING_SERVICE:
section = 'job_scheduling_service'
ENABLED = config.getboolean(section, 'enabled', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=30.0)
STOP_TERMINATION_ATTEMPTS_AFTER = config.getfloat(section, 'stop_termination_attempts_after_mins', fallback=5.0)
SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS = config.getint(section, "schedule_queued_jobs_when_free_mins", fallback=30)
class AUTH:
from datetime import timedelta
section = 'auth'
def config_get_parsed(option: str, fallback: Any) -> List[str]: # type: ignore
'''
Parses value for option from string to a valid python list.
Fallback value is returned when anything goes wrong (e.g. option or value not present)
Example .ini file, function called with arguments: option='some_option', fallback=None
[some_section]
some_option = ['foo', 'bar']
Will return:
['foo', 'bar']
'''
import ast
try:
raw_arguments = config.get('auth', option)
parsed_arguments = ast.literal_eval(raw_arguments)
return parsed_arguments
except (configparser.Error, ValueError):
log.warning('Parsing [auth] config section failed for option "{}", using fallback value: {}'.format(
option, fallback))
return fallback
FLASK_JWT = {
'SECRET_KEY': config.get(section, 'secrect_key', fallback='jwt-some-secret'),
'JWT_BLACKLIST_ENABLED': config.getboolean(section, 'jwt_blacklist_enabled', fallback=True),
'JWT_BLACKLIST_TOKEN_CHECKS': config_get_parsed('jwt_blacklist_token_checks', fallback=['access', 'refresh']),
'BUNDLE_ERRORS': config.getboolean(section, 'bundle_errors', fallback=True),
'JWT_ACCESS_TOKEN_EXPIRES': timedelta(minutes=config.getint(section, 'jwt_access_token_expires_minutes',
fallback=1)),
'JWT_REFRESH_TOKEN_EXPIRES': timedelta(days=config.getint(section, 'jwt_refresh_token_expires_days',
fallback=1)),
'JWT_TOKEN_LOCATION': config_get_parsed('jwt_token_location', fallback=['headers'])
}
|
ML/Pytorch/more_advanced/Seq2Seq/seq2seq.py | xuyannus/Machine-Learning-Collection | 3,094 | 4590 | <filename>ML/Pytorch/more_advanced/Seq2Seq/seq2seq.py<gh_stars>1000+
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.datasets import Multi30k
from torchtext.data import Field, BucketIterator
import numpy as np
import spacy
import random
from torch.utils.tensorboard import SummaryWriter # to print to tensorboard
from utils import translate_sentence, bleu, save_checkpoint, load_checkpoint
spacy_ger = spacy.load("de")
spacy_eng = spacy.load("en")
def tokenize_ger(text):
return [tok.text for tok in spacy_ger.tokenizer(text)]
def tokenize_eng(text):
return [tok.text for tok in spacy_eng.tokenizer(text)]
german = Field(tokenize=tokenize_ger, lower=True, init_token="<sos>", eos_token="<eos>")
english = Field(
tokenize=tokenize_eng, lower=True, init_token="<sos>", eos_token="<eos>"
)
train_data, valid_data, test_data = Multi30k.splits(
exts=(".de", ".en"), fields=(german, english)
)
german.build_vocab(train_data, max_size=10000, min_freq=2)
english.build_vocab(train_data, max_size=10000, min_freq=2)
class Encoder(nn.Module):
def __init__(self, input_size, embedding_size, hidden_size, num_layers, p):
super(Encoder, self).__init__()
self.dropout = nn.Dropout(p)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.embedding = nn.Embedding(input_size, embedding_size)
self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p)
def forward(self, x):
# x shape: (seq_length, N) where N is batch size
embedding = self.dropout(self.embedding(x))
# embedding shape: (seq_length, N, embedding_size)
outputs, (hidden, cell) = self.rnn(embedding)
# outputs shape: (seq_length, N, hidden_size)
return hidden, cell
class Decoder(nn.Module):
def __init__(
self, input_size, embedding_size, hidden_size, output_size, num_layers, p
):
super(Decoder, self).__init__()
self.dropout = nn.Dropout(p)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.embedding = nn.Embedding(input_size, embedding_size)
self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p)
self.fc = nn.Linear(hidden_size, output_size)
def forward(self, x, hidden, cell):
# x shape: (N) where N is for batch size, we want it to be (1, N), seq_length
# is 1 here because we are sending in a single word and not a sentence
x = x.unsqueeze(0)
embedding = self.dropout(self.embedding(x))
# embedding shape: (1, N, embedding_size)
outputs, (hidden, cell) = self.rnn(embedding, (hidden, cell))
# outputs shape: (1, N, hidden_size)
predictions = self.fc(outputs)
# predictions shape: (1, N, length_target_vocabulary) to send it to
# loss function we want it to be (N, length_target_vocabulary) so we're
# just gonna remove the first dim
predictions = predictions.squeeze(0)
return predictions, hidden, cell
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder):
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, source, target, teacher_force_ratio=0.5):
batch_size = source.shape[1]
target_len = target.shape[0]
target_vocab_size = len(english.vocab)
outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(device)
hidden, cell = self.encoder(source)
# Grab the first input to the Decoder which will be <SOS> token
x = target[0]
for t in range(1, target_len):
# Use previous hidden, cell as context from encoder at start
output, hidden, cell = self.decoder(x, hidden, cell)
# Store next output prediction
outputs[t] = output
# Get the best word the Decoder predicted (index in the vocabulary)
best_guess = output.argmax(1)
# With probability of teacher_force_ratio we take the actual next word
# otherwise we take the word that the Decoder predicted it to be.
# Teacher Forcing is used so that the model gets used to seeing
# similar inputs at training and testing time, if teacher forcing is 1
# then inputs at test time might be completely different than what the
# network is used to. This was a long comment.
x = target[t] if random.random() < teacher_force_ratio else best_guess
return outputs
### We're ready to define everything we need for training our Seq2Seq model ###
# Training hyperparameters
num_epochs = 100
learning_rate = 0.001
batch_size = 64
# Model hyperparameters
load_model = False
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
input_size_encoder = len(german.vocab)
input_size_decoder = len(english.vocab)
output_size = len(english.vocab)
encoder_embedding_size = 300
decoder_embedding_size = 300
hidden_size = 1024 # Needs to be the same for both RNN's
num_layers = 2
enc_dropout = 0.5
dec_dropout = 0.5
# Tensorboard to get nice loss plot
writer = SummaryWriter(f"runs/loss_plot")
step = 0
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=batch_size,
sort_within_batch=True,
sort_key=lambda x: len(x.src),
device=device,
)
encoder_net = Encoder(
input_size_encoder, encoder_embedding_size, hidden_size, num_layers, enc_dropout
).to(device)
decoder_net = Decoder(
input_size_decoder,
decoder_embedding_size,
hidden_size,
output_size,
num_layers,
dec_dropout,
).to(device)
model = Seq2Seq(encoder_net, decoder_net).to(device)
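# Shape sanity check (illustrative only, not part of the training loop below):
# for a fake batch of 4 sentence pairs with source length 12 and target length 10,
# model(src, trg) returns a tensor of shape (10, 4, len(english.vocab)).
#   src = torch.randint(0, len(german.vocab), (12, 4)).to(device)
#   trg = torch.randint(0, len(english.vocab), (10, 4)).to(device)
#   assert model(src, trg).shape == (10, 4, len(english.vocab))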
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
pad_idx = english.vocab.stoi["<pad>"]
criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)
if load_model:
load_checkpoint(torch.load("my_checkpoint.pth.tar"), model, optimizer)
sentence = "ein boot mit mehreren männern darauf wird von einem großen pferdegespann ans ufer gezogen."
for epoch in range(num_epochs):
print(f"[Epoch {epoch} / {num_epochs}]")
checkpoint = {"state_dict": model.state_dict(), "optimizer": optimizer.state_dict()}
save_checkpoint(checkpoint)
model.eval()
translated_sentence = translate_sentence(
model, sentence, german, english, device, max_length=50
)
print(f"Translated example sentence: \n {translated_sentence}")
model.train()
for batch_idx, batch in enumerate(train_iterator):
# Get input and targets and get to cuda
inp_data = batch.src.to(device)
target = batch.trg.to(device)
# Forward prop
output = model(inp_data, target)
# Output is of shape (trg_len, batch_size, output_dim) but Cross Entropy Loss
# doesn't take input in that form. For example if we have MNIST we want to have
# output to be: (N, 10) and targets just (N). Here we can view it in a similar
        # way that we have output_words * batch_size that we want to send into
        # our cost function, so we need to do some reshaping. While we're at it,
        # let's also remove the start token.
output = output[1:].reshape(-1, output.shape[2])
target = target[1:].reshape(-1)
optimizer.zero_grad()
loss = criterion(output, target)
# Back prop
loss.backward()
# Clip to avoid exploding gradient issues, makes sure grads are
# within a healthy range
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
# Gradient descent step
optimizer.step()
# Plot to tensorboard
writer.add_scalar("Training loss", loss, global_step=step)
step += 1
score = bleu(test_data[1:100], model, german, english, device)
print(f"Bleu score {score*100:.2f}")
|
test/examples/integrated/codec/vip/vip_agent.py | rodrigomelo9/uvm-python | 140 | 4618 | <gh_stars>100-1000
#//
#// -------------------------------------------------------------
#// Copyright 2011 Synopsys, Inc.
#// Copyright 2019-2020 <NAME> (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#// -------------------------------------------------------------
#//
from uvm import *
from .vip_sequencer import vip_sequencer
from .vip_driver import vip_driver
from .vip_monitor import vip_monitor
class vip_agent(UVMAgent):
def __init__(self, name, parent=None):
super().__init__(name, parent)
self.hier_objection = False
def build_phase(self, phase):
self.sqr = vip_sequencer.type_id.create("sqr", self)
self.drv = vip_driver.type_id.create("drv", self)
self.tx_mon = vip_monitor.type_id.create("tx_mon", self)
self.rx_mon = vip_monitor.type_id.create("rx_mon", self)
self.rx_mon.hier_objection = self.hier_objection
self.tx_mon.hier_objection = self.hier_objection
self.drv.hier_objection = self.hier_objection
vif = []
if not UVMConfigDb.get(self, "", "vif", vif):
            uvm_fatal("VIP/AGT/NOVIF", "No virtual interface specified for this agent instance")
self.vif = vif[0]
UVMConfigDb.set(self, "tx_mon", "vif", self.vif.tx_mon)
UVMConfigDb.set(self, "rx_mon", "vif", self.vif.rx)
def connect_phase(self, phase):
self.drv.seq_item_port.connect(self.sqr.seq_item_export)
async def pre_reset_phase(self, phase):
if self.hier_objection:
phase.raise_objection(self, "Resetting agent")
await self.reset_and_suspend()
if self.hier_objection:
print("vip_agent dropping objection")
phase.drop_objection(self)
async def reset_and_suspend(self):
#fork
await sv.fork_join([
cocotb.fork(self.drv.reset_and_suspend()),
cocotb.fork(self.tx_mon.reset_and_suspend()),
cocotb.fork(self.rx_mon.reset_and_suspend())
])
#join
self.sqr.stop_sequences()
async def suspend(self):
await sv.fork_join([
# fork
cocotb.fork(self.drv.suspend()),
cocotb.fork(self.tx_mon.suspend()),
cocotb.fork(self.rx_mon.suspend()),
])
# join
async def resume(self):
# fork
await sv.fork_join([
cocotb.fork(self.drv.resume()),
cocotb.fork(self.tx_mon.resume()),
cocotb.fork(self.rx_mon.resume()),
])
# join
uvm_component_utils(vip_agent)
|
lib/django-0.96/django/views/generic/list_detail.py | MiCHiLU/google_appengine_sdk | 790 | 4626 | from django.template import loader, RequestContext
from django.http import Http404, HttpResponse
from django.core.xheaders import populate_xheaders
from django.core.paginator import ObjectPaginator, InvalidPage
from django.core.exceptions import ObjectDoesNotExist
def object_list(request, queryset, paginate_by=None, page=None,
allow_empty=False, template_name=None, template_loader=loader,
extra_context=None, context_processors=None, template_object_name='object',
mimetype=None):
"""
Generic list of objects.
Templates: ``<app_label>/<model_name>_list.html``
Context:
object_list
list of objects
is_paginated
are the results paginated?
results_per_page
number of objects per page (if paginated)
has_next
is there a next page?
has_previous
is there a prev page?
page
the current page
next
the next page
previous
the previous page
pages
number of pages, total
hits
number of objects, total
last_on_page
the result number of the last of object in the
object_list (1-indexed)
first_on_page
the result number of the first object in the
object_list (1-indexed)
"""
if extra_context is None: extra_context = {}
queryset = queryset._clone()
if paginate_by:
paginator = ObjectPaginator(queryset, paginate_by)
if not page:
page = request.GET.get('page', 1)
try:
page = int(page)
object_list = paginator.get_page(page - 1)
except (InvalidPage, ValueError):
if page == 1 and allow_empty:
object_list = []
else:
raise Http404
c = RequestContext(request, {
'%s_list' % template_object_name: object_list,
'is_paginated': paginator.pages > 1,
'results_per_page': paginate_by,
'has_next': paginator.has_next_page(page - 1),
'has_previous': paginator.has_previous_page(page - 1),
'page': page,
'next': page + 1,
'previous': page - 1,
'last_on_page': paginator.last_on_page(page - 1),
'first_on_page': paginator.first_on_page(page - 1),
'pages': paginator.pages,
'hits' : paginator.hits,
}, context_processors)
else:
c = RequestContext(request, {
'%s_list' % template_object_name: queryset,
'is_paginated': False
}, context_processors)
if not allow_empty and len(queryset) == 0:
raise Http404
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
if not template_name:
model = queryset.model
template_name = "%s/%s_list.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
return HttpResponse(t.render(c), mimetype=mimetype)
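# Illustrative URLconf wiring for object_list (Django 0.96 style; the app and model
# names below are placeholders, not part of this module):
#
#   from django.conf.urls.defaults import *
#   from myapp.models import Article
#
#   info_dict = {'queryset': Article.objects.all(), 'paginate_by': 10}
#   urlpatterns = patterns('',
#       (r'^articles/$', 'django.views.generic.list_detail.object_list', info_dict),
#   )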
def object_detail(request, queryset, object_id=None, slug=None,
slug_field=None, template_name=None, template_name_field=None,
template_loader=loader, extra_context=None,
context_processors=None, template_object_name='object',
mimetype=None):
"""
Generic detail of an object.
Templates: ``<app_label>/<model_name>_detail.html``
Context:
object
the object
"""
if extra_context is None: extra_context = {}
model = queryset.model
if object_id:
queryset = queryset.filter(pk=object_id)
elif slug and slug_field:
queryset = queryset.filter(**{slug_field: slug})
else:
raise AttributeError, "Generic detail view must be called with either an object_id or a slug/slug_field."
try:
obj = queryset.get()
except ObjectDoesNotExist:
raise Http404, "No %s found matching the query" % (model._meta.verbose_name)
if not template_name:
template_name = "%s/%s_detail.html" % (model._meta.app_label, model._meta.object_name.lower())
if template_name_field:
template_name_list = [getattr(obj, template_name_field), template_name]
t = template_loader.select_template(template_name_list)
else:
t = template_loader.get_template(template_name)
c = RequestContext(request, {
template_object_name: obj,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
response = HttpResponse(t.render(c), mimetype=mimetype)
populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.name))
return response
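# Illustrative URLconf entry for object_detail (same placeholder names as above):
#
#   info_dict = {'queryset': Article.objects.all(), 'slug_field': 'slug'}
#   urlpatterns += patterns('',
#       (r'^articles/(?P<slug>[-\w]+)/$', 'django.views.generic.list_detail.object_detail', info_dict),
#   )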
|
ghostwriter/rolodex/apps.py | bbhunter/Ghostwriter | 601 | 4674 | """This contains the configuration of the Rolodex application."""
# Django Imports
from django.apps import AppConfig
class RolodexConfig(AppConfig):
name = "ghostwriter.rolodex"
def ready(self):
try:
import ghostwriter.rolodex.signals # noqa F401 isort:skip
except ImportError:
pass
|
src/foremast/validate.py | dnava013/foremast | 157 | 4690 | <gh_stars>100-1000
"""Spinnaker validate functions."""
import logging
from .consts import API_URL
from .utils.credentials import get_env_credential
LOG = logging.getLogger(__name__)
def validate_gate():
"""Check Gate connection."""
try:
credentials = get_env_credential()
LOG.debug('Found credentials: %s', credentials)
LOG.info('Gate working.')
except TypeError:
LOG.fatal('Gate connection not valid: API_URL = %s', API_URL)
def validate_all(args):
"""Run all validate steps."""
LOG.debug('Args: %s', args)
LOG.info('Running all validate steps.')
validate_gate()
|
clarifai/rest/grpc/custom_converters/custom_message_to_dict.py | Taik/clarifai-python | 322 | 4696 | <reponame>Taik/clarifai-python
import typing # noqa
from google.protobuf import descriptor
from google.protobuf.json_format import _IsMapEntry, _Printer
from google.protobuf.message import Message # noqa
from clarifai.rest.grpc.proto.clarifai.api.utils import extensions_pb2
def protobuf_to_dict(object_protobuf, use_integers_for_enums=True, ignore_show_empty=False):
# type: (Message, typing.Optional[bool], typing.Optional[bool]) -> dict
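  # Serialize a protobuf message to a plain dict, preserving proto field names and
  # keeping fields flagged with the cl_show_if_empty extension even when they are empty.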
  printer = _CustomPrinter(
including_default_value_fields=False,
preserving_proto_field_name=True,
use_integers_for_enums=use_integers_for_enums,
ignore_show_empty=ignore_show_empty)
# pylint: disable=protected-access
return printer._MessageToJsonObject(object_protobuf)
class _CustomPrinter(_Printer):
def __init__(self, including_default_value_fields, preserving_proto_field_name,
use_integers_for_enums, ignore_show_empty):
super(_CustomPrinter, self).__init__(including_default_value_fields,
preserving_proto_field_name, use_integers_for_enums)
self._ignore_show_empty = ignore_show_empty
def _RegularMessageToJsonObject(self, message, js):
"""
Because of the fields with the custom extension `cl_show_if_empty`, we need to adjust the
original's method's return JSON object and keep these fields.
"""
js = super(_CustomPrinter, self)._RegularMessageToJsonObject(message, js)
message_descriptor = message.DESCRIPTOR
for field in message_descriptor.fields:
if (self._ignore_show_empty and
not field.GetOptions().Extensions[extensions_pb2.cl_default_float]):
continue
if not field.GetOptions().Extensions[extensions_pb2.cl_show_if_empty]:
continue
# Singular message fields and oneof fields will not be affected.
if ((field.label != descriptor.FieldDescriptor.LABEL_REPEATED and
field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE) or
field.containing_oneof):
continue
if self.preserving_proto_field_name:
name = field.name
else:
name = field.json_name
if name in js:
# Skip the field which has been serialized already.
continue
if _IsMapEntry(field):
js[name] = {}
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
js[name] = []
else:
js[name] = self._FieldToJsonObject(field, field.default_value)
return js
def _StructMessageToJsonObject(self, message):
"""
Converts Struct message according to Proto3 JSON Specification.
However, by default, empty objects {} get converted to null. We overwrite this behavior so {}
get converted to {}.
"""
fields = message.fields
ret = {}
for key in fields:
# When there's a Struct with an empty Struct field, this condition will hold True.
# Far as I know this is the only case this condition will be true. If not, this condition
# needs to be amended.
if fields[key].WhichOneof('kind') is None:
json_object = {}
else:
json_object = self._ValueMessageToJsonObject(fields[key])
ret[key] = json_object
return ret
|
download.py | JamesWang007/Open3D-PointNet | 120 | 4702 | <reponame>JamesWang007/Open3D-PointNet<gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""Download big files from Google Drive."""
import shutil
import sys
import requests
import os
import time
import urllib.request
import zipfile
def reporthook(count, block_size, total_size):
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = int(count * block_size * 100 / total_size)
if percent % 5 == 0:
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
(percent, progress_size / (1024 * 1024), speed, duration))
sys.stdout.flush()
def sizeof_fmt(num, suffix='B'):
# https://stackoverflow.com/a/1094933/5308925
for unit in ['','K','M','G','T','P','E','Z']:
if abs(num) < 1000.0:
return "%3.2f%s%s" % (num, unit, suffix)
num /= 1000.0
return "%.2f%s%s" % (num, 'Yi', suffix)
def print_status(destination, progress):
message = "Downloading %s... %s" % (destination, sizeof_fmt(progress))
empty_space = shutil.get_terminal_size((80, 20)).columns - len(message)
sys.stdout.write('\r' + message + empty_space * ' ')
sys.stdout.flush()
def download_file_from_google_drive(id, destination):
# https://stackoverflow.com/a/39225039/5308925
def save_response_content(response, destination):
chunk_size = 32768
written_size = 0
with open(destination, "wb") as f:
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
written_size += chunk_size
print_status(destination, written_size)
print('Done.')
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
url = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(url, params={'id': id}, stream=True)
token = get_confirm_token(response)
if token:
params = {'id': id, 'confirm': token}
response = session.get(url, params=params, stream=True)
save_response_content(response, destination)
def download_contents():
# download model
model_path = './cls_model.pth'
if os.path.isfile(model_path):
print('Model file already downloaded in', model_path)
else:
download_file_from_google_drive('1WWf5B5fmik5_P1dwxltJ-atRkYeCcCC5', './cls_model.pth')
# download dataset
    dataset_path = './shapenetcore_partanno_segmentation_benchmark_v0.zip'
    dataset_url = 'https://shapenet.cs.stanford.edu/ericyi/shapenetcore_partanno_segmentation_benchmark_v0.zip'
    if os.path.isfile(dataset_path):
        print('Dataset file already downloaded in', dataset_path)
    else:
        urllib.request.urlretrieve(dataset_url, os.path.basename(dataset_url), reporthook)
    # unzip dataset
    print('Now unzipping... this takes about 2 minutes!')
    zip_ref = zipfile.ZipFile(os.path.basename(dataset_url), 'r')
    zip_ref.extractall('.')
    zip_ref.close()
return 0
if __name__ == '__main__':
download_contents()
|
tests/test_models/test_backbones/test_encoder_decoders/test_deepfill_encoder.py | Jian137/mmediting-1 | 1,884 | 4711 | <gh_stars>1000+
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmedit.models.backbones import ContextualAttentionNeck, DeepFillEncoder
from mmedit.models.common import SimpleGatedConvModule
def test_deepfill_enc():
encoder = DeepFillEncoder()
x = torch.randn((2, 5, 256, 256))
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.stride == (2, 2)
assert encoder.enc2.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_conv')
x = torch.randn((2, 5, 256, 256))
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_attention')
x = torch.randn((2, 5, 256, 256))
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 128
if torch.cuda.is_available():
encoder = DeepFillEncoder().cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.stride == (2, 2)
assert encoder.enc2.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_conv').cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_attention').cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 128
encoder = DeepFillEncoder(
conv_type='gated_conv', channel_factor=0.75).cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 96, 64, 64)
assert isinstance(encoder.enc2, SimpleGatedConvModule)
assert encoder.enc2.conv.stride == (2, 2)
assert encoder.enc2.conv.out_channels == 48 * 2
def test_deepfill_contextual_attention_neck():
# TODO: add unittest for contextual attention module
neck = ContextualAttentionNeck(in_channels=128)
x = torch.rand((2, 128, 64, 64))
mask = torch.zeros((2, 1, 64, 64))
mask[..., 20:100, 23:90] = 1.
res, offset = neck(x, mask)
assert res.shape == (2, 128, 64, 64)
assert offset.shape == (2, 32, 32, 32, 32)
if torch.cuda.is_available():
neck.cuda()
res, offset = neck(x.cuda(), mask.cuda())
assert res.shape == (2, 128, 64, 64)
assert offset.shape == (2, 32, 32, 32, 32)
neck = ContextualAttentionNeck(
in_channels=128, conv_type='gated_conv').cuda()
res, offset = neck(x.cuda(), mask.cuda())
assert res.shape == (2, 128, 64, 64)
assert offset.shape == (2, 32, 32, 32, 32)
assert isinstance(neck.conv1, SimpleGatedConvModule)
|
vmca/python/get_cert.py | wfu8/lightwave | 357 | 4724 | <gh_stars>100-1000
#!/usr/bin/env python
#
# Copyright © 2012-2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the “License”); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS, without
# warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Helper function that gets certificates from VMWare Certificate Authority
# More details. If this module can be used as a main program, include usage information.
""" certool.py : This is the standard library function for
cloudVM/vcenterwindows first boot to integrate with
VMCA Certificate Generation.
if not running under a cloudVM, then it is assumed that
the OS.Environment has the following defined.
VMWARE_SKIP_VISL = True
system.urlhostname
vmdir.ldu-guid
system.hostname.type
vmca.cert.password
vmca.cert.dir
"""
__copyright__ = "Copyright 2012, VMware Inc."
__version__ = 0.1
__author__ = "VMware, Inc."
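# Illustrative environment for running outside a cloudVM (all values below are
# placeholders); the keys mirror the install parameters listed in the module docstring:
#
#   os.environ['VMWARE_SKIP_VISL'] = 'True'
#   os.environ['system.urlhostname'] = 'host.example.com'
#   os.environ['system.hostname.type'] = 'fqdn'
#   os.environ['vmca.cert.password'] = 'example-password'
#   os.environ['vmca.cert.dir'] = '/tmp/vmca-certs'
#   os.environ['vmdir.ldu-guid'] = 'example-guid'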
import logging
import os
import subprocess
class CerTool:
__vislInstall__ = ""
__systemUrlHostname__ = ""
__systemHosttype__ = ""
__vmcaPassword__ = ""
__vmcaCertPath__ = ""
__skipInstallParams__ = False
__certfileName__ = ""
__privateKeyFileName__ = ""
__publicKeyFileName__ = ""
__pfxFileName__ = ""
def __init__(self):
self.FindEnvParams()
self.GetVislParams()
def GetHostName(self):
return self.__systemUrlHostname__
def GetHostType(self):
return self.__systemHosttype__
def GetPassword(self):
return self.__vmcaPassword__
def GetCertDir(self):
return self.__vmcaCertPath__
def GetCertFileName(self):
return self.__certfileName__
def GetPrivateKeyFileName(self):
return self.__privateKeyFile__
def GetPublicKeyFileName(self):
return self.__publicKeyFile__
def GetPfxFileName(self):
return self.__pfxFileName__
def GenCert(self, componentName):
""" Generates the Certificates in the Cert directory"""
# Generate full file names for all artifacts
self.__certfileName__ = \
os.path.join(self.GetCertDir(), componentName, componentName + ".crt")
logging.debug("cert File Name : " + self.GetCertFileName())
self.__privateKeyFile__ = \
os.path.join(self.GetCertDir(), componentName, componentName + ".priv")
logging.debug("Private Key Name : " + self.GetPrivateKeyFileName())
self.__publicKeyFile__ = \
os.path.join(self.GetCertDir(), componentName, componentName + ".pub")
logging.debug("Public Key Name : " + self.GetPublicKeyFileName())
self.__pfxFileName__ = \
os.path.join(self.GetCertDir(), componentName, componentName + ".pfx")
logging.debug("pfx file Name : " + self.GetPfxFileName())
dir = os.path.join(self.GetCertDir(),componentName)
logging.debug("Target Dir : " + dir)
try:
if not os.path.exists(dir):
os.makedirs(dir)
logging.debug("Created directory")
except OSError as e:
raise Exception("I/O error({0}): {1}".format(e.errno, e.strerror))
# Generate Private Key and Public Keys First
cmd = [self.GetCertToolPath(),
'--genkey',
'--priv=' + self.GetPrivateKeyFileName(),
'--pub=' + self.GetPublicKeyFileName()]
output = self.RunCmd(cmd)
logging.info(output)
cmd = [self.GetCertToolPath(),
'--genCIScert',
'--priv=' + self.GetPrivateKeyFileName(),
'--cert=' + self.GetCertFileName(),
'--Name=' + componentName]
# if we know the host name, put that into the certificate
if (self.GetHostType() == 'fqdn'):
cmd.append('--FQDN=' + self.GetHostName())
# elif (self.GetHostType() == 'ipv4'):
# # Possible TODO : support IPv4 in certificates
# elif (self.GetHostType() == 'ipv6'):
# # Possible TODO : support IPv6 in certificates
output = self.RunCmd(cmd)
logging.info(output)
# TODO : Replace this with certool PKCS12 capabilities
cmd = [self.GetOpenSSLPath(),
'pkcs12',
'-export',
'-in',
self.GetCertFileName(),
'-inkey',
self.GetPrivateKeyFileName(),
'-out',
self.GetPfxFileName(),
'-name',
componentName,
'-passout',
'pass:' + self.GetPassword()]
output = self.RunCmd(cmd)
logging.info(output)
def FindEnvParams(self):
""" Finds the Default Environment parameters. if you are
not running inside the cloudVM, set VMWARE_SKIP_VISL = True
in your environment. This will enable this script to look
for values in the env. block instead of VISL namespace."""
# Find VISL Install Parameter
INSTALL_PARAM_ENV_VAR = 'VMWARE_INSTALL_PARAMETER'
VMWARE_SKIP_VISL = 'VMWARE_SKIP_VISL'
if INSTALL_PARAM_ENV_VAR in os.environ:
self.__vislInstall__ = os.environ[INSTALL_PARAM_ENV_VAR]
if VMWARE_SKIP_VISL in os.environ:
skip = os.environ[VMWARE_SKIP_VISL]
if (skip in ['true', 'True', 'yes', '1', 'skip']):
self.__skipInstallParams__ = True
if (not self.__vislInstall__ and self.__skipInstallParams__ is False):
errString = 'Unable to find install param script'
logging.error(errString)
raise Exception(errString)
logging.debug('Using install param script : ' + self.__vislInstall__)
def GetInstallParams(self, key):
""" Waits on Install Parameter to return the value from visl.
Or if the VMWARE_SKIP_VISL = True, then reads the value from
the os environment"""
if (self.__skipInstallParams__ is False):
cmd = [self.__vislInstall__, '-d', key]
output = self.RunCmd(cmd)
logging.debug('Install param found :' + output)
return output
else:
            if key in os.environ:
param = os.environ[key]
logging.debug('Env. param found : ' + param)
return param
else:
raise Exception('Requested Value not found in Env : ' + key)
def RunCmd(self, args):
""" Runs a given command"""
logging.info('running %s' % args)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
        output = p.communicate()[0].rstrip()
        if p.returncode:
            raise Exception('Failed to execute last cmd')
        return output
def GetVislParams(self):
""" Waits for all VISL parameters that VMCA certool needs"""
INSTALL_PARAM_SYSTEM_URL_HOSTNAME = "system.urlhostname"
INSTALL_PARAM_LDU_GUID = "vmdir.ldu-guid"
INSTALL_PARAM_SYSTEM_HOST_TYPE = "system.hostname.type"
INSTALL_PARAM_PASSWORD = "<PASSWORD>"
INSTALL_PARAM_CERT_DIR = "vmca.cert.dir"
# Please note that each of this is a blocking call.
# VISL will wait until these value are populated by the
# appropriate Script
self.__systemUrlHostname__ = \
self.GetInstallParams(INSTALL_PARAM_SYSTEM_URL_HOSTNAME)
self.__systemHosttype__ = \
self.GetInstallParams(INSTALL_PARAM_SYSTEM_HOST_TYPE)
self.__vmcaPassword__ = \
self.GetInstallParams(INSTALL_PARAM_PASSWORD)
self.__vmcaCertPath__ = \
self.GetInstallParams(INSTALL_PARAM_CERT_DIR)
# We really don't need this value,
# it is a technique on waiting for directory
# first boot to finish.
discardldu = self.GetInstallParams(INSTALL_PARAM_LDU_GUID)
def GetCertToolPath(self):
"""returns the path to certool"""
#TODO : Publish Certool Path from VMCA First Boot
if(os.name == "nt"):
PROGRAM_FILES = os.environ['PROGRAMFILES']
return os.path.normpath(PROGRAM_FILES +
'/VMware/CIS/Vmcad/certool.exe')
elif (os.name == 'posix'):
return '/opt/vmware/bin/certool'
def GetOpenSSLPath(self):
if(os.name == "nt"):
PROGRAM_FILES = os.environ['PROGRAMFILES']
return os.path.normpath(PROGRAM_FILES +
'/VMware/CIS/OpenSSL/openssl.exe')
elif (os.name == 'posix'):
return '/usr/lib/vmware-openSSL/openssl'
def main():
""" Example Code Usage """
testComponent = 'sso'
VmcaCertool = CerTool()
VmcaCertool.GenCert(testComponent)
    print('Generated a pfx file : %s' % VmcaCertool.GetPfxFileName())
    print('Using Password : %s' % VmcaCertool.GetPassword())
if __name__ == "__main__":
main()
|
sdk/python/pulumi_gcp/securitycenter/notification_config.py | sisisin/pulumi-gcp | 121 | 4725 | <filename>sdk/python/pulumi_gcp/securitycenter/notification_config.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['NotificationConfigArgs', 'NotificationConfig']
@pulumi.input_type
class NotificationConfigArgs:
def __init__(__self__, *,
config_id: pulumi.Input[str],
organization: pulumi.Input[str],
pubsub_topic: pulumi.Input[str],
streaming_config: pulumi.Input['NotificationConfigStreamingConfigArgs'],
description: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a NotificationConfig resource.
:param pulumi.Input[str] config_id: This must be unique within the organization.
:param pulumi.Input[str] organization: The organization whose Cloud Security Command Center the Notification
Config lives in.
:param pulumi.Input[str] pubsub_topic: The Pub/Sub topic to send notifications to. Its format is
"projects/[project_id]/topics/[topic]".
:param pulumi.Input['NotificationConfigStreamingConfigArgs'] streaming_config: The config for triggering streaming-based notifications.
Structure is documented below.
:param pulumi.Input[str] description: The description of the notification config (max of 1024 characters).
"""
pulumi.set(__self__, "config_id", config_id)
pulumi.set(__self__, "organization", organization)
pulumi.set(__self__, "pubsub_topic", pubsub_topic)
pulumi.set(__self__, "streaming_config", streaming_config)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter(name="configId")
def config_id(self) -> pulumi.Input[str]:
"""
This must be unique within the organization.
"""
return pulumi.get(self, "config_id")
@config_id.setter
def config_id(self, value: pulumi.Input[str]):
pulumi.set(self, "config_id", value)
@property
@pulumi.getter
def organization(self) -> pulumi.Input[str]:
"""
The organization whose Cloud Security Command Center the Notification
Config lives in.
"""
return pulumi.get(self, "organization")
@organization.setter
def organization(self, value: pulumi.Input[str]):
pulumi.set(self, "organization", value)
@property
@pulumi.getter(name="pubsubTopic")
def pubsub_topic(self) -> pulumi.Input[str]:
"""
The Pub/Sub topic to send notifications to. Its format is
"projects/[project_id]/topics/[topic]".
"""
return pulumi.get(self, "pubsub_topic")
@pubsub_topic.setter
def pubsub_topic(self, value: pulumi.Input[str]):
pulumi.set(self, "pubsub_topic", value)
@property
@pulumi.getter(name="streamingConfig")
def streaming_config(self) -> pulumi.Input['NotificationConfigStreamingConfigArgs']:
"""
The config for triggering streaming-based notifications.
Structure is documented below.
"""
return pulumi.get(self, "streaming_config")
@streaming_config.setter
def streaming_config(self, value: pulumi.Input['NotificationConfigStreamingConfigArgs']):
pulumi.set(self, "streaming_config", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the notification config (max of 1024 characters).
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@pulumi.input_type
class _NotificationConfigState:
def __init__(__self__, *,
config_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
organization: Optional[pulumi.Input[str]] = None,
pubsub_topic: Optional[pulumi.Input[str]] = None,
service_account: Optional[pulumi.Input[str]] = None,
streaming_config: Optional[pulumi.Input['NotificationConfigStreamingConfigArgs']] = None):
"""
Input properties used for looking up and filtering NotificationConfig resources.
:param pulumi.Input[str] config_id: This must be unique within the organization.
:param pulumi.Input[str] description: The description of the notification config (max of 1024 characters).
:param pulumi.Input[str] name: The resource name of this notification config, in the format
'organizations/{{organization}}/notificationConfigs/{{config_id}}'.
:param pulumi.Input[str] organization: The organization whose Cloud Security Command Center the Notification
Config lives in.
:param pulumi.Input[str] pubsub_topic: The Pub/Sub topic to send notifications to. Its format is
"projects/[project_id]/topics/[topic]".
:param pulumi.Input[str] service_account: The service account that needs "pubsub.topics.publish" permission to publish to the Pub/Sub topic.
:param pulumi.Input['NotificationConfigStreamingConfigArgs'] streaming_config: The config for triggering streaming-based notifications.
Structure is documented below.
"""
if config_id is not None:
pulumi.set(__self__, "config_id", config_id)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if organization is not None:
pulumi.set(__self__, "organization", organization)
if pubsub_topic is not None:
pulumi.set(__self__, "pubsub_topic", pubsub_topic)
if service_account is not None:
pulumi.set(__self__, "service_account", service_account)
if streaming_config is not None:
pulumi.set(__self__, "streaming_config", streaming_config)
@property
@pulumi.getter(name="configId")
def config_id(self) -> Optional[pulumi.Input[str]]:
"""
This must be unique within the organization.
"""
return pulumi.get(self, "config_id")
@config_id.setter
def config_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "config_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the notification config (max of 1024 characters).
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The resource name of this notification config, in the format
'organizations/{{organization}}/notificationConfigs/{{config_id}}'.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def organization(self) -> Optional[pulumi.Input[str]]:
"""
The organization whose Cloud Security Command Center the Notification
Config lives in.
"""
return pulumi.get(self, "organization")
@organization.setter
def organization(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "organization", value)
@property
@pulumi.getter(name="pubsubTopic")
def pubsub_topic(self) -> Optional[pulumi.Input[str]]:
"""
The Pub/Sub topic to send notifications to. Its format is
"projects/[project_id]/topics/[topic]".
"""
return pulumi.get(self, "pubsub_topic")
@pubsub_topic.setter
def pubsub_topic(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pubsub_topic", value)
@property
@pulumi.getter(name="serviceAccount")
def service_account(self) -> Optional[pulumi.Input[str]]:
"""
The service account that needs "pubsub.topics.publish" permission to publish to the Pub/Sub topic.
"""
return pulumi.get(self, "service_account")
@service_account.setter
def service_account(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account", value)
@property
@pulumi.getter(name="streamingConfig")
def streaming_config(self) -> Optional[pulumi.Input['NotificationConfigStreamingConfigArgs']]:
"""
The config for triggering streaming-based notifications.
Structure is documented below.
"""
return pulumi.get(self, "streaming_config")
@streaming_config.setter
def streaming_config(self, value: Optional[pulumi.Input['NotificationConfigStreamingConfigArgs']]):
pulumi.set(self, "streaming_config", value)
class NotificationConfig(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
organization: Optional[pulumi.Input[str]] = None,
pubsub_topic: Optional[pulumi.Input[str]] = None,
streaming_config: Optional[pulumi.Input[pulumi.InputType['NotificationConfigStreamingConfigArgs']]] = None,
__props__=None):
"""
        A Cloud Security Command Center (Cloud SCC) notification config. A
        notification config is a Cloud SCC resource that contains the
        configuration to send notifications for create/update events of
        findings, assets and so on.
> **Note:** In order to use Cloud SCC resources, your organization must be enrolled
in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center).
Without doing so, you may run into errors during resource creation.
To get more information about NotificationConfig, see:
* [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v1/organizations.notificationConfigs)
* How-to Guides
* [Official Documentation](https://cloud.google.com/security-command-center/docs)
## Example Usage
### Scc Notification Config Basic
```python
import pulumi
import pulumi_gcp as gcp
scc_notification = gcp.pubsub.Topic("sccNotification")
custom_notification_config = gcp.securitycenter.NotificationConfig("customNotificationConfig",
config_id="my-config",
organization="123456789",
description="My custom Cloud Security Command Center Finding Notification Configuration",
pubsub_topic=scc_notification.id,
streaming_config=gcp.securitycenter.NotificationConfigStreamingConfigArgs(
filter="category = \"OPEN_FIREWALL\" AND state = \"ACTIVE\"",
))
```
## Import
NotificationConfig can be imported using any of these accepted formats
```sh
$ pulumi import gcp:securitycenter/notificationConfig:NotificationConfig default organizations/{{organization}}/notificationConfigs/{{name}}
```
```sh
$ pulumi import gcp:securitycenter/notificationConfig:NotificationConfig default {{organization}}/{{name}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] config_id: This must be unique within the organization.
:param pulumi.Input[str] description: The description of the notification config (max of 1024 characters).
:param pulumi.Input[str] organization: The organization whose Cloud Security Command Center the Notification
Config lives in.
:param pulumi.Input[str] pubsub_topic: The Pub/Sub topic to send notifications to. Its format is
"projects/[project_id]/topics/[topic]".
:param pulumi.Input[pulumi.InputType['NotificationConfigStreamingConfigArgs']] streaming_config: The config for triggering streaming-based notifications.
Structure is documented below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: NotificationConfigArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        A Cloud Security Command Center (Cloud SCC) notification config. A
        notification config is a Cloud SCC resource that contains the
        configuration to send notifications for create/update events of
        findings, assets and so on.
> **Note:** In order to use Cloud SCC resources, your organization must be enrolled
in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center).
Without doing so, you may run into errors during resource creation.
To get more information about NotificationConfig, see:
* [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v1/organizations.notificationConfigs)
* How-to Guides
* [Official Documentation](https://cloud.google.com/security-command-center/docs)
## Example Usage
### Scc Notification Config Basic
```python
import pulumi
import pulumi_gcp as gcp
scc_notification = gcp.pubsub.Topic("sccNotification")
custom_notification_config = gcp.securitycenter.NotificationConfig("customNotificationConfig",
config_id="my-config",
organization="123456789",
description="My custom Cloud Security Command Center Finding Notification Configuration",
pubsub_topic=scc_notification.id,
streaming_config=gcp.securitycenter.NotificationConfigStreamingConfigArgs(
filter="category = \"OPEN_FIREWALL\" AND state = \"ACTIVE\"",
))
```
## Import
NotificationConfig can be imported using any of these accepted formats
```sh
$ pulumi import gcp:securitycenter/notificationConfig:NotificationConfig default organizations/{{organization}}/notificationConfigs/{{name}}
```
```sh
$ pulumi import gcp:securitycenter/notificationConfig:NotificationConfig default {{organization}}/{{name}}
```
:param str resource_name: The name of the resource.
:param NotificationConfigArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NotificationConfigArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
organization: Optional[pulumi.Input[str]] = None,
pubsub_topic: Optional[pulumi.Input[str]] = None,
streaming_config: Optional[pulumi.Input[pulumi.InputType['NotificationConfigStreamingConfigArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NotificationConfigArgs.__new__(NotificationConfigArgs)
if config_id is None and not opts.urn:
raise TypeError("Missing required property 'config_id'")
__props__.__dict__["config_id"] = config_id
__props__.__dict__["description"] = description
if organization is None and not opts.urn:
raise TypeError("Missing required property 'organization'")
__props__.__dict__["organization"] = organization
if pubsub_topic is None and not opts.urn:
raise TypeError("Missing required property 'pubsub_topic'")
__props__.__dict__["pubsub_topic"] = pubsub_topic
if streaming_config is None and not opts.urn:
raise TypeError("Missing required property 'streaming_config'")
__props__.__dict__["streaming_config"] = streaming_config
__props__.__dict__["name"] = None
__props__.__dict__["service_account"] = None
super(NotificationConfig, __self__).__init__(
'gcp:securitycenter/notificationConfig:NotificationConfig',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
config_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
organization: Optional[pulumi.Input[str]] = None,
pubsub_topic: Optional[pulumi.Input[str]] = None,
service_account: Optional[pulumi.Input[str]] = None,
streaming_config: Optional[pulumi.Input[pulumi.InputType['NotificationConfigStreamingConfigArgs']]] = None) -> 'NotificationConfig':
"""
Get an existing NotificationConfig resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] config_id: This must be unique within the organization.
:param pulumi.Input[str] description: The description of the notification config (max of 1024 characters).
:param pulumi.Input[str] name: The resource name of this notification config, in the format
'organizations/{{organization}}/notificationConfigs/{{config_id}}'.
:param pulumi.Input[str] organization: The organization whose Cloud Security Command Center the Notification
Config lives in.
:param pulumi.Input[str] pubsub_topic: The Pub/Sub topic to send notifications to. Its format is
"projects/[project_id]/topics/[topic]".
:param pulumi.Input[str] service_account: The service account that needs "pubsub.topics.publish" permission to publish to the Pub/Sub topic.
:param pulumi.Input[pulumi.InputType['NotificationConfigStreamingConfigArgs']] streaming_config: The config for triggering streaming-based notifications.
Structure is documented below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _NotificationConfigState.__new__(_NotificationConfigState)
__props__.__dict__["config_id"] = config_id
__props__.__dict__["description"] = description
__props__.__dict__["name"] = name
__props__.__dict__["organization"] = organization
__props__.__dict__["pubsub_topic"] = pubsub_topic
__props__.__dict__["service_account"] = service_account
__props__.__dict__["streaming_config"] = streaming_config
return NotificationConfig(resource_name, opts=opts, __props__=__props__)
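    # Illustrative lookup (editor's sketch; the resource name and ID below are
    # hypothetical, following the import format documented above):
    #   existing = NotificationConfig.get(
    #       "imported-config",
    #       id="organizations/123456789/notificationConfigs/my-config")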
@property
@pulumi.getter(name="configId")
def config_id(self) -> pulumi.Output[str]:
"""
This must be unique within the organization.
"""
return pulumi.get(self, "config_id")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description of the notification config (max of 1024 characters).
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The resource name of this notification config, in the format
'organizations/{{organization}}/notificationConfigs/{{config_id}}'.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def organization(self) -> pulumi.Output[str]:
"""
The organization whose Cloud Security Command Center the Notification
Config lives in.
"""
return pulumi.get(self, "organization")
@property
@pulumi.getter(name="pubsubTopic")
def pubsub_topic(self) -> pulumi.Output[str]:
"""
The Pub/Sub topic to send notifications to. Its format is
"projects/[project_id]/topics/[topic]".
"""
return pulumi.get(self, "pubsub_topic")
@property
@pulumi.getter(name="serviceAccount")
def service_account(self) -> pulumi.Output[str]:
"""
The service account that needs "pubsub.topics.publish" permission to publish to the Pub/Sub topic.
"""
return pulumi.get(self, "service_account")
@property
@pulumi.getter(name="streamingConfig")
def streaming_config(self) -> pulumi.Output['outputs.NotificationConfigStreamingConfig']:
"""
The config for triggering streaming-based notifications.
Structure is documented below.
"""
return pulumi.get(self, "streaming_config")
|
unittests/tools/test_intsights_parser.py | M-Rod101/django-DefectDojo | 249 | 4732 | from ..dojo_test_case import DojoTestCase
from dojo.models import Test
from dojo.tools.intsights.parser import IntSightsParser
class TestIntSightsParser(DojoTestCase):
def test_intsights_parser_with_one_critical_vuln_has_one_findings_json(
self):
testfile = open("unittests/scans/intsights/intsights_one_vul.json")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(1, len(findings))
finding = list(findings)[0]
self.assertEqual(
'5c80dbf83b4a3900078b6be6',
finding.unique_id_from_tool)
self.assertEqual(
'HTTP headers weakness in initech.com web server',
finding.title)
        self.assertEqual('Critical', finding.severity)
        self.assertEqual(
            "https://dashboard.intsights.com/#/threat-command/alerts?search=5c80dbf83b4a3900078b6be6",
            finding.references)
def test_intsights_parser_with_one_critical_vuln_has_one_findings_csv(
self):
testfile = open("unittests/scans/intsights/intsights_one_vuln.csv")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(1, len(findings))
finding = list(findings)[0]
self.assertEqual(
"mn7xy83finmmth4ja363rci9",
finding.unique_id_from_tool)
self.assertEqual(
"HTTP headers weakness in company-domain.com web server",
finding.title)
def test_intsights_parser_with_many_vuln_has_many_findings_json(self):
testfile = open("unittests/scans/intsights/intsights_many_vul.json")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(3, len(findings))
def test_intsights_parser_with_many_vuln_has_many_findings_csv(self):
testfile = open("unittests/scans/intsights/intsights_many_vuln.csv")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(9, len(findings))
def test_intsights_parser_invalid_text_with_error_csv(self):
with self.assertRaises(ValueError):
testfile = open(
"unittests/scans/intsights/intsights_invalid_file.txt")
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
|
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lang/cpp/class_types/TestClassTypesDisassembly.py | Polidea/SiriusObfuscator | 427 | 4741 | <gh_stars>100-1000
"""
Test the lldb disassemble command on each call frame when stopped on C's ctor.
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class IterateFrameAndDisassembleTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test_and_run_command(self):
"""Disassemble each call frame when stopped on C's constructor."""
self.build()
self.breakOnCtor()
raw_output = self.res.GetOutput()
frameRE = re.compile(r"""
^\s\sframe # heading for the frame info,
.* # wildcard, and
0x[0-9a-f]{16} # the frame pc, and
\sa.out`(.+) # module`function, and
\s\+\s # the rest ' + ....'
""", re.VERBOSE)
for line in raw_output.split(os.linesep):
match = frameRE.search(line)
if match:
function = match.group(1)
#print("line:", line)
#print("function:", function)
self.runCmd("disassemble -n '%s'" % function)
@add_test_categories(['pyapi'])
def test_and_python_api(self):
"""Disassemble each call frame when stopped on C's constructor."""
self.build()
self.breakOnCtor()
# Now use the Python API to get at each function on the call stack and
# disassemble it.
target = self.dbg.GetSelectedTarget()
process = target.GetProcess()
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
self.assertIsNotNone(thread)
depth = thread.GetNumFrames()
for i in range(depth - 1):
frame = thread.GetFrameAtIndex(i)
function = frame.GetFunction()
# Print the function header.
if self.TraceOn():
print()
print(function)
if function:
# Get all instructions for this function and print them out.
insts = function.GetInstructions(target)
for inst in insts:
# We could simply do 'print inst' to print out the disassembly.
# But we want to print to stdout only if self.TraceOn() is
# True.
disasm = str(inst)
if self.TraceOn():
print(disasm)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break for main.cpp.
self.line = line_number('main.cpp', '// Set break point at this line.')
def breakOnCtor(self):
"""Setup/run the program so it stops on C's constructor."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break on the ctor function of class C.
bpno = lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=-1)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint %d.' % (bpno)])
        # This test was failing because we failed to put the C:: in front of the constructor.
        # We should maybe make another testcase to cover that specifically, but we shouldn't
        # fail this whole testcase for an inessential issue.
# We should be stopped on the ctor function of class C.
# self.expect("thread backtrace", BACKTRACE_DISPLAYED_CORRECTLY,
# substrs = ['C::C'])
|
scalability/tests/test_misc.py | ggreif/ic | 941 | 4754 | import unittest
from unittest import TestCase
from misc import verify
class TestVerify(TestCase):
"""Tests misc.py verifies function."""
def test_verify__with_zero_threshold_and_expected_succeeds(self):
"""Test passes when expected rate, actual rate and threshold are all zero."""
result = verify(metric="Query failure rate", actual=0.0, expected=0.0, threshold=0.0)
self.assertEqual(result, 0)
    def test_verify__fails_when_positive_delta_is_larger_than_positive_threshold(self):
"""Test fails when positive delta between actual rate and expected rate exceeds positive threshold."""
result = verify(metric="Update latency", actual=200, expected=100, threshold=0.1)
self.assertEqual(result, 1)
def test_verify__fails_when_negative_delta_is_smaller_than_negative_threshold(self):
"""Test fails when negative delta between actual rate and expected rate exceeds negative threshold."""
result = verify(metric="Update latency", actual=50, expected=100, threshold=-0.01)
self.assertEqual(result, 1)
def test_verify__fails_when_negative_delta_and_positive_threshold(self):
"""Test fails when delta between actual rate and expected rate exceeds threshold."""
result = verify(metric="Update latency", actual=50, expected=100, threshold=0.01)
self.assertEqual(result, 0)
if __name__ == "__main__":
unittest.main()
|
runtime/Python3/src/antlr4/dfa/DFASerializer.py | maximmenshikov/antlr4 | 11,811 | 4771 | #
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# A DFA walker that knows how to dump them to serialized strings.#/
from io import StringIO
from antlr4 import DFA
from antlr4.Utils import str_list
from antlr4.dfa.DFAState import DFAState
class DFASerializer(object):
__slots__ = ('dfa', 'literalNames', 'symbolicNames')
def __init__(self, dfa:DFA, literalNames:list=None, symbolicNames:list=None):
self.dfa = dfa
self.literalNames = literalNames
self.symbolicNames = symbolicNames
def __str__(self):
if self.dfa.s0 is None:
return None
with StringIO() as buf:
for s in self.dfa.sortedStates():
n = 0
if s.edges is not None:
n = len(s.edges)
for i in range(0, n):
t = s.edges[i]
if t is not None and t.stateNumber != 0x7FFFFFFF:
buf.write(self.getStateString(s))
label = self.getEdgeLabel(i)
buf.write("-")
buf.write(label)
buf.write("->")
buf.write(self.getStateString(t))
buf.write('\n')
output = buf.getvalue()
if len(output)==0:
return None
else:
return output
def getEdgeLabel(self, i:int):
if i==0:
return "EOF"
if self.literalNames is not None and i<=len(self.literalNames):
return self.literalNames[i-1]
elif self.symbolicNames is not None and i<=len(self.symbolicNames):
return self.symbolicNames[i-1]
else:
return str(i-1)
def getStateString(self, s:DFAState):
n = s.stateNumber
baseStateStr = ( ":" if s.isAcceptState else "") + "s" + str(n) + ( "^" if s.requiresFullContext else "")
if s.isAcceptState:
if s.predicates is not None:
return baseStateStr + "=>" + str_list(s.predicates)
else:
return baseStateStr + "=>" + str(s.prediction)
else:
return baseStateStr
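# Editor's note (illustrative): for an accept state s1 that requires full
# context and predicts alternative 2, DFASerializer.getStateString() yields
# ":s1^=>2"; a plain non-accept state 3 is rendered simply as "s3".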
class LexerDFASerializer(DFASerializer):
def __init__(self, dfa:DFA):
super().__init__(dfa, None)
def getEdgeLabel(self, i:int):
return "'" + chr(i) + "'"
|
hierarchical_foresight/env/environment.py | deepneuralmachine/google-research | 23,901 | 4780 | <gh_stars>1000+
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment wrapper around the maze navigation environment.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from . import simple_maze
import cv2
import numpy as np
class Environment(object):
"""Wrapper around the Simple maze environment."""
def __init__(self, difficulty=None):
"""Initialize the environment with the specified difficulty."""
self.difficulty = difficulty
self._sim_env = simple_maze.navigate(difficulty=difficulty)
self.stepcount = 0
def reset(self):
"""Resets the environment."""
self.stepcount = 0
time_step = self._sim_env.reset()
return time_step
def get_goal_im(self):
"""Computes and returns the goal image."""
currp = copy.deepcopy(self._sim_env.physics.data.qpos[:])
currv = copy.deepcopy(self._sim_env.physics.data.qvel[:])
self._sim_env.task.dontreset = True
tg = copy.deepcopy(self._sim_env.physics.named.data.geom_xpos['target'][:2])
self._sim_env.physics.data.qpos[:] = tg
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
self._sim_env.physics.data.qpos[:] = tg
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
self._sim_env.physics.data.qpos[:] = currp
self._sim_env.physics.data.qvel[:] = currv
self.step([0, 0])
self._sim_env.task.dontreset = False
return gim
def get_subgoal_ims(self, numg):
"""Computes and returs the ground truth sub goal images."""
currp = copy.deepcopy(self._sim_env.physics.data.qpos[:])
currv = copy.deepcopy(self._sim_env.physics.data.qvel[:])
self._sim_env.task.dontreset = True
tg = copy.deepcopy(self._sim_env.physics.named.data.geom_xpos['target'][:2])
sg = []
if self.difficulty == 'e':
if numg == 1:
self._sim_env.physics.data.qpos[:] = currp + (tg - currp) / 2
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif numg == 2:
self._sim_env.physics.data.qpos[:] = currp + (tg - currp) / 3
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
self._sim_env.physics.data.qpos[:] = currp + 2 * (tg - currp) / 3
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif self.difficulty == 'm':
if numg == 1:
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall2A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall2A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif numg == 2:
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall2A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall2A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall2A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall2A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif self.difficulty == 'h':
if numg == 1:
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall1A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall1A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif numg == 2:
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall1A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall1A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall2A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall2A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
sg = np.array(sg)
self._sim_env.physics.data.qpos[:] = currp
self._sim_env.physics.data.qvel[:] = currv
self.step([0, 0])
self._sim_env.task.dontreset = False
return sg
def is_goal(self):
"""Checks if the current state is a goal state."""
return self._sim_env.task.is_goal(self._sim_env.physics)
def step(self, action=None):
"""Steps the environment."""
time_step = self._sim_env.step(action)
self._sim_env.physics.data.qvel[:] = 0
return time_step
def get_observation(self):
"""Return image observation."""
obs = self._sim_env.task.get_observation(self._sim_env.physics)
im = self._sim_env.physics.render(256, 256, camera_id='fixed')
im = cv2.resize(im, (64, 64), interpolation=cv2.INTER_LANCZOS4)
return obs, im
|
contrib/micronet/scripts/file2buf.py | pmalhaire/WireHub | 337 | 4784 | <reponame>pmalhaire/WireHub
#!/usr/bin/env python3
import os
import sys
MAX = 8
fpath = sys.argv[1]
name = sys.argv[2]
with open(fpath, "rb") as fh:
sys.stdout.write("char %s[] = {" % (name,) )
i = 0
while True:
if i > 0:
sys.stdout.write(", ")
if i % MAX == 0:
sys.stdout.write("\n\t")
c = fh.read(1)
if not c:
sys.stdout.write("\n")
break
sys.stdout.write("0x%.2x" % (ord(c), ))
i = i + 1
print("};")
print("")
print("unsigned int %s_sz = %s;" % (name, i))
print("")
|
braintree/account_updater_daily_report.py | futureironman/braintree_python | 182 | 4789 | from braintree.configuration import Configuration
from braintree.resource import Resource
class AccountUpdaterDailyReport(Resource):
def __init__(self, gateway, attributes):
Resource.__init__(self, gateway, attributes)
if "report_url" in attributes:
self.report_url = attributes.pop("report_url")
if "report_date" in attributes:
self.report_date = attributes.pop("report_date")
def __repr__(self):
detail_list = ["report_url", "report_date"]
return super(AccountUpdaterDailyReport, self).__repr__(detail_list)
|
kafka/structs.py | informatique-cdc/kafka-python | 4,389 | 4795 | """ Other useful structs """
from __future__ import absolute_import
from collections import namedtuple
"""A topic and partition tuple
Keyword Arguments:
topic (str): A topic name
partition (int): A partition id
"""
TopicPartition = namedtuple("TopicPartition",
["topic", "partition"])
"""A Kafka broker metadata used by admin tools.
Keyword Arguments:
nodeID (int): The Kafka broker id.
host (str): The Kafka broker hostname.
port (int): The Kafka broker port.
rack (str): The rack of the broker, which is used to in rack aware
partition assignment for fault tolerance.
Examples: `RACK1`, `us-east-1d`. Default: None
"""
BrokerMetadata = namedtuple("BrokerMetadata",
["nodeId", "host", "port", "rack"])
"""A topic partition metadata describing the state in the MetadataResponse.
Keyword Arguments:
topic (str): The topic name of the partition this metadata relates to.
partition (int): The id of the partition this metadata relates to.
leader (int): The id of the broker that is the leader for the partition.
replicas (List[int]): The ids of all brokers that contain replicas of the
partition.
isr (List[int]): The ids of all brokers that contain in-sync replicas of
the partition.
error (KafkaError): A KafkaError object associated with the request for
this partition metadata.
"""
PartitionMetadata = namedtuple("PartitionMetadata",
["topic", "partition", "leader", "replicas", "isr", "error"])
"""The Kafka offset commit API
The Kafka offset commit API allows users to provide additional metadata
(in the form of a string) when an offset is committed. This can be useful
(for example) to store information about which node made the commit,
what time the commit was made, etc.
Keyword Arguments:
offset (int): The offset to be committed
metadata (str): Non-null metadata
"""
OffsetAndMetadata = namedtuple("OffsetAndMetadata",
# TODO add leaderEpoch: OffsetAndMetadata(offset, leaderEpoch, metadata)
["offset", "metadata"])
"""An offset and timestamp tuple
Keyword Arguments:
offset (int): An offset
timestamp (int): The timestamp associated to the offset
"""
OffsetAndTimestamp = namedtuple("OffsetAndTimestamp",
["offset", "timestamp"])
MemberInformation = namedtuple("MemberInformation",
["member_id", "client_id", "client_host", "member_metadata", "member_assignment"])
GroupInformation = namedtuple("GroupInformation",
["error_code", "group", "state", "protocol_type", "protocol", "members", "authorized_operations"])
"""Define retry policy for async producer
Keyword Arguments:
    limit (int): Number of retries. limit >= 0, 0 means no retries
backoff_ms (int): Milliseconds to backoff.
retry_on_timeouts:
"""
RetryOptions = namedtuple("RetryOptions",
["limit", "backoff_ms", "retry_on_timeouts"])
|
Notebooks/SentinelUtilities/SentinelAnomalyLookup/__init__.py | ytognder/Azure-Sentinel | 266 | 4806 | # pylint: disable-msg=C0103
"""
SentinelAnomalyLookup: This package is developed for Azure Sentinel Anomaly lookup
"""
# __init__.py
from .anomaly_lookup_view_helper import AnomalyLookupViewHelper
from .anomaly_finder import AnomalyQueries, AnomalyFinder
|
src/extractors/emojiextractor.py | chmduquesne/rofimoji | 574 | 4819 | import html
from collections import namedtuple
from pathlib import Path
from typing import List, Dict
import requests
from bs4 import BeautifulSoup
from lxml import etree
from lxml.etree import XPath
Emoji = namedtuple('Emoji', 'char name')
class EmojiExtractor(object):
def __init__(self):
self.all_emojis = self.fetch_emoji_list()
self.annotations = self.fetch_annotations()
self.base_emojis = self.fetch_base_emojis()
def fetch_emoji_list(self: 'EmojiExtractor') -> List[Emoji]:
print('Downloading list of all emojis')
data = requests.get(
'https://unicode.org/emoji/charts-14.0/full-emoji-list.html',
timeout=120
) # type: requests.Response
        # use a distinct name so the stdlib html module is not shadowed
        soup = BeautifulSoup(data.text, 'lxml')
        emojis = []
        for row in soup.find('table').find_all('tr'):
if not row.th:
emoji = row.find('td', {'class': 'chars'}).string
description = row.find('td', {'class': 'name'}).string.replace('⊛ ', '')
emojis.append(Emoji(emoji, description))
return emojis
def fetch_annotations(self: 'EmojiExtractor') -> Dict[chr, List[str]]:
print('Downloading annotations')
data = requests.get(
'https://raw.githubusercontent.com/unicode-org/cldr/latest/common/annotations/en.xml',
timeout=60
) # type: requests.Response
xpath = XPath('./annotations/annotation[not(@type="tts")]')
return {element.get('cp'): element.text.split(' | ')
for element in xpath(etree.fromstring(data.content))}
def fetch_base_emojis(self: 'EmojiExtractor') -> List[chr]:
print('Downloading list of human emojis...')
data = requests.get(
'https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt',
timeout=60
) # type: requests.Response
started = False
emojis = []
for line in data.text.split('\n'):
if not started and line != '# All omitted code points have Emoji_Modifier_Base=No ':
continue
started = True
if line == '# Total elements: 132':
break
if line and not line.startswith('#'):
emojis.extend(self.resolve_character_range(line.split(';')[0].strip()))
return emojis
def resolve_character_range(self, line: str) -> List[str]:
try:
(start, end) = line.split('..')
return [chr(char) for char in range(int(start, 16), int(end, 16) + 1)]
except ValueError:
return [self.resolve_character(line)]
def resolve_character(self, string: str) -> str:
return "".join(chr(int(character, 16)) for character in string.split(' '))
def write_symbol_file(self: 'EmojiExtractor'):
print('Writing collected emojis to symbol file')
with Path('../picker/data/emojis.csv').open('w') as symbol_file:
for entry in self.compile_entries(self.all_emojis):
symbol_file.write(entry + "\n")
def compile_entries(self: 'EmojiExtractor', emojis: List[Emoji]) -> List[str]:
annotated_emojis = []
for emoji in emojis:
entry = f"{emoji.char} {html.escape(emoji.name)}"
if emoji.char in self.annotations:
entry += f" <small>({html.escape(', '.join([annotation for annotation in self.annotations[emoji.char] if annotation != emoji.name]))})</small>"
annotated_emojis.append(entry)
return annotated_emojis
def write_metadata_file(self: 'EmojiExtractor'):
print('Writing metadata to metadata file')
with Path('../picker/copyme.py').open('w') as metadata_file:
metadata_file.write('skin_tone_selectable_emojis={\'')
metadata_file.write('\', \''.join(self.base_emojis))
metadata_file.write('\'}\n')
def extract(self: 'EmojiExtractor'):
self.write_symbol_file()
self.write_metadata_file()
|
PhysicsTools/PatAlgos/python/producersLayer1/pfParticleProducer_cfi.py | ckamtsikis/cmssw | 852 | 4825 | <filename>PhysicsTools/PatAlgos/python/producersLayer1/pfParticleProducer_cfi.py
import FWCore.ParameterSet.Config as cms
patPFParticles = cms.EDProducer("PATPFParticleProducer",
# General configurables
pfCandidateSource = cms.InputTag("noJet"),
# MC matching configurables
addGenMatch = cms.bool(False),
genParticleMatch = cms.InputTag(""), ## particles source to be used for the MC matching
## must be an InputTag or VInputTag to a product of
## type edm::Association<reco::GenParticleCollection>
embedGenMatch = cms.bool(False), ## embed gen match inside the object instead of storing the ref
# add user data
userData = cms.PSet(
# add custom classes here
userClasses = cms.PSet(
src = cms.VInputTag('')
),
# add doubles here
userFloats = cms.PSet(
src = cms.VInputTag('')
),
# add ints here
userInts = cms.PSet(
src = cms.VInputTag('')
),
# add candidate ptrs here
userCands = cms.PSet(
src = cms.VInputTag('')
),
# add "inline" functions here
userFunctions = cms.vstring(),
userFunctionLabels = cms.vstring()
),
# Efficiencies
addEfficiencies = cms.bool(False),
efficiencies = cms.PSet(),
# resolution
addResolutions = cms.bool(False),
resolutions = cms.PSet(),
)
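# Editor's note (illustrative): a typical analysis-level customisation clones
# this module and points it at a real candidate collection, e.g.
#   patPFParticlesPF = patPFParticles.clone(pfCandidateSource = 'particleFlow')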
|
tests/test_api.py | ines/spacy-js | 141 | 4826 | <filename>tests/test_api.py
# coding: utf8
from __future__ import unicode_literals
import pytest
import spacy
import json
from api.server import parse, doc2json, load_model
@pytest.fixture(scope="session")
def model():
return "en_core_web_sm"
@pytest.fixture(scope="session")
def text():
return "This is a sentence about Facebook. This is another one."
@pytest.fixture(scope="session")
def nlp(model):
return spacy.load(model)
@pytest.fixture(scope="session")
def doc(nlp, text):
return nlp(text)
def test_server_parse(model, text, doc):
load_model(model)
json_doc = parse(model, text)
direct_json_doc = doc2json(doc, model)
assert json.dumps(json_doc, sort_keys=True) == json.dumps(
direct_json_doc, sort_keys=True
)
def test_doc2json_doc_tokens(doc, model):
data = doc2json(doc, model)
assert data["model"] == model
assert data["doc"]["text"] == doc.text
assert data["doc"]["text_with_ws"] == doc.text_with_ws
assert data["doc"]["is_tagged"]
assert data["doc"]["is_parsed"]
assert data["doc"]["is_sentenced"]
assert len(data["tokens"]) == len(doc)
assert data["tokens"][0]["text"] == doc[0].text
assert data["tokens"][0]["head"] == doc[0].head.i
def test_doc2json_doc_ents(doc, model):
data = doc2json(doc, model)
ents = list(doc.ents)
assert "ents" in data
assert len(data["ents"]) == len(ents)
assert len(data["ents"]) >= 1
assert data["ents"][0]["start"] == ents[0].start
assert data["ents"][0]["end"] == ents[0].end
assert data["ents"][0]["label"] == ents[0].label_
def test_doc2json_doc_sents(doc, model):
data = doc2json(doc, model)
sents = list(doc.sents)
assert "sents" in data
assert len(data["sents"]) == len(sents)
assert len(data["sents"]) >= 1
assert data["sents"][0]["start"] == sents[0].start
assert data["sents"][0]["end"] == sents[0].end
def test_doc2json_doc_noun_chunks(doc, model):
data = doc2json(doc, model)
chunks = list(doc.noun_chunks)
assert "noun_chunks" in data
assert len(data["noun_chunks"]) == len(chunks)
assert len(data["noun_chunks"]) >= 1
assert data["noun_chunks"][0]["start"] == chunks[0].start
assert data["noun_chunks"][0]["end"] == chunks[0].end
|
cocos2d/tools/coding-style/tailing-spaces.py | NIKEA-SOFT/TestGame | 898 | 4838 | #!/usr/bin/env python
#coding=utf-8
'''
Remove trailing whitespace and ensure one and only one empty ending line.
'''
import os, re
def scan(*dirs, **kwargs):
files = []
    extensions = kwargs['extensions'] if 'extensions' in kwargs else None
    excludes = kwargs['excludes'] if 'excludes' in kwargs else []
    for top in dirs:
        for root, dirnames, filenames in os.walk(top):
            # prune excluded directories in place so os.walk skips them
            dirnames[:] = [i for i in dirnames if i not in excludes]
for f in filenames:
if f in excludes:
continue
ext = os.path.splitext(f)[1].lower()
if extensions is None or ext in extensions:
files.append(os.path.join(root, f))
return files
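# Illustrative call (editor's sketch; the paths and extensions are hypothetical):
#   sources = scan('cocos', 'tests', extensions=['.h', '.cpp'], excludes=['.git'])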
def fixone(src):
lines = open(src, 'r').readlines()
trimed = []
for line in lines:
trimed.append(re.sub('\s+$', '', line))
while len(trimed) > 1 and not trimed[-1]:
trimed.pop()
trimed.append('')
with open(src, 'w') as f:
for line in trimed:
f.write('%s\n' % line)
def lint(root):
    print('Checking trailing whitespace in: %s' % root)
dirs = [
os.path.join(root, 'cocos'),
os.path.join(root, 'extensions'),
os.path.join(root, 'templates'),
os.path.join(root, 'tests'),
os.path.join(root, 'tools', 'simulator')
]
files = scan(*dirs, extensions=['.c', '.cpp', '.h', '.hpp', '.m', '.mm', '.java'])
for f in files:
print(f)
fixone(f)
def main():
default_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
lint(default_root)
main()
|
generate/lib/run-firefox/firefox_runner.py | flamencist/browser-extensions | 102 | 4841 | <reponame>flamencist/browser-extensions
import os
import shutil
import codecs
import json
from cuddlefish.runner import run_app
from cuddlefish.rdf import RDFManifest
def run():
original_harness_options = os.path.join('development', 'firefox', 'harness-options.json')
backup_harness_options = os.path.join('development', 'firefox', 'harness-options-bak.json')
shutil.move(original_harness_options, backup_harness_options)
with codecs.open(backup_harness_options, encoding='utf8') as harness_file:
harness_config = json.load(harness_file)
run_app(
harness_root_dir=os.path.join('development', 'firefox'),
harness_options=harness_config,
app_type="firefox",
verbose=True
)
|
src/oci/log_analytics/models/log_analytics_association.py | Manny27nyc/oci-python-sdk | 249 | 4856 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class LogAnalyticsAssociation(object):
"""
LogAnalyticsAssociation
"""
#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation.
#: This constant has a value of "ACCEPTED"
LIFE_CYCLE_STATE_ACCEPTED = "ACCEPTED"
#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation.
#: This constant has a value of "IN_PROGRESS"
LIFE_CYCLE_STATE_IN_PROGRESS = "IN_PROGRESS"
#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation.
#: This constant has a value of "SUCCEEDED"
LIFE_CYCLE_STATE_SUCCEEDED = "SUCCEEDED"
#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation.
#: This constant has a value of "FAILED"
LIFE_CYCLE_STATE_FAILED = "FAILED"
def __init__(self, **kwargs):
"""
Initializes a new LogAnalyticsAssociation object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param failure_message:
The value to assign to the failure_message property of this LogAnalyticsAssociation.
:type failure_message: str
:param agent_id:
The value to assign to the agent_id property of this LogAnalyticsAssociation.
:type agent_id: str
:param time_last_attempted:
The value to assign to the time_last_attempted property of this LogAnalyticsAssociation.
:type time_last_attempted: datetime
:param retry_count:
The value to assign to the retry_count property of this LogAnalyticsAssociation.
:type retry_count: int
:param source_name:
The value to assign to the source_name property of this LogAnalyticsAssociation.
:type source_name: str
:param source_display_name:
The value to assign to the source_display_name property of this LogAnalyticsAssociation.
:type source_display_name: str
:param source_type_name:
The value to assign to the source_type_name property of this LogAnalyticsAssociation.
:type source_type_name: str
:param life_cycle_state:
The value to assign to the life_cycle_state property of this LogAnalyticsAssociation.
Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type life_cycle_state: str
:param entity_id:
The value to assign to the entity_id property of this LogAnalyticsAssociation.
:type entity_id: str
:param entity_name:
The value to assign to the entity_name property of this LogAnalyticsAssociation.
:type entity_name: str
:param entity_type_name:
The value to assign to the entity_type_name property of this LogAnalyticsAssociation.
:type entity_type_name: str
:param host:
The value to assign to the host property of this LogAnalyticsAssociation.
:type host: str
:param agent_entity_name:
The value to assign to the agent_entity_name property of this LogAnalyticsAssociation.
:type agent_entity_name: str
:param entity_type_display_name:
The value to assign to the entity_type_display_name property of this LogAnalyticsAssociation.
:type entity_type_display_name: str
:param log_group_id:
The value to assign to the log_group_id property of this LogAnalyticsAssociation.
:type log_group_id: str
:param log_group_name:
The value to assign to the log_group_name property of this LogAnalyticsAssociation.
:type log_group_name: str
:param log_group_compartment:
The value to assign to the log_group_compartment property of this LogAnalyticsAssociation.
:type log_group_compartment: str
"""
self.swagger_types = {
'failure_message': 'str',
'agent_id': 'str',
'time_last_attempted': 'datetime',
'retry_count': 'int',
'source_name': 'str',
'source_display_name': 'str',
'source_type_name': 'str',
'life_cycle_state': 'str',
'entity_id': 'str',
'entity_name': 'str',
'entity_type_name': 'str',
'host': 'str',
'agent_entity_name': 'str',
'entity_type_display_name': 'str',
'log_group_id': 'str',
'log_group_name': 'str',
'log_group_compartment': 'str'
}
self.attribute_map = {
'failure_message': 'failureMessage',
'agent_id': 'agentId',
'time_last_attempted': 'timeLastAttempted',
'retry_count': 'retryCount',
'source_name': 'sourceName',
'source_display_name': 'sourceDisplayName',
'source_type_name': 'sourceTypeName',
'life_cycle_state': 'lifeCycleState',
'entity_id': 'entityId',
'entity_name': 'entityName',
'entity_type_name': 'entityTypeName',
'host': 'host',
'agent_entity_name': 'agentEntityName',
'entity_type_display_name': 'entityTypeDisplayName',
'log_group_id': 'logGroupId',
'log_group_name': 'logGroupName',
'log_group_compartment': 'logGroupCompartment'
}
self._failure_message = None
self._agent_id = None
self._time_last_attempted = None
self._retry_count = None
self._source_name = None
self._source_display_name = None
self._source_type_name = None
self._life_cycle_state = None
self._entity_id = None
self._entity_name = None
self._entity_type_name = None
self._host = None
self._agent_entity_name = None
self._entity_type_display_name = None
self._log_group_id = None
self._log_group_name = None
self._log_group_compartment = None
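        # Illustrative construction (editor's sketch; the values below are
        # hypothetical, mapped to setters by init_model_state_from_kwargs):
        #   assoc = LogAnalyticsAssociation(
        #       source_name="syslog_src",
        #       entity_name="my-host",
        #       life_cycle_state="SUCCEEDED")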
@property
def failure_message(self):
"""
Gets the failure_message of this LogAnalyticsAssociation.
The failure message.
:return: The failure_message of this LogAnalyticsAssociation.
:rtype: str
"""
return self._failure_message
@failure_message.setter
def failure_message(self, failure_message):
"""
Sets the failure_message of this LogAnalyticsAssociation.
The failure message.
:param failure_message: The failure_message of this LogAnalyticsAssociation.
:type: str
"""
self._failure_message = failure_message
@property
def agent_id(self):
"""
Gets the agent_id of this LogAnalyticsAssociation.
The agent unique identifier.
:return: The agent_id of this LogAnalyticsAssociation.
:rtype: str
"""
return self._agent_id
@agent_id.setter
def agent_id(self, agent_id):
"""
Sets the agent_id of this LogAnalyticsAssociation.
The agent unique identifier.
:param agent_id: The agent_id of this LogAnalyticsAssociation.
:type: str
"""
self._agent_id = agent_id
@property
def time_last_attempted(self):
"""
Gets the time_last_attempted of this LogAnalyticsAssociation.
The last attempt date.
:return: The time_last_attempted of this LogAnalyticsAssociation.
:rtype: datetime
"""
return self._time_last_attempted
@time_last_attempted.setter
def time_last_attempted(self, time_last_attempted):
"""
Sets the time_last_attempted of this LogAnalyticsAssociation.
The last attempt date.
:param time_last_attempted: The time_last_attempted of this LogAnalyticsAssociation.
:type: datetime
"""
self._time_last_attempted = time_last_attempted
@property
def retry_count(self):
"""
Gets the retry_count of this LogAnalyticsAssociation.
The number of times the association will be attempted
before failing.
:return: The retry_count of this LogAnalyticsAssociation.
:rtype: int
"""
return self._retry_count
@retry_count.setter
def retry_count(self, retry_count):
"""
Sets the retry_count of this LogAnalyticsAssociation.
The number of times the association will be attempted
before failing.
:param retry_count: The retry_count of this LogAnalyticsAssociation.
:type: int
"""
self._retry_count = retry_count
@property
def source_name(self):
"""
Gets the source_name of this LogAnalyticsAssociation.
The source name.
:return: The source_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._source_name
@source_name.setter
def source_name(self, source_name):
"""
Sets the source_name of this LogAnalyticsAssociation.
The source name.
:param source_name: The source_name of this LogAnalyticsAssociation.
:type: str
"""
self._source_name = source_name
@property
def source_display_name(self):
"""
Gets the source_display_name of this LogAnalyticsAssociation.
The source display name.
:return: The source_display_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._source_display_name
@source_display_name.setter
def source_display_name(self, source_display_name):
"""
Sets the source_display_name of this LogAnalyticsAssociation.
The source display name.
:param source_display_name: The source_display_name of this LogAnalyticsAssociation.
:type: str
"""
self._source_display_name = source_display_name
@property
def source_type_name(self):
"""
Gets the source_type_name of this LogAnalyticsAssociation.
The source type internal name.
:return: The source_type_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._source_type_name
@source_type_name.setter
def source_type_name(self, source_type_name):
"""
Sets the source_type_name of this LogAnalyticsAssociation.
The source type internal name.
:param source_type_name: The source_type_name of this LogAnalyticsAssociation.
:type: str
"""
self._source_type_name = source_type_name
@property
def life_cycle_state(self):
"""
Gets the life_cycle_state of this LogAnalyticsAssociation.
The lifecycle status. Valid values are ACCEPTED, IN_PROGRESS, SUCCEEDED
or FAILED.
Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The life_cycle_state of this LogAnalyticsAssociation.
:rtype: str
"""
return self._life_cycle_state
@life_cycle_state.setter
def life_cycle_state(self, life_cycle_state):
"""
Sets the life_cycle_state of this LogAnalyticsAssociation.
The lifecycle status. Valid values are ACCEPTED, IN_PROGRESS, SUCCEEDED
or FAILED.
:param life_cycle_state: The life_cycle_state of this LogAnalyticsAssociation.
:type: str
"""
allowed_values = ["ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED"]
if not value_allowed_none_or_none_sentinel(life_cycle_state, allowed_values):
life_cycle_state = 'UNKNOWN_ENUM_VALUE'
self._life_cycle_state = life_cycle_state
@property
def entity_id(self):
"""
Gets the entity_id of this LogAnalyticsAssociation.
The entity unique identifier.
:return: The entity_id of this LogAnalyticsAssociation.
:rtype: str
"""
return self._entity_id
@entity_id.setter
def entity_id(self, entity_id):
"""
Sets the entity_id of this LogAnalyticsAssociation.
The entity unique identifier.
:param entity_id: The entity_id of this LogAnalyticsAssociation.
:type: str
"""
self._entity_id = entity_id
@property
def entity_name(self):
"""
Gets the entity_name of this LogAnalyticsAssociation.
The entity name.
:return: The entity_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._entity_name
@entity_name.setter
def entity_name(self, entity_name):
"""
Sets the entity_name of this LogAnalyticsAssociation.
The entity name.
:param entity_name: The entity_name of this LogAnalyticsAssociation.
:type: str
"""
self._entity_name = entity_name
@property
def entity_type_name(self):
"""
Gets the entity_type_name of this LogAnalyticsAssociation.
The entity type internal name.
:return: The entity_type_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._entity_type_name
@entity_type_name.setter
def entity_type_name(self, entity_type_name):
"""
Sets the entity_type_name of this LogAnalyticsAssociation.
The entity type internal name.
:param entity_type_name: The entity_type_name of this LogAnalyticsAssociation.
:type: str
"""
self._entity_type_name = entity_type_name
@property
def host(self):
"""
Gets the host of this LogAnalyticsAssociation.
The host name.
:return: The host of this LogAnalyticsAssociation.
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""
Sets the host of this LogAnalyticsAssociation.
The host name.
:param host: The host of this LogAnalyticsAssociation.
:type: str
"""
self._host = host
@property
def agent_entity_name(self):
"""
Gets the agent_entity_name of this LogAnalyticsAssociation.
The name of the entity which contains the agent.
:return: The agent_entity_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._agent_entity_name
@agent_entity_name.setter
def agent_entity_name(self, agent_entity_name):
"""
Sets the agent_entity_name of this LogAnalyticsAssociation.
The name of the entity which contains the agent.
:param agent_entity_name: The agent_entity_name of this LogAnalyticsAssociation.
:type: str
"""
self._agent_entity_name = agent_entity_name
@property
def entity_type_display_name(self):
"""
Gets the entity_type_display_name of this LogAnalyticsAssociation.
The entity type display name.
:return: The entity_type_display_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._entity_type_display_name
@entity_type_display_name.setter
def entity_type_display_name(self, entity_type_display_name):
"""
Sets the entity_type_display_name of this LogAnalyticsAssociation.
The entity type display name.
:param entity_type_display_name: The entity_type_display_name of this LogAnalyticsAssociation.
:type: str
"""
self._entity_type_display_name = entity_type_display_name
@property
def log_group_id(self):
"""
Gets the log_group_id of this LogAnalyticsAssociation.
The log group unique identifier.
:return: The log_group_id of this LogAnalyticsAssociation.
:rtype: str
"""
return self._log_group_id
@log_group_id.setter
def log_group_id(self, log_group_id):
"""
Sets the log_group_id of this LogAnalyticsAssociation.
The log group unique identifier.
:param log_group_id: The log_group_id of this LogAnalyticsAssociation.
:type: str
"""
self._log_group_id = log_group_id
@property
def log_group_name(self):
"""
Gets the log_group_name of this LogAnalyticsAssociation.
The log group name.
:return: The log_group_name of this LogAnalyticsAssociation.
:rtype: str
"""
return self._log_group_name
@log_group_name.setter
def log_group_name(self, log_group_name):
"""
Sets the log_group_name of this LogAnalyticsAssociation.
The log group name.
:param log_group_name: The log_group_name of this LogAnalyticsAssociation.
:type: str
"""
self._log_group_name = log_group_name
@property
def log_group_compartment(self):
"""
Gets the log_group_compartment of this LogAnalyticsAssociation.
The log group compartment.
:return: The log_group_compartment of this LogAnalyticsAssociation.
:rtype: str
"""
return self._log_group_compartment
@log_group_compartment.setter
def log_group_compartment(self, log_group_compartment):
"""
Sets the log_group_compartment of this LogAnalyticsAssociation.
The log group compartment.
:param log_group_compartment: The log_group_compartment of this LogAnalyticsAssociation.
:type: str
"""
self._log_group_compartment = log_group_compartment
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
mmcls/models/utils/se_layer.py | YuxinZou/mmclassification | 1,190 | 4867 | <gh_stars>1000+
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from .make_divisible import make_divisible
class SELayer(BaseModule):
"""Squeeze-and-Excitation Module.
Args:
channels (int): The input (and output) channels of the SE layer.
squeeze_channels (None or int): The intermediate channel number of
            SELayer. Default: None, which means the value of ``squeeze_channels``
is ``make_divisible(channels // ratio, divisor)``.
ratio (int): Squeeze ratio in SELayer, the intermediate channel will
be ``make_divisible(channels // ratio, divisor)``. Only used when
``squeeze_channels`` is None. Default: 16.
        divisor (int): The divisor used to make the channel number divisible. Only
used when ``squeeze_channels`` is None. Default: 8.
conv_cfg (None or dict): Config dict for convolution layer. Default:
None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, both activation layers will be configured
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configured by the first dict and the
            second activation layer will be configured by the second dict.
Default: (dict(type='ReLU'), dict(type='Sigmoid'))
"""
def __init__(self,
channels,
squeeze_channels=None,
ratio=16,
divisor=8,
bias='auto',
conv_cfg=None,
act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
init_cfg=None):
super(SELayer, self).__init__(init_cfg)
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert mmcv.is_tuple_of(act_cfg, dict)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
if squeeze_channels is None:
squeeze_channels = make_divisible(channels // ratio, divisor)
assert isinstance(squeeze_channels, int) and squeeze_channels > 0, \
'"squeeze_channels" should be a positive integer, but get ' + \
f'{squeeze_channels} instead.'
self.conv1 = ConvModule(
in_channels=channels,
out_channels=squeeze_channels,
kernel_size=1,
stride=1,
bias=bias,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=squeeze_channels,
out_channels=channels,
kernel_size=1,
stride=1,
bias=bias,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x):
out = self.global_avgpool(x)
out = self.conv1(out)
out = self.conv2(out)
return x * out
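# A minimal usage sketch, assuming torch and mmcv are installed; the channel
# count and input shape below are illustrative only.
if __name__ == '__main__':
    import torch

    se = SELayer(channels=64, ratio=16)
    x = torch.randn(2, 64, 32, 32)  # (N, C, H, W) feature map
    y = se(x)
    # The SE gate rescales channels, so the output shape matches the input.
    assert y.shape == x.shape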
|
core/recognizer.py | awen1988/yry | 129 | 4870 | """
recognize face landmark
"""
import json
import os
import requests
import numpy as np
FACE_POINTS = list(range(0, 83))
JAW_POINTS = list(range(0, 19))
LEFT_EYE_POINTS = list(range(19, 29))
LEFT_BROW_POINTS = list(range(29, 37))
MOUTH_POINTS = list(range(37, 55))
NOSE_POINTS = list(range(55, 65))
RIGHT_EYE_POINTS = list(range(65, 75))
RIGHT_BROW_POINTS = list(range(75, 83))
LEFT_FACE = list(range(0, 10)) + list(range(29, 34))
RIGHT_FACE = list(range(9, 19)) + list(range(75, 80))
JAW_END = 19
FACE_START = 0
FACE_END = 83
OVERLAY_POINTS = [
LEFT_FACE,
RIGHT_FACE,
JAW_POINTS,
]
def face_points(image):
points = []
txt = image + '.txt'
if os.path.isfile(txt):
with open(txt) as file:
for line in file:
points = line
elif os.path.isfile(image):
points = landmarks_by_face__(image)
with open(txt, 'w') as file:
file.write(str(points))
faces = json.loads(points)['faces']
if len(faces) == 0:
err = 404
else:
err = 0
matrix_list = np.matrix(matrix_marks(faces[0]['landmark']))
point_list = []
for p in matrix_list.tolist():
point_list.append((int(p[0]), int(p[1])))
return matrix_list, point_list, err
def landmarks_by_face__(image):
url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
params = {
'api_key': '<KEY>',
'api_secret': '<KEY>',
'return_landmark': 1,
}
file = {'image_file': open(image, 'rb')}
r = requests.post(url=url, files=file, data=params)
if r.status_code == requests.codes.ok:
return r.content.decode('utf-8')
else:
return r.content
def matrix_rectangle(left, top, width, height):
pointer = [
(left, top),
(left + width / 2, top),
(left + width - 1, top),
(left + width - 1, top + height / 2),
(left, top + height / 2),
(left, top + height - 1),
(left + width / 2, top + height - 1),
(left + width - 1, top + height - 1)
]
return pointer
def matrix_marks(res):
pointer = [
[res['contour_left1']['x'], res['contour_left1']['y']],
[res['contour_left2']['x'], res['contour_left2']['y']],
[res['contour_left3']['x'], res['contour_left3']['y']],
[res['contour_left4']['x'], res['contour_left4']['y']],
[res['contour_left5']['x'], res['contour_left5']['y']],
[res['contour_left6']['x'], res['contour_left6']['y']],
[res['contour_left7']['x'], res['contour_left7']['y']],
[res['contour_left8']['x'], res['contour_left8']['y']],
[res['contour_left9']['x'], res['contour_left9']['y']],
[res['contour_chin']['x'], res['contour_chin']['y']],
[res['contour_right9']['x'], res['contour_right9']['y']],
[res['contour_right8']['x'], res['contour_right8']['y']],
[res['contour_right7']['x'], res['contour_right7']['y']],
[res['contour_right6']['x'], res['contour_right6']['y']],
[res['contour_right5']['x'], res['contour_right5']['y']],
[res['contour_right4']['x'], res['contour_right4']['y']],
[res['contour_right3']['x'], res['contour_right3']['y']],
[res['contour_right2']['x'], res['contour_right2']['y']],
[res['contour_right1']['x'], res['contour_right1']['y']],
[res['left_eye_bottom']['x'], res['left_eye_bottom']['y']],
[res['left_eye_center']['x'], res['left_eye_center']['y']],
[res['left_eye_left_corner']['x'], res['left_eye_left_corner']['y']],
[res['left_eye_lower_left_quarter']['x'], res['left_eye_lower_left_quarter']['y']],
[res['left_eye_lower_right_quarter']['x'], res['left_eye_lower_right_quarter']['y']],
[res['left_eye_pupil']['x'], res['left_eye_pupil']['y']],
[res['left_eye_right_corner']['x'], res['left_eye_right_corner']['y']],
[res['left_eye_top']['x'], res['left_eye_top']['y']],
[res['left_eye_upper_left_quarter']['x'], res['left_eye_upper_left_quarter']['y']],
[res['left_eye_upper_right_quarter']['x'], res['left_eye_upper_right_quarter']['y']],
[res['left_eyebrow_left_corner']['x'], res['left_eyebrow_left_corner']['y']],
[res['left_eyebrow_upper_left_quarter']['x'], res['left_eyebrow_upper_left_quarter']['y']],
[res['left_eyebrow_upper_middle']['x'], res['left_eyebrow_upper_middle']['y']],
[res['left_eyebrow_upper_right_quarter']['x'], res['left_eyebrow_upper_right_quarter']['y']],
[res['left_eyebrow_right_corner']['x'], res['left_eyebrow_right_corner']['y']],
[res['left_eyebrow_lower_left_quarter']['x'], res['left_eyebrow_lower_left_quarter']['y']],
[res['left_eyebrow_lower_middle']['x'], res['left_eyebrow_lower_middle']['y']],
[res['left_eyebrow_lower_right_quarter']['x'], res['left_eyebrow_lower_right_quarter']['y']],
[res['mouth_left_corner']['x'], res['mouth_left_corner']['y']],
[res['mouth_lower_lip_bottom']['x'], res['mouth_lower_lip_bottom']['y']],
[res['mouth_lower_lip_left_contour1']['x'], res['mouth_lower_lip_left_contour1']['y']],
[res['mouth_lower_lip_left_contour2']['x'], res['mouth_lower_lip_left_contour2']['y']],
[res['mouth_lower_lip_left_contour3']['x'], res['mouth_lower_lip_left_contour3']['y']],
[res['mouth_lower_lip_right_contour1']['x'], res['mouth_lower_lip_right_contour1']['y']],
[res['mouth_lower_lip_right_contour2']['x'], res['mouth_lower_lip_right_contour2']['y']],
[res['mouth_lower_lip_right_contour3']['x'], res['mouth_lower_lip_right_contour3']['y']],
[res['mouth_lower_lip_top']['x'], res['mouth_lower_lip_top']['y']],
[res['mouth_right_corner']['x'], res['mouth_right_corner']['y']],
[res['mouth_upper_lip_bottom']['x'], res['mouth_upper_lip_bottom']['y']],
[res['mouth_upper_lip_left_contour1']['x'], res['mouth_upper_lip_left_contour1']['y']],
[res['mouth_upper_lip_left_contour2']['x'], res['mouth_upper_lip_left_contour2']['y']],
[res['mouth_upper_lip_left_contour3']['x'], res['mouth_upper_lip_left_contour3']['y']],
[res['mouth_upper_lip_right_contour1']['x'], res['mouth_upper_lip_right_contour1']['y']],
[res['mouth_upper_lip_right_contour2']['x'], res['mouth_upper_lip_right_contour2']['y']],
[res['mouth_upper_lip_right_contour3']['x'], res['mouth_upper_lip_right_contour3']['y']],
[res['mouth_upper_lip_top']['x'], res['mouth_upper_lip_top']['y']],
[res['nose_contour_left1']['x'], res['nose_contour_left1']['y']],
[res['nose_contour_left2']['x'], res['nose_contour_left2']['y']],
[res['nose_contour_left3']['x'], res['nose_contour_left3']['y']],
[res['nose_contour_lower_middle']['x'], res['nose_contour_lower_middle']['y']],
[res['nose_contour_right1']['x'], res['nose_contour_right1']['y']],
[res['nose_contour_right2']['x'], res['nose_contour_right2']['y']],
[res['nose_contour_right3']['x'], res['nose_contour_right3']['y']],
[res['nose_left']['x'], res['nose_left']['y']],
[res['nose_right']['x'], res['nose_right']['y']],
[res['nose_tip']['x'], res['nose_tip']['y']],
[res['right_eye_bottom']['x'], res['right_eye_bottom']['y']],
[res['right_eye_center']['x'], res['right_eye_center']['y']],
[res['right_eye_left_corner']['x'], res['right_eye_left_corner']['y']],
[res['right_eye_lower_left_quarter']['x'], res['right_eye_lower_left_quarter']['y']],
[res['right_eye_lower_right_quarter']['x'], res['right_eye_lower_right_quarter']['y']],
[res['right_eye_pupil']['x'], res['right_eye_pupil']['y']],
[res['right_eye_right_corner']['x'], res['right_eye_right_corner']['y']],
[res['right_eye_top']['x'], res['right_eye_top']['y']],
[res['right_eye_upper_left_quarter']['x'], res['right_eye_upper_left_quarter']['y']],
[res['right_eye_upper_right_quarter']['x'], res['right_eye_upper_right_quarter']['y']],
[res['right_eyebrow_left_corner']['x'], res['right_eyebrow_left_corner']['y']],
[res['right_eyebrow_upper_left_quarter']['x'], res['right_eyebrow_upper_left_quarter']['y']],
[res['right_eyebrow_upper_middle']['x'], res['right_eyebrow_upper_middle']['y']],
[res['right_eyebrow_upper_right_quarter']['x'], res['right_eyebrow_upper_right_quarter']['y']],
[res['right_eyebrow_right_corner']['x'], res['right_eyebrow_right_corner']['y']],
[res['right_eyebrow_lower_left_quarter']['x'], res['right_eyebrow_lower_left_quarter']['y']],
[res['right_eyebrow_lower_middle']['x'], res['right_eyebrow_lower_middle']['y']],
[res['right_eyebrow_lower_right_quarter']['x'], res['right_eyebrow_lower_right_quarter']['y']],
]
return pointer
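# A minimal sketch of the pure helper above; the box coordinates are
# illustrative only. matrix_rectangle() produces the four corners and four
# edge midpoints of a face bounding box.
if __name__ == '__main__':
    for point in matrix_rectangle(left=100, top=50, width=200, height=300):
        print(point)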
|
src/promnesia/sources/telegram.py | halhenke/promnesia | 1,327 | 4877 | <reponame>halhenke/promnesia<filename>src/promnesia/sources/telegram.py
'''
Uses [[https://github.com/fabianonline/telegram_backup#readme][telegram_backup]] database for messages data
'''
from pathlib import Path
from textwrap import dedent
from typing import Optional, Union, TypeVar
from urllib.parse import unquote # TODO mm, make it easier to remember to use...
from ..common import PathIsh, Visit, get_logger, Loc, extract_urls, from_epoch, Results, echain
# TODO potentially, belongs to my. package
# TODO kython?
T = TypeVar("T")
def unwrap(res: Union[T, Exception]) -> T:
if isinstance(res, Exception):
raise res
else:
return res
# TODO move to common?
def dataset_readonly(db: Path):
import dataset # type: ignore
# see https://github.com/pudo/dataset/issues/136#issuecomment-128693122
import sqlite3
creator = lambda: sqlite3.connect(f'file:{db}?immutable=1', uri=True)
return dataset.connect('sqlite:///' , engine_kwargs={'creator': creator})
def index(database: PathIsh, *, http_only: bool=None) -> Results:
"""
:param database:
the path of the sqlite generated by the _telegram_backup_ java program
:param http_only:
        when true, only consider messages that have media attached or contain
        "http" in the text; this avoids collecting false-positive URLs such as
        bare IP addresses or file names like `python.py`
"""
logger = get_logger()
path = Path(database)
assert path.is_file(), path # TODO could check is_file inside `dataset_readonly()`
def make_query(text_query: str):
extra_criteria = "AND (M.has_media == 1 OR text LIKE '%http%')" if http_only else ""
return dedent(
f"""
WITH entities AS (
SELECT 'dialog' as type
, id
, coalesce(username, id) as handle
, coalesce(first_name || " " || last_name
, username
, id
) as display_name FROM users
UNION
SELECT 'group' as type
, id
, id as handle
, coalesce(name, id) as display_name FROM chats
)
SELECT src.display_name AS chatname
, src.handle AS chat
, snd.display_name AS sender
, M.time AS time
, {text_query} AS text
, M.id AS mid
FROM messages AS M
/* chat types are 'dialog' (1-1), 'group' and 'supergroup' */
            /* this is a bit of a hacky way to handle all groups in one go */
LEFT JOIN entities AS src ON M.source_id = src.id AND src.type = (CASE M.source_type WHEN 'supergroup' THEN 'group' ELSE M.source_type END)
LEFT JOIN entities AS snd ON M.sender_id = snd.id AND snd.type = 'dialog'
WHERE
M.message_type NOT IN ('service_message', 'empty_message')
{extra_criteria}
ORDER BY time;
""")
# TODO context manager?
with dataset_readonly(path) as db:
# TODO yield error if chatname or chat or smth else is null?
for row in db.query(make_query('M.text')):
try:
yield from _handle_row(row)
except Exception as ex:
yield echain(RuntimeError(f'While handling {row}'), ex)
# , None, sys.exc_info()[2]
# TODO hmm. traceback isn't preserved; wonder if that's because it's too heavy to attach to every single exception object..
# old (also 'stable') version doesn't have 'json' column yet...
if 'json' in db['messages'].columns:
for row in db.query(make_query("json_extract(json, '$.media.webpage.description')")):
try:
yield from _handle_row(row)
except Exception as ex:
yield echain(RuntimeError(f'While handling {row}'), ex)
def _handle_row(row) -> Results:
text = row['text']
if text is None:
return
urls = extract_urls(text)
if len(urls) == 0:
return
dt = from_epoch(row['time'])
mid: str = unwrap(row['mid'])
# TODO perhaps we could be defensive with null sender/chat etc and still emit the Visit
sender: str = unwrap(row['sender'])
chatname: str = unwrap(row['chatname'])
chat: str = unwrap(row['chat'])
in_context = f'https://t.me/{chat}/{mid}'
for u in urls:
# https://www.reddit.com/r/Telegram/comments/6ufwi3/link_to_a_specific_message_in_a_channel_possible/
# hmm, only seems to work on mobile app, but better than nothing...
yield Visit(
url=unquote(u),
dt=dt,
context=f"{sender}: {text}",
locator=Loc.make(
title=f"chat with {chatname}",
href=in_context,
),
)
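# A minimal invocation sketch, assuming promnesia is installed and that the
# database path below (illustrative) points at a telegram_backup sqlite file;
# run as a module (python -m promnesia.sources.telegram) so the relative
# imports resolve.
if __name__ == '__main__':
    for visit_or_error in index('./database.sqlite', http_only=True):
        print(visit_or_error)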
|
ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py | monroid/openvino | 2,406 | 4887 | <reponame>monroid/openvino
#
# slice paddle model generator
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import sys
data_type = 'float32'
def slice(name : str, x, axes : list, start : list, end : list):
pdpd.enable_static()
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
out = pdpd.fluid.layers.slice(node_x, axes = axes, starts = start, ends = end)
cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
outs = exe.run(
feed={'x': x},
fetch_list=[out])
saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
return outs[0]
def main():
x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(4, 3, 5).astype(data_type)
slice("slice", x, axes=[1, 2], start=(0, 1), end=(-1, 3))
x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(2, 30).astype(data_type)
slice("slice_1d", x, axes=[0], start=[0], end=[1])
if __name__ == "__main__":
main() |
tacker/sol_refactored/common/vnf_instance_utils.py | h1r0mu/tacker | 116 | 4888 | # Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored import objects
LOG = logging.getLogger(__name__) # not used at the moment
def get_inst(context, inst_id):
inst = objects.VnfInstanceV2.get_by_id(context, inst_id)
if inst is None:
raise sol_ex.VnfInstanceNotFound(inst_id=inst_id)
return inst
def get_inst_all(context):
return objects.VnfInstanceV2.get_all(context)
def inst_href(inst_id, endpoint):
return "{}/v2/vnflcm/vnf_instances/{}".format(endpoint, inst_id)
def make_inst_links(inst, endpoint):
links = objects.VnfInstanceV2_Links()
self_href = inst_href(inst.id, endpoint)
links.self = objects.Link(href=self_href)
if inst.instantiationState == 'NOT_INSTANTIATED':
links.instantiate = objects.Link(href=self_href + "/instantiate")
else: # 'INSTANTIATED'
links.terminate = objects.Link(href=self_href + "/terminate")
# TODO(oda-g): add when the operation supported
# links.scale = objects.Link(href = self_href + "/scale")
# etc.
return links
# see IETF RFC 7396
def json_merge_patch(target, patch):
if isinstance(patch, dict):
if not isinstance(target, dict):
target = {}
for key, value in patch.items():
if value is None:
if key in target:
del target[key]
else:
target[key] = json_merge_patch(target.get(key), value)
return target
else:
return patch
def select_vim_info(vim_connection_info):
# NOTE: It is assumed that vimConnectionInfo has only one item
# at the moment. If there are multiple items, it is uncertain
# which item is selected.
for vim_info in vim_connection_info.values():
return vim_info
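# A small self-check sketch of the RFC 7396 merge-patch helper above, assuming
# the tacker package and its dependencies are importable; the dictionaries are
# illustrative only. A None value removes a key, nested dicts merge
# recursively, and a non-dict patch replaces the target outright.
if __name__ == '__main__':
    target = {'a': 1, 'b': {'c': 2, 'd': 3}}
    patch = {'b': {'c': None, 'e': 4}, 'f': 5}
    assert json_merge_patch(target, patch) == {'a': 1, 'b': {'d': 3, 'e': 4}, 'f': 5}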
|
old_py2/tests/models_tests/notifications/test_match_score.py | ofekashery/the-blue-alliance | 266 | 4890 | <reponame>ofekashery/the-blue-alliance<filename>old_py2/tests/models_tests/notifications/test_match_score.py<gh_stars>100-1000
import re
import unittest2
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.notification_type import NotificationType
from helpers.event.event_test_creator import EventTestCreator
from models.team import Team
from models.notifications.match_score import MatchScoreNotification
class TestMatchScoreNotification(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.testbed.init_taskqueue_stub(root_path=".")
for team_number in range(6):
Team(id="frc%s" % team_number,
team_number=team_number).put()
self.event = EventTestCreator.createPresentEvent()
self.match = self.event.matches[0]
self.notification = MatchScoreNotification(self.match)
def tearDown(self):
self.testbed.deactivate()
def test_type(self):
self.assertEqual(MatchScoreNotification._type(), NotificationType.MATCH_SCORE)
def test_fcm_notification(self):
self.assertIsNotNone(self.notification.fcm_notification)
self.assertEqual(self.notification.fcm_notification.title, 'TESTPRESENT Q1 Results')
match_regex = re.compile(r'^\d+, \d+, \d+ beat \d+, \d+, \d+ scoring \d+-\d+.$')
match = re.match(match_regex, self.notification.fcm_notification.body)
self.assertIsNotNone(match)
def test_fcm_notification_tied(self):
score = self.notification.match.alliances['red']['score']
self.notification.match.alliances['blue']['score'] = score
self.assertIsNotNone(self.notification.fcm_notification)
self.assertEqual(self.notification.fcm_notification.title, 'TESTPRESENT Q1 Results')
match_regex = re.compile(r'^\d+, \d+, \d+ tied with \d+, \d+, \d+ scoring \d+-\d+.$')
match = re.match(match_regex, self.notification.fcm_notification.body)
self.assertIsNotNone(match)
def test_fcm_notification_team(self):
team = Team.get_by_id('frc1')
notification = MatchScoreNotification(self.match, team)
self.assertEqual(notification.fcm_notification.title, 'Team 1 TESTPRESENT Q1 Results')
def test_data_payload(self):
payload = self.notification.data_payload
self.assertEqual(len(payload), 2)
self.assertEqual(payload['event_key'], self.event.key_name)
self.assertEqual(payload['match_key'], '{}_qm1'.format(self.event.key_name))
def test_data_payload_team(self):
team = Team.get_by_id('frc1')
notification = MatchScoreNotification(self.match, team)
payload = notification.data_payload
self.assertEqual(len(payload), 3)
self.assertEqual(payload['event_key'], self.event.key_name)
self.assertEqual(payload['match_key'], '{}_qm1'.format(self.event.key_name))
self.assertEqual(payload['team_key'], 'frc1')
def test_webhook_message_data(self):
# Has `event_name`
payload = self.notification.webhook_message_data
self.assertEqual(len(payload), 3)
self.assertEqual(payload['event_key'], self.event.key_name)
self.assertEqual(payload['event_name'], 'Present Test Event')
self.assertIsNotNone(payload['match'])
def test_webhook_message_data_team(self):
team = Team.get_by_id('frc1')
notification = MatchScoreNotification(self.match, team)
payload = notification.webhook_message_data
self.assertEqual(len(payload), 4)
self.assertEqual(payload['event_key'], self.event.key_name)
self.assertEqual(payload['event_name'], 'Present Test Event')
self.assertEqual(payload['team_key'], 'frc1')
self.assertIsNotNone(payload['match'])
|
configs/mobilenet_cfbi.py | yoxu515/CFBI | 312 | 4902 | import torch
import argparse
import os
import sys
import cv2
import time
class Configuration():
def __init__(self):
self.EXP_NAME = 'mobilenetv2_cfbi'
self.DIR_ROOT = './'
self.DIR_DATA = os.path.join(self.DIR_ROOT, 'datasets')
self.DIR_DAVIS = os.path.join(self.DIR_DATA, 'DAVIS')
self.DIR_YTB = os.path.join(self.DIR_DATA, 'YTB/train')
self.DIR_YTB_EVAL = os.path.join(self.DIR_DATA, 'YTB/valid')
self.DIR_RESULT = os.path.join(self.DIR_ROOT, 'result', self.EXP_NAME)
self.DIR_CKPT = os.path.join(self.DIR_RESULT, 'ckpt')
self.DIR_LOG = os.path.join(self.DIR_RESULT, 'log')
self.DIR_IMG_LOG = os.path.join(self.DIR_RESULT, 'log', 'img')
self.DIR_TB_LOG = os.path.join(self.DIR_RESULT, 'log', 'tensorboard')
self.DIR_EVALUATION = os.path.join(self.DIR_RESULT, 'eval')
self.DATASETS = ['youtubevos']
self.DATA_WORKERS = 4
self.DATA_RANDOMCROP = (465, 465)
self.DATA_RANDOMFLIP = 0.5
self.DATA_MAX_CROP_STEPS = 5
self.DATA_MIN_SCALE_FACTOR = 1.
self.DATA_MAX_SCALE_FACTOR = 1.3
self.DATA_SHORT_EDGE_LEN = 480
self.DATA_RANDOM_REVERSE_SEQ = True
self.DATA_DAVIS_REPEAT = 30
self.DATA_CURR_SEQ_LEN = 3
self.DATA_RANDOM_GAP_DAVIS = 3
self.DATA_RANDOM_GAP_YTB = 3
self.PRETRAIN = True
self.PRETRAIN_FULL = False
self.PRETRAIN_MODEL = './pretrain_models/mobilenetv2-deeplabv3p.pth.tar'
self.MODEL_BACKBONE = 'mobilenet'
self.MODEL_MODULE = 'networks.cfbi.cfbi'
self.MODEL_OUTPUT_STRIDE = 16
self.MODEL_ASPP_OUTDIM = 256
self.MODEL_SHORTCUT_DIM = 48
self.MODEL_SEMANTIC_EMBEDDING_DIM = 100
self.MODEL_HEAD_EMBEDDING_DIM = 256
self.MODEL_PRE_HEAD_EMBEDDING_DIM = 64
self.MODEL_GN_GROUPS = 32
self.MODEL_GN_EMB_GROUPS = 25
self.MODEL_MULTI_LOCAL_DISTANCE = [2, 4, 6, 8, 10, 12]
self.MODEL_LOCAL_DOWNSAMPLE = True
self.MODEL_REFINE_CHANNELS = 64 # n * 32
self.MODEL_LOW_LEVEL_INPLANES = 256 if self.MODEL_BACKBONE == 'resnet' else 24
self.MODEL_RELATED_CHANNELS = 64
self.MODEL_EPSILON = 1e-5
self.MODEL_MATCHING_BACKGROUND = True
self.MODEL_GCT_BETA_WD = True
self.MODEL_FLOAT16_MATCHING = True
self.MODEL_FREEZE_BN = True
self.MODEL_FREEZE_BACKBONE = False
self.TRAIN_TOTAL_STEPS = 100000
self.TRAIN_START_STEP = 0
self.TRAIN_LR = 0.01
self.TRAIN_MOMENTUM = 0.9
self.TRAIN_COSINE_DECAY = False
self.TRAIN_WARM_UP_STEPS = 1000
self.TRAIN_WEIGHT_DECAY = 15e-5
self.TRAIN_POWER = 0.9
self.TRAIN_GPUS = 4
self.TRAIN_BATCH_SIZE = 8
self.TRAIN_START_SEQ_TRAINING_STEPS = self.TRAIN_TOTAL_STEPS / 2
self.TRAIN_TBLOG = False
self.TRAIN_TBLOG_STEP = 60
self.TRAIN_LOG_STEP = 20
self.TRAIN_IMG_LOG = False
self.TRAIN_TOP_K_PERCENT_PIXELS = 0.15
self.TRAIN_HARD_MINING_STEP = self.TRAIN_TOTAL_STEPS / 2
self.TRAIN_CLIP_GRAD_NORM = 5.
self.TRAIN_SAVE_STEP = 1000
self.TRAIN_MAX_KEEP_CKPT = 8
self.TRAIN_RESUME = False
self.TRAIN_RESUME_CKPT = None
self.TRAIN_RESUME_STEP = 0
self.TRAIN_AUTO_RESUME = True
self.TRAIN_GLOBAL_ATROUS_RATE = 1
self.TRAIN_LOCAL_ATROUS_RATE = 1
self.TRAIN_GLOBAL_CHUNKS = 20
self.TRAIN_DATASET_FULL_RESOLUTION = True
self.TEST_GPU_ID = 0
self.TEST_DATASET = 'youtubevos'
self.TEST_DATASET_FULL_RESOLUTION = False
self.TEST_DATASET_SPLIT = ['val']
self.TEST_CKPT_PATH = None
self.TEST_CKPT_STEP = None # if "None", evaluate the latest checkpoint.
self.TEST_FLIP = False
self.TEST_MULTISCALE = [1]
self.TEST_MIN_SIZE = None
self.TEST_MAX_SIZE = 800 * 1.3 if self.TEST_MULTISCALE == [1] else 800
self.TEST_WORKERS = 4
self.TEST_GLOBAL_CHUNKS = 4
self.TEST_GLOBAL_ATROUS_RATE = 2
self.TEST_LOCAL_ATROUS_RATE = 1
# dist
self.DIST_ENABLE = True
self.DIST_BACKEND = "gloo"
self.DIST_URL = "file://./sharefile"
self.DIST_START_GPU = 0
self.__check()
def __check(self):
if not torch.cuda.is_available():
raise ValueError('config.py: cuda is not avalable')
if self.TRAIN_GPUS == 0:
raise ValueError('config.py: the number of GPU is 0')
for path in [self.DIR_RESULT, self.DIR_CKPT, self.DIR_LOG, self.DIR_EVALUATION, self.DIR_IMG_LOG, self.DIR_TB_LOG]:
if not os.path.isdir(path):
os.makedirs(path)
cfg = Configuration()
|
tools/jdk/local_java_repository.bzl | loongarch64/bazel | 16,989 | 4905 | <reponame>loongarch64/bazel
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for importing and registering a local JDK."""
load(":default_java_toolchain.bzl", "JVM8_TOOLCHAIN_CONFIGURATION", "default_java_toolchain")
def _detect_java_version(repository_ctx, java_bin):
properties_out = repository_ctx.execute([java_bin, "-XshowSettings:properties"]).stderr
# This returns an indented list of properties separated with newlines:
# " java.vendor.url.bug = ... \n"
# " java.version = 11.0.8\n"
# " java.version.date = 2020-11-05\"
strip_properties = [property.strip() for property in properties_out.splitlines()]
version_property = [property for property in strip_properties if property.startswith("java.version = ")]
if len(version_property) != 1:
return None
version_value = version_property[0][len("java.version = "):]
parts = version_value.split(".")
major = parts[0]
if len(parts) == 1:
return major
elif major == "1": # handles versions below 1.8
minor = parts[1]
return minor
return major
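# For illustration (values are examples only): a properties line of
# "java.version = 11.0.8" yields "11", while a pre-9 line such as
# "java.version = 1.8.0_292" yields "8" via the major == "1" branch above.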
def local_java_runtime(name, java_home, version, runtime_name = None, visibility = ["//visibility:public"]):
"""Defines a java_runtime target together with Java runtime and compile toolchain definitions.
Java runtime toolchain is constrained by flag --java_runtime_version having
value set to either name or version argument.
Java compile toolchains are created for --java_language_version flags values
between 8 and version (inclusive). Java compile toolchains use the same
(local) JDK for compilation. This requires a different configuration for JDK8
than the newer versions.
Args:
name: name of the target.
java_home: Path to the JDK.
version: Version of the JDK.
runtime_name: name of java_runtime target if it already exists.
visibility: Visibility that will be applied to the java runtime target
"""
if runtime_name == None:
runtime_name = name
native.java_runtime(
name = runtime_name,
java_home = java_home,
visibility = visibility,
)
native.config_setting(
name = name + "_name_setting",
values = {"java_runtime_version": name},
visibility = ["//visibility:private"],
)
native.config_setting(
name = name + "_version_setting",
values = {"java_runtime_version": version},
visibility = ["//visibility:private"],
)
native.config_setting(
name = name + "_name_version_setting",
values = {"java_runtime_version": name + "_" + version},
visibility = ["//visibility:private"],
)
native.alias(
name = name + "_settings_alias",
actual = select({
name + "_name_setting": name + "_name_setting",
name + "_version_setting": name + "_version_setting",
"//conditions:default": name + "_name_version_setting",
}),
visibility = ["//visibility:private"],
)
native.toolchain(
name = "runtime_toolchain_definition",
target_settings = [":%s_settings_alias" % name],
toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type",
toolchain = runtime_name,
)
if version == "8":
default_java_toolchain(
name = name + "_toolchain_java8",
configuration = JVM8_TOOLCHAIN_CONFIGURATION,
source_version = version,
target_version = version,
java_runtime = runtime_name,
)
elif type(version) == type("") and version.isdigit() and int(version) > 8:
for version in range(8, int(version) + 1):
default_java_toolchain(
name = name + "_toolchain_java" + str(version),
source_version = str(version),
target_version = str(version),
java_runtime = runtime_name,
)
# else version is not recognized and no compilation toolchains are predefined
def _local_java_repository_impl(repository_ctx):
"""Repository rule local_java_repository implementation.
Args:
repository_ctx: repository context
"""
java_home = repository_ctx.attr.java_home
java_home_path = repository_ctx.path(java_home)
if not java_home_path.exists:
        fail(('The path indicated by the "java_home" attribute "%s" (absolute: "%s") ' +
              "does not exist.") % (java_home, str(java_home_path)))
repository_ctx.file(
"WORKSPACE",
"# DO NOT EDIT: automatically generated WORKSPACE file for local_java_repository\n" +
"workspace(name = \"{name}\")\n".format(name = repository_ctx.name),
)
extension = ".exe" if repository_ctx.os.name.lower().find("windows") != -1 else ""
java_bin = java_home_path.get_child("bin").get_child("java" + extension)
if not java_bin.exists:
# Java binary does not exist
repository_ctx.file(
"BUILD.bazel",
_NOJDK_BUILD_TPL.format(
local_jdk = repository_ctx.name,
java_binary = "bin/java" + extension,
java_home = java_home,
),
False,
)
return
# Detect version
version = repository_ctx.attr.version if repository_ctx.attr.version != "" else _detect_java_version(repository_ctx, java_bin)
# Prepare BUILD file using "local_java_runtime" macro
build_file = ""
if repository_ctx.attr.build_file != None:
build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file))
runtime_name = '"jdk"' if repository_ctx.attr.build_file else None
local_java_runtime_macro = """
local_java_runtime(
name = "%s",
runtime_name = %s,
java_home = "%s",
version = "%s",
)
""" % (repository_ctx.name, runtime_name, java_home, version)
repository_ctx.file(
"BUILD.bazel",
'load("@bazel_tools//tools/jdk:local_java_repository.bzl", "local_java_runtime")\n' +
build_file +
local_java_runtime_macro,
)
# Symlink all files
for file in repository_ctx.path(java_home).readdir():
repository_ctx.symlink(file, file.basename)
# Build file template, when JDK does not exist
_NOJDK_BUILD_TPL = '''load("@bazel_tools//tools/jdk:fail_rule.bzl", "fail_rule")
fail_rule(
name = "jdk",
header = "Auto-Configuration Error:",
message = ("Cannot find Java binary {java_binary} in {java_home}; either correct your JAVA_HOME, " +
"PATH or specify Java from remote repository (e.g. " +
"--java_runtime_version=remotejdk_11")
)
config_setting(
name = "localjdk_setting",
values = {{"java_runtime_version": "{local_jdk}"}},
visibility = ["//visibility:private"],
)
toolchain(
name = "runtime_toolchain_definition",
target_settings = [":localjdk_setting"],
toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type",
toolchain = ":jdk",
)
'''
_local_java_repository_rule = repository_rule(
implementation = _local_java_repository_impl,
local = True,
configure = True,
attrs = {
"java_home": attr.string(),
"version": attr.string(),
"build_file": attr.label(),
},
)
def local_java_repository(name, java_home, version = "", build_file = None):
"""Registers a runtime toolchain for local JDK and creates an unregistered compile toolchain.
Toolchain resolution is constrained with --java_runtime_version flag
having value of the "name" or "version" parameter.
Java compile toolchains are created for --java_language_version flags values
between 8 and version (inclusive). Java compile toolchains use the same
(local) JDK for compilation.
    If there is no JDK, "virtual" targets are created, which fail only when actually needed.
Args:
name: A unique name for this rule.
java_home: Location of the JDK imported.
build_file: optionally BUILD file template
version: optionally java version
"""
_local_java_repository_rule(name = name, java_home = java_home, version = version, build_file = build_file)
native.register_toolchains("@" + name + "//:runtime_toolchain_definition")
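# A usage sketch (the repository name and java_home below are illustrative),
# typically placed in a WORKSPACE file:
#
#   load("@bazel_tools//tools/jdk:local_java_repository.bzl", "local_java_repository")
#
#   local_java_repository(
#       name = "my_local_jdk",
#       java_home = "/usr/lib/jvm/java-11-openjdk",
#       version = "11",
#   )
#
# Toolchain resolution then selects this JDK when --java_runtime_version is
# set to either the repository name or the declared/detected version.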
|
tensorflow/contrib/metrics/__init__.py | DEVESHTARASIA/tensorflow | 384 | 4925 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for evaluation metrics and summary statistics.
See the @{$python/contrib.metrics} guide.
@@streaming_accuracy
@@streaming_mean
@@streaming_recall
@@streaming_recall_at_thresholds
@@streaming_precision
@@streaming_precision_at_thresholds
@@streaming_auc
@@streaming_curve_points
@@streaming_recall_at_k
@@streaming_mean_absolute_error
@@streaming_mean_iou
@@streaming_mean_relative_error
@@streaming_mean_squared_error
@@streaming_mean_tensor
@@streaming_root_mean_squared_error
@@streaming_covariance
@@streaming_pearson_correlation
@@streaming_mean_cosine_distance
@@streaming_percentage_less
@@streaming_sensitivity_at_specificity
@@streaming_sparse_average_precision_at_k
@@streaming_sparse_average_precision_at_top_k
@@streaming_sparse_precision_at_k
@@streaming_sparse_precision_at_top_k
@@streaming_sparse_recall_at_k
@@streaming_specificity_at_sensitivity
@@streaming_concat
@@streaming_false_negatives
@@streaming_false_negatives_at_thresholds
@@streaming_false_positives
@@streaming_false_positives_at_thresholds
@@streaming_true_negatives
@@streaming_true_negatives_at_thresholds
@@streaming_true_positives
@@streaming_true_positives_at_thresholds
@@auc_using_histogram
@@accuracy
@@aggregate_metrics
@@aggregate_metric_map
@@confusion_matrix
@@set_difference
@@set_intersection
@@set_size
@@set_union
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import
from tensorflow.contrib.metrics.python.metrics import *
# pylint: enable=wildcard-import
from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix
from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.set_ops import set_difference
from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection
from tensorflow.contrib.metrics.python.ops.set_ops import set_size
from tensorflow.contrib.metrics.python.ops.set_ops import set_union
# pylint: enable=unused-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
eth/vm/forks/petersburg/blocks.py | ggs134/py-evm | 1,641 | 4943 | <reponame>ggs134/py-evm
from rlp.sedes import (
CountableList,
)
from eth.rlp.headers import (
BlockHeader,
)
from eth.vm.forks.byzantium.blocks import (
ByzantiumBlock,
)
from .transactions import (
PetersburgTransaction,
)
class PetersburgBlock(ByzantiumBlock):
transaction_builder = PetersburgTransaction
fields = [
('header', BlockHeader),
('transactions', CountableList(transaction_builder)),
('uncles', CountableList(BlockHeader))
]
|
numba/stencils/stencil.py | auderson/numba | 6,620 | 4959 | <reponame>auderson/numba<filename>numba/stencils/stencil.py<gh_stars>1000+
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
import copy
import numpy as np
from llvmlite import ir as lir
from numba.core import types, typing, utils, ir, config, ir_utils, registry
from numba.core.typing.templates import (CallableTemplate, signature,
infer_global, AbstractTemplate)
from numba.core.imputils import lower_builtin
from numba.core.extending import register_jitable
from numba.core.errors import NumbaValueError
from numba.misc.special import literal_unroll
import numba
import operator
from numba.np import numpy_support
class StencilFuncLowerer(object):
'''Callable class responsible for lowering calls to a specific StencilFunc.
'''
def __init__(self, sf):
self.stencilFunc = sf
def __call__(self, context, builder, sig, args):
cres = self.stencilFunc.compile_for_argtys(sig.args, {},
sig.return_type, None)
res = context.call_internal(builder, cres.fndesc, sig, args)
context.add_linking_libs([cres.library])
return res
@register_jitable
def raise_if_incompatible_array_sizes(a, *args):
ashape = a.shape
# We need literal_unroll here because the stencil might take
# multiple input arrays with different types that are not compatible
# (e.g. values as float[:] and flags as bool[:])
# When more than three total arrays are given, the second and third
# are iterated over in the loop below. Without literal_unroll, their
# types have to match.
# An example failing signature without literal_unroll might be
# (float[:], float[:], bool[:]) (Just (float[:], bool[:]) wouldn't fail)
for arg in literal_unroll(args):
if a.ndim != arg.ndim:
raise ValueError("Secondary stencil array does not have same number "
" of dimensions as the first stencil input.")
argshape = arg.shape
for i in range(len(ashape)):
if ashape[i] > argshape[i]:
raise ValueError("Secondary stencil array has some dimension "
"smaller the same dimension in the first "
"stencil input.")
def slice_addition(the_slice, addend):
""" Called by stencil in Python mode to add the loop index to a
user-specified slice.
"""
return slice(the_slice.start + addend, the_slice.stop + addend)
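# For orientation, the kind of user-level kernel this class compiles is a
# relatively indexed function such as (illustrative only):
#
#     @numba.stencil
#     def kernel(a):
#         return 0.5 * (a[-1] + a[1])
#
# StencilFunc rewrites those relative indices against the loop index variables
# and wraps the rewritten kernel in loop nests over the input array.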
class StencilFunc(object):
"""
A special type to hold stencil information for the IR.
"""
id_counter = 0
def __init__(self, kernel_ir, mode, options):
self.id = type(self).id_counter
type(self).id_counter += 1
self.kernel_ir = kernel_ir
self.mode = mode
self.options = options
self.kws = [] # remember original kws arguments
# stencils only supported for CPU context currently
self._typingctx = registry.cpu_target.typing_context
self._targetctx = registry.cpu_target.target_context
self._typingctx.refresh()
self._targetctx.refresh()
self._install_type(self._typingctx)
self.neighborhood = self.options.get("neighborhood")
self._type_cache = {}
self._lower_me = StencilFuncLowerer(self)
def replace_return_with_setitem(self, blocks, index_vars, out_name):
"""
Find return statements in the IR and replace them with a SetItem
call of the value "returned" by the kernel into the result array.
Returns the block labels that contained return statements.
"""
ret_blocks = []
for label, block in blocks.items():
scope = block.scope
loc = block.loc
new_body = []
for stmt in block.body:
if isinstance(stmt, ir.Return):
ret_blocks.append(label)
# If 1D array then avoid the tuple construction.
if len(index_vars) == 1:
rvar = ir.Var(scope, out_name, loc)
ivar = ir.Var(scope, index_vars[0], loc)
new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc))
else:
# Convert the string names of the index variables into
# ir.Var's.
var_index_vars = []
for one_var in index_vars:
index_var = ir.Var(scope, one_var, loc)
var_index_vars += [index_var]
s_index_name = ir_utils.mk_unique_var("stencil_index")
s_index_var = ir.Var(scope, s_index_name, loc)
# Build a tuple from the index ir.Var's.
tuple_call = ir.Expr.build_tuple(var_index_vars, loc)
new_body.append(ir.Assign(tuple_call, s_index_var, loc))
rvar = ir.Var(scope, out_name, loc)
# Write the return statements original value into
# the array using the tuple index.
si = ir.SetItem(rvar, s_index_var, stmt.value, loc)
new_body.append(si)
else:
new_body.append(stmt)
block.body = new_body
return ret_blocks
def add_indices_to_kernel(self, kernel, index_names, ndim,
neighborhood, standard_indexed, typemap, calltypes):
"""
Transforms the stencil kernel as specified by the user into one
that includes each dimension's index variable as part of the getitem
calls. So, in effect array[-1] becomes array[index0-1].
"""
const_dict = {}
kernel_consts = []
if config.DEBUG_ARRAY_OPT >= 1:
print("add_indices_to_kernel", ndim, neighborhood)
ir_utils.dump_blocks(kernel.blocks)
if neighborhood is None:
need_to_calc_kernel = True
else:
need_to_calc_kernel = False
if len(neighborhood) != ndim:
raise ValueError("%d dimensional neighborhood specified for %d " \
"dimensional input array" % (len(neighborhood), ndim))
tuple_table = ir_utils.get_tuple_table(kernel.blocks)
relatively_indexed = set()
for block in kernel.blocks.values():
scope = block.scope
loc = block.loc
new_body = []
for stmt in block.body:
if (isinstance(stmt, ir.Assign) and
isinstance(stmt.value, ir.Const)):
if config.DEBUG_ARRAY_OPT >= 1:
print("remembering in const_dict", stmt.target.name,
stmt.value.value)
# Remember consts for use later.
const_dict[stmt.target.name] = stmt.value.value
if ((isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op in ['setitem', 'static_setitem']
and stmt.value.value.name in kernel.arg_names) or
(isinstance(stmt, ir.SetItem)
and stmt.target.name in kernel.arg_names)):
raise ValueError("Assignments to arrays passed to stencil " \
"kernels is not allowed.")
if (isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op in ['getitem', 'static_getitem']
and stmt.value.value.name in kernel.arg_names
and stmt.value.value.name not in standard_indexed):
# We found a getitem from the input array.
if stmt.value.op == 'getitem':
stmt_index_var = stmt.value.index
else:
stmt_index_var = stmt.value.index_var
# allow static_getitem since rewrite passes are applied
#raise ValueError("Unexpected static_getitem in add_indices_to_kernel.")
relatively_indexed.add(stmt.value.value.name)
# Store the index used after looking up the variable in
# the const dictionary.
if need_to_calc_kernel:
assert hasattr(stmt_index_var, 'name')
if stmt_index_var.name in tuple_table:
kernel_consts += [tuple_table[stmt_index_var.name]]
elif stmt_index_var.name in const_dict:
kernel_consts += [const_dict[stmt_index_var.name]]
else:
raise NumbaValueError("stencil kernel index is not "
"constant, 'neighborhood' option required")
if ndim == 1:
# Single dimension always has index variable 'index0'.
# tmpvar will hold the real index and is computed by
# adding the relative offset in stmt.value.index to
# the current absolute location in index0.
index_var = ir.Var(scope, index_names[0], loc)
tmpname = ir_utils.mk_unique_var("stencil_index")
tmpvar = ir.Var(scope, tmpname, loc)
stmt_index_var_typ = typemap[stmt_index_var.name]
# If the array is indexed with a slice then we
# have to add the index value with a call to
# slice_addition.
if isinstance(stmt_index_var_typ, types.misc.SliceType):
sa_var = ir.Var(scope, ir_utils.mk_unique_var("slice_addition"), loc)
sa_func = numba.njit(slice_addition)
sa_func_typ = types.functions.Dispatcher(sa_func)
typemap[sa_var.name] = sa_func_typ
g_sa = ir.Global("slice_addition", sa_func, loc)
new_body.append(ir.Assign(g_sa, sa_var, loc))
slice_addition_call = ir.Expr.call(sa_var, [stmt_index_var, index_var], (), loc)
calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp], {})
new_body.append(ir.Assign(slice_addition_call, tmpvar, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value, tmpvar, loc),
stmt.target, loc))
else:
acc_call = ir.Expr.binop(operator.add, stmt_index_var,
index_var, loc)
new_body.append(ir.Assign(acc_call, tmpvar, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value, tmpvar, loc),
stmt.target, loc))
else:
index_vars = []
sum_results = []
s_index_name = ir_utils.mk_unique_var("stencil_index")
s_index_var = ir.Var(scope, s_index_name, loc)
const_index_vars = []
ind_stencils = []
stmt_index_var_typ = typemap[stmt_index_var.name]
# Same idea as above but you have to extract
# individual elements out of the tuple indexing
# expression and add the corresponding index variable
# to them and then reconstitute as a tuple that can
# index the array.
for dim in range(ndim):
tmpname = ir_utils.mk_unique_var("const_index")
tmpvar = ir.Var(scope, tmpname, loc)
new_body.append(ir.Assign(ir.Const(dim, loc),
tmpvar, loc))
const_index_vars += [tmpvar]
index_var = ir.Var(scope, index_names[dim], loc)
index_vars += [index_var]
tmpname = ir_utils.mk_unique_var("ind_stencil_index")
tmpvar = ir.Var(scope, tmpname, loc)
ind_stencils += [tmpvar]
getitemname = ir_utils.mk_unique_var("getitem")
getitemvar = ir.Var(scope, getitemname, loc)
getitemcall = ir.Expr.getitem(stmt_index_var,
const_index_vars[dim], loc)
new_body.append(ir.Assign(getitemcall, getitemvar, loc))
# Get the type of this particular part of the index tuple.
if isinstance(stmt_index_var_typ, types.ConstSized):
one_index_typ = stmt_index_var_typ[dim]
else:
one_index_typ = stmt_index_var_typ[:]
# If the array is indexed with a slice then we
# have to add the index value with a call to
# slice_addition.
if isinstance(one_index_typ, types.misc.SliceType):
sa_var = ir.Var(scope, ir_utils.mk_unique_var("slice_addition"), loc)
sa_func = numba.njit(slice_addition)
sa_func_typ = types.functions.Dispatcher(sa_func)
typemap[sa_var.name] = sa_func_typ
g_sa = ir.Global("slice_addition", sa_func, loc)
new_body.append(ir.Assign(g_sa, sa_var, loc))
slice_addition_call = ir.Expr.call(sa_var, [getitemvar, index_vars[dim]], (), loc)
calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp], {})
new_body.append(ir.Assign(slice_addition_call, tmpvar, loc))
else:
acc_call = ir.Expr.binop(operator.add, getitemvar,
index_vars[dim], loc)
new_body.append(ir.Assign(acc_call, tmpvar, loc))
tuple_call = ir.Expr.build_tuple(ind_stencils, loc)
new_body.append(ir.Assign(tuple_call, s_index_var, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value,s_index_var,loc),
stmt.target,loc))
else:
new_body.append(stmt)
block.body = new_body
if need_to_calc_kernel:
# Find the size of the kernel by finding the maximum absolute value
# index used in the kernel specification.
neighborhood = [[0,0] for _ in range(ndim)]
if len(kernel_consts) == 0:
raise NumbaValueError("Stencil kernel with no accesses to "
"relatively indexed arrays.")
for index in kernel_consts:
if isinstance(index, tuple) or isinstance(index, list):
for i in range(len(index)):
te = index[i]
if isinstance(te, ir.Var) and te.name in const_dict:
te = const_dict[te.name]
if isinstance(te, int):
neighborhood[i][0] = min(neighborhood[i][0], te)
neighborhood[i][1] = max(neighborhood[i][1], te)
else:
raise NumbaValueError(
"stencil kernel index is not constant,"
"'neighborhood' option required")
index_len = len(index)
elif isinstance(index, int):
neighborhood[0][0] = min(neighborhood[0][0], index)
neighborhood[0][1] = max(neighborhood[0][1], index)
index_len = 1
else:
raise NumbaValueError(
"Non-tuple or non-integer used as stencil index.")
if index_len != ndim:
raise NumbaValueError(
"Stencil index does not match array dimensionality.")
return (neighborhood, relatively_indexed)
def get_return_type(self, argtys):
if config.DEBUG_ARRAY_OPT >= 1:
print("get_return_type", argtys)
ir_utils.dump_blocks(self.kernel_ir.blocks)
if not isinstance(argtys[0], types.npytypes.Array):
raise NumbaValueError("The first argument to a stencil kernel must "
"be the primary input array.")
from numba.core import typed_passes
typemap, return_type, calltypes, _ = typed_passes.type_inference_stage(
self._typingctx,
self._targetctx,
self.kernel_ir,
argtys,
None,
{})
if isinstance(return_type, types.npytypes.Array):
raise NumbaValueError(
"Stencil kernel must return a scalar and not a numpy array.")
real_ret = types.npytypes.Array(return_type, argtys[0].ndim,
argtys[0].layout)
return (real_ret, typemap, calltypes)
def _install_type(self, typingctx):
"""Constructs and installs a typing class for a StencilFunc object in
the input typing context.
"""
_ty_cls = type('StencilFuncTyping_' +
str(self.id),
(AbstractTemplate,),
dict(key=self, generic=self._type_me))
typingctx.insert_user_function(self, _ty_cls)
def compile_for_argtys(self, argtys, kwtys, return_type, sigret):
# look in the type cache to find if result array is passed
(_, result, typemap, calltypes) = self._type_cache[argtys]
new_func = self._stencil_wrapper(result, sigret, return_type,
typemap, calltypes, *argtys)
return new_func
def _type_me(self, argtys, kwtys):
"""
Implement AbstractTemplate.generic() for the typing class
built by StencilFunc._install_type().
Return the call-site signature.
"""
if (self.neighborhood is not None and
len(self.neighborhood) != argtys[0].ndim):
raise NumbaValueError("%d dimensional neighborhood specified "
"for %d dimensional input array" %
(len(self.neighborhood), argtys[0].ndim))
argtys_extra = argtys
sig_extra = ""
result = None
if 'out' in kwtys:
argtys_extra += (kwtys['out'],)
sig_extra += ", out=None"
result = kwtys['out']
if 'neighborhood' in kwtys:
argtys_extra += (kwtys['neighborhood'],)
sig_extra += ", neighborhood=None"
# look in the type cache first
if argtys_extra in self._type_cache:
(_sig, _, _, _) = self._type_cache[argtys_extra]
return _sig
(real_ret, typemap, calltypes) = self.get_return_type(argtys)
sig = signature(real_ret, *argtys_extra)
dummy_text = ("def __numba_dummy_stencil({}{}):\n pass\n".format(
",".join(self.kernel_ir.arg_names), sig_extra))
exec(dummy_text)
dummy_func = eval("__numba_dummy_stencil")
sig = sig.replace(pysig=utils.pysignature(dummy_func))
self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)])
self._type_cache[argtys_extra] = (sig, result, typemap, calltypes)
return sig
def copy_ir_with_calltypes(self, ir, calltypes):
"""
Create a copy of a given IR along with its calltype information.
We need a copy of the calltypes because copy propagation applied
to the copied IR will change the calltypes and make subsequent
uses of the original IR invalid.
"""
copy_calltypes = {}
kernel_copy = ir.copy()
kernel_copy.blocks = {}
# For each block...
for (block_label, block) in ir.blocks.items():
new_block = copy.deepcopy(ir.blocks[block_label])
new_block.body = []
# For each statement in each block...
for stmt in ir.blocks[block_label].body:
# Copy the statement to the new copy of the kernel
# and if the original statement is in the original
# calltypes then add the type associated with this
# statement to the calltypes copy.
scopy = copy.deepcopy(stmt)
new_block.body.append(scopy)
if stmt in calltypes:
copy_calltypes[scopy] = calltypes[stmt]
kernel_copy.blocks[block_label] = new_block
return (kernel_copy, copy_calltypes)
def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args):
# Overall approach:
# 1) Construct a string containing a function definition for the stencil function
# that will execute the stencil kernel. This function definition includes a
# unique stencil function name, the parameters to the stencil kernel, loop
# nests across the dimensions of the input array. Those loop nests use the
# computed stencil kernel size so as not to try to compute elements where
# elements outside the bounds of the input array would be needed.
# 2) The body of the loop nest in this new function is a special sentinel
# assignment.
# 3) Get the IR of this new function.
# 4) Split the block containing the sentinel assignment and remove the sentinel
# assignment. Insert the stencil kernel IR into the stencil function IR
# after label and variable renaming of the stencil kernel IR to prevent
# conflicts with the stencil function IR.
# 5) Compile the combined stencil function IR + stencil kernel IR into existence.
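# As a rough illustration (a sketch only, not the verbatim generated source),
# the text built in step 1 for a 2D relatively indexed input "a" looks like:
#
#   def __numba_stencil_<addr>_<id>(a, out=None):
#       full_shape = a.shape
#       out = np.zeros(full_shape, dtype=np.<ret>)  # np.full(...) when cval is given
#       for index0 in range(-min(0, lo0), full_shape[0] - max(0, hi0)):
#           for index1 in range(-min(0, lo1), full_shape[1] - max(0, hi1)):
#               __sentinel__ = 0  # replaced by the kernel IR in step 4
#       return out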
# Copy the kernel so that our changes for this callsite
# won't affect other callsites.
(kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes(
self.kernel_ir, calltypes)
# The stencil kernel body becomes the body of a loop, for which args aren't needed.
ir_utils.remove_args(kernel_copy.blocks)
first_arg = kernel_copy.arg_names[0]
in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap)
name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks)
ir_utils.apply_copy_propagate(
kernel_copy.blocks,
in_cps,
name_var_table,
typemap,
copy_calltypes)
if "out" in name_var_table:
raise NumbaValueError("Cannot use the reserved word 'out' in stencil kernels.")
sentinel_name = ir_utils.get_unused_var_name("__sentinel__", name_var_table)
if config.DEBUG_ARRAY_OPT >= 1:
print("name_var_table", name_var_table, sentinel_name)
the_array = args[0]
if config.DEBUG_ARRAY_OPT >= 1:
print("_stencil_wrapper", return_type, return_type.dtype,
type(return_type.dtype), args)
ir_utils.dump_blocks(kernel_copy.blocks)
# We generate a Numba function to execute this stencil and here
# create the unique name of this function.
stencil_func_name = "__numba_stencil_%s_%s" % (
hex(id(the_array)).replace("-", "_"),
self.id)
# We will put a loop nest in the generated function for each
# dimension in the input array. Here we create the name for
# the index variable for each dimension. index0, index1, ...
index_vars = []
for i in range(the_array.ndim):
index_var_name = ir_utils.get_unused_var_name("index" + str(i),
name_var_table)
index_vars += [index_var_name]
# Create extra signature for out and neighborhood.
out_name = ir_utils.get_unused_var_name("out", name_var_table)
neighborhood_name = ir_utils.get_unused_var_name("neighborhood",
name_var_table)
sig_extra = ""
if result is not None:
sig_extra += ", {}=None".format(out_name)
if "neighborhood" in dict(self.kws):
sig_extra += ", {}=None".format(neighborhood_name)
# Get a list of the standard indexed array names.
standard_indexed = self.options.get("standard_indexing", [])
if first_arg in standard_indexed:
raise NumbaValueError("The first argument to a stencil kernel must "
"use relative indexing, not standard indexing.")
if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0:
raise NumbaValueError("Standard indexing requested for an array name "
"not present in the stencil kernel definition.")
# Add index variables to getitems in the IR to transition the accesses
# in the kernel from relative to regular Python indexing. Returns the
# computed size of the stencil kernel and a list of the relatively indexed
# arrays.
kernel_size, relatively_indexed = self.add_indices_to_kernel(
kernel_copy, index_vars, the_array.ndim,
self.neighborhood, standard_indexed, typemap, copy_calltypes)
if self.neighborhood is None:
self.neighborhood = kernel_size
if config.DEBUG_ARRAY_OPT >= 1:
print("After add_indices_to_kernel")
ir_utils.dump_blocks(kernel_copy.blocks)
# The return in the stencil kernel becomes a setitem for that
# particular point in the iteration space.
ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks,
index_vars, out_name)
if config.DEBUG_ARRAY_OPT >= 1:
print("After replace_return_with_setitem", ret_blocks)
ir_utils.dump_blocks(kernel_copy.blocks)
# Start to form the new function to execute the stencil kernel.
func_text = "def {}({}{}):\n".format(stencil_func_name,
",".join(kernel_copy.arg_names), sig_extra)
# Get loop ranges for each dimension, which could be either int
# or variable. In the latter case we'll use the extra neighborhood
# argument to the function.
ranges = []
for i in range(the_array.ndim):
if isinstance(kernel_size[i][0], int):
lo = kernel_size[i][0]
hi = kernel_size[i][1]
else:
lo = "{}[{}][0]".format(neighborhood_name, i)
hi = "{}[{}][1]".format(neighborhood_name, i)
ranges.append((lo, hi))
# If there is more than one relatively indexed array, add a call to
# a function that will raise an error if any of the relatively indexed
# arrays are of a different size than the first input array.
if len(relatively_indexed) > 1:
func_text += " raise_if_incompatible_array_sizes(" + first_arg
for other_array in relatively_indexed:
if other_array != first_arg:
func_text += "," + other_array
func_text += ")\n"
# Get the shape of the first input array.
shape_name = ir_utils.get_unused_var_name("full_shape", name_var_table)
func_text += " {} = {}.shape\n".format(shape_name, first_arg)
# Converts cval to a string constant
def cval_as_str(cval):
if not np.isfinite(cval):
# See if this is a string-repr numerical const, issue #7286
if np.isnan(cval):
return "np.nan"
elif np.isinf(cval):
if cval < 0:
return "-np.inf"
else:
return "np.inf"
else:
return str(cval)
# If we have to allocate the output array (the out argument was not used)
# then use np.full if the user specified a cval stencil decorator option,
# or np.zeros if they didn't.
if result is None:
return_type_name = numpy_support.as_dtype(
return_type.dtype).type.__name__
if "cval" in self.options:
cval = self.options["cval"]
if return_type.dtype != typing.typeof.typeof(cval):
msg = "cval type does not match stencil return type."
raise NumbaValueError(msg)
out_init ="{} = np.full({}, {}, dtype=np.{})\n".format(
out_name, shape_name, cval_as_str(cval),
return_type_name)
else:
out_init ="{} = np.zeros({}, dtype=np.{})\n".format(
out_name, shape_name, return_type_name)
func_text += " " + out_init
else: # result is present, if cval is set then use it
if "cval" in self.options:
cval = self.options["cval"]
cval_ty = typing.typeof.typeof(cval)
if not self._typingctx.can_convert(cval_ty, return_type.dtype):
msg = "cval type does not match stencil return type."
raise NumbaValueError(msg)
out_init = "{}[:] = {}\n".format(out_name, cval_as_str(cval))
func_text += " " + out_init
offset = 1
# Add the loop nests to the new function.
for i in range(the_array.ndim):
for j in range(offset):
func_text += " "
# ranges[i][0] is the minimum relative index used in the i'th dimension,
# but minimums greater than 0 don't preclude any entry in the array.
# So, take the minimum of 0 and the minimum index found in the kernel;
# this will be a non-positive number. Unary negation of that gives the
# number of leading entries in this dimension that cannot be computed.
# ranges[i][1] is the observed maximum relative index in this dimension;
# we take the maximum of 0 and that value because negative maximums would
# not preclude any entry in the array from being used.
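# Worked example (illustrative): if the kernel reads a[-2] and a[1] in this
# dimension then ranges[i] == (-2, 1) and the generated loop header is
#   for index_i in range(2, full_shape[i] - 1)
# i.e. the first two and the last element are skipped.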
func_text += ("for {} in range(-min(0,{}),"
"{}[{}]-max(0,{})):\n").format(
index_vars[i],
ranges[i][0],
shape_name,
i,
ranges[i][1])
offset += 1
for j in range(offset):
func_text += " "
# Put a sentinel in the code so we can locate it in the IR. We will
# remove this sentinel assignment and replace it with the IR for the
# stencil kernel body.
func_text += "{} = 0\n".format(sentinel_name)
func_text += " return {}\n".format(out_name)
if config.DEBUG_ARRAY_OPT >= 1:
print("new stencil func text")
print(func_text)
# Force the new stencil function into existence.
exec(func_text)
stencil_func = eval(stencil_func_name)
if sigret is not None:
pysig = utils.pysignature(stencil_func)
sigret.pysig = pysig
# Get the IR for the newly created stencil function.
from numba.core import compiler
stencil_ir = compiler.run_frontend(stencil_func)
ir_utils.remove_dels(stencil_ir.blocks)
# rename all variables in stencil_ir afresh
var_table = ir_utils.get_name_var_table(stencil_ir.blocks)
new_var_dict = {}
reserved_names = ([sentinel_name, out_name, neighborhood_name,
shape_name] + kernel_copy.arg_names + index_vars)
for name, var in var_table.items():
if name not in reserved_names:
new_var_dict[name] = ir_utils.mk_unique_var(name)
ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict)
stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1
# Shift labels in the kernel copy so they are guaranteed unique
# and don't conflict with any labels in the stencil_ir.
kernel_copy.blocks = ir_utils.add_offset_to_labels(
kernel_copy.blocks, stencil_stub_last_label)
new_label = max(kernel_copy.blocks.keys()) + 1
# Adjust ret_blocks to account for addition of the offset.
ret_blocks = [x + stencil_stub_last_label for x in ret_blocks]
if config.DEBUG_ARRAY_OPT >= 1:
print("ret_blocks w/ offsets", ret_blocks, stencil_stub_last_label)
print("before replace sentinel stencil_ir")
ir_utils.dump_blocks(stencil_ir.blocks)
print("before replace sentinel kernel_copy")
ir_utils.dump_blocks(kernel_copy.blocks)
# Search all the blocks in the stencil outline for the sentinel.
for label, block in stencil_ir.blocks.items():
for i, inst in enumerate(block.body):
if (isinstance( inst, ir.Assign) and
inst.target.name == sentinel_name):
# We found the sentinel assignment.
loc = inst.loc
scope = block.scope
# split block across __sentinel__
# A new block is allocated for the statements prior to the
# sentinel but the new block maintains the current block
# label.
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
# The current block is used for statements after sentinel.
block.body = block.body[i + 1:]
# But the current block gets a new label.
body_first_label = min(kernel_copy.blocks.keys())
# The previous block jumps to the minimum labelled block of
# the kernel body.
prev_block.append(ir.Jump(body_first_label, loc))
# Add all the kernel body blocks to the stencil
# function's IR.
for (l, b) in kernel_copy.blocks.items():
stencil_ir.blocks[l] = b
stencil_ir.blocks[new_label] = block
stencil_ir.blocks[label] = prev_block
# Add a jump from all the blocks that previously contained
# a return in the stencil kernel to the block
# containing statements after the sentinel.
for ret_block in ret_blocks:
stencil_ir.blocks[ret_block].append(
ir.Jump(new_label, loc))
break
else:
continue
break
stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)
ir_utils.remove_dels(stencil_ir.blocks)
assert(isinstance(the_array, types.Type))
array_types = args
new_stencil_param_types = list(array_types)
if config.DEBUG_ARRAY_OPT >= 1:
print("new_stencil_param_types", new_stencil_param_types)
ir_utils.dump_blocks(stencil_ir.blocks)
# Compile the combined stencil function with the replaced loop
# body in it.
ir_utils.fixup_var_define_in_scope(stencil_ir.blocks)
new_func = compiler.compile_ir(
self._typingctx,
self._targetctx,
stencil_ir,
new_stencil_param_types,
None,
compiler.DEFAULT_FLAGS,
{})
return new_func
def __call__(self, *args, **kwargs):
if (self.neighborhood is not None and
len(self.neighborhood) != args[0].ndim):
raise ValueError("{} dimensional neighborhood specified for {} "
"dimensional input array".format(
len(self.neighborhood), args[0].ndim))
if 'out' in kwargs:
result = kwargs['out']
rdtype = result.dtype
rttype = numpy_support.from_dtype(rdtype)
result_type = types.npytypes.Array(rttype, result.ndim,
numpy_support.map_layout(result))
array_types = tuple([typing.typeof.typeof(x) for x in args])
array_types_full = tuple([typing.typeof.typeof(x) for x in args] +
[result_type])
else:
result = None
array_types = tuple([typing.typeof.typeof(x) for x in args])
array_types_full = array_types
if config.DEBUG_ARRAY_OPT >= 1:
print("__call__", array_types, args, kwargs)
(real_ret, typemap, calltypes) = self.get_return_type(array_types)
new_func = self._stencil_wrapper(result, None, real_ret, typemap,
calltypes, *array_types_full)
if result is None:
return new_func.entry_point(*args)
else:
return new_func.entry_point(*(args+(result,)))
def stencil(func_or_mode='constant', **options):
# called on function without specifying mode style
if not isinstance(func_or_mode, str):
mode = 'constant' # default style
func = func_or_mode
else:
mode = func_or_mode
func = None
for option in options:
if option not in ["cval", "standard_indexing", "neighborhood"]:
raise ValueError("Unknown stencil option " + option)
wrapper = _stencil(mode, options)
if func is not None:
return wrapper(func)
return wrapper
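# Usage sketch (illustrative only; the kernel below is hypothetical):
#
#   @stencil(cval=0.0)
#   def smooth(a):
#       return 0.25 * (a[0, -1] + a[0, 1] + a[-1, 0] + a[1, 0])
#
#   result = smooth(some_2d_array)
#
# The decorator can also be applied bare (@stencil), in which case the
# default 'constant' mode is used.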
def _stencil(mode, options):
if mode != 'constant':
raise ValueError("Unsupported mode style " + mode)
def decorated(func):
from numba.core import compiler
kernel_ir = compiler.run_frontend(func)
return StencilFunc(kernel_ir, mode, options)
return decorated
@lower_builtin(stencil)
def stencil_dummy_lower(context, builder, sig, args):
"lowering for dummy stencil calls"
return lir.Constant(lir.IntType(types.intp.bitwidth), 0)
|
runway/core/providers/__init__.py | troyready/runway | 134 | 4963 | """Runway providers."""
|
demos/restful-users/index.py | karldoenitz/karlooper | 161 | 4972 | # -*-encoding:utf-8-*-
import os
from karlooper.web.application import Application
from karlooper.web.request import Request
class UsersHandler(Request):
def get(self):
return self.render("/user-page.html")
class UserInfoHandler(Request):
def post(self):
print(self.get_http_request_message())
size = self.get_parameter("user_size", 0)
size = int(size)
user_list = [{"name": "name_%d" % i, "gender": "male", "age": i + 10} for i in range(size)]
result = {
"status": 0,
"message": "OK",
"data": user_list
}
return self.response_as_json(result)
url_mapping = {
"/users": UsersHandler,
"/user-info": UserInfoHandler
}
settings = {
"template": os.getcwd() + "/templates",
"static": os.getcwd() + "/templates",
"log_enable": False,
"debug": True
}
if __name__ == '__main__':
application = Application(url_mapping, settings=settings)
application.listen(port=8080)
application.run()
|
conans/server/server_launcher.py | Wonders11/conan | 6,205 | 4991 | <gh_stars>1000+
from conans.server.launcher import ServerLauncher
from conans.util.env_reader import get_env
launcher = ServerLauncher(server_dir=get_env("CONAN_SERVER_HOME"))
app = launcher.server.root_app
def main(*args):
launcher.launch()
if __name__ == "__main__":
main()
|
sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py | praveenkuttappan/azure-sdk-for-python | 2,728 | 4992 | <filename>sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.system_data = None
class ProxyResource(Resource):
"""The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
class AccessPolicyEntity(ProxyResource):
"""Access policies help define the authentication rules, and control access to specific video resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param role: Defines the access level granted by this policy. Possible values include:
"Reader".
:type role: str or ~video_analyzer.models.AccessPolicyRole
:param authentication: Authentication method to be used when validating client API access.
:type authentication: ~video_analyzer.models.AuthenticationBase
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'role': {'key': 'properties.role', 'type': 'str'},
'authentication': {'key': 'properties.authentication', 'type': 'AuthenticationBase'},
}
def __init__(
self,
**kwargs
):
super(AccessPolicyEntity, self).__init__(**kwargs)
self.role = kwargs.get('role', None)
self.authentication = kwargs.get('authentication', None)
class AccessPolicyEntityCollection(msrest.serialization.Model):
"""A collection of AccessPolicyEntity items.
:param value: A collection of AccessPolicyEntity items.
:type value: list[~video_analyzer.models.AccessPolicyEntity]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[AccessPolicyEntity]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccessPolicyEntityCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class AccountEncryption(msrest.serialization.Model):
"""Defines how the Video Analyzer account is (optionally) encrypted.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of key used to encrypt the Account Key. Possible values
include: "SystemKey", "CustomerKey".
:type type: str or ~video_analyzer.models.AccountEncryptionKeyType
:param key_vault_properties: The properties of the key used to encrypt the account.
:type key_vault_properties: ~video_analyzer.models.KeyVaultProperties
:param identity: The Key Vault identity.
:type identity: ~video_analyzer.models.ResourceIdentity
:ivar status: The current status of the Key Vault mapping.
:vartype status: str
"""
_validation = {
'type': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccountEncryption, self).__init__(**kwargs)
self.type = kwargs['type']
self.key_vault_properties = kwargs.get('key_vault_properties', None)
self.identity = kwargs.get('identity', None)
self.status = None
class AudioEncoderBase(msrest.serialization.Model):
"""Base type for all audio encoder presets, which define the recipe or instructions on how audio should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AudioEncoderAac.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded
(2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160,
192, 224, and 256. If omitted, the bitrate of the input audio is used.
:type bitrate_kbps: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'}
}
def __init__(
self,
**kwargs
):
super(AudioEncoderBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
class AudioEncoderAac(AudioEncoderBase):
"""A custom preset for encoding audio with the AAC codec.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded
(2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160,
192, 224, and 256. If omitted, the bitrate of the input audio is used.
:type bitrate_kbps: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AudioEncoderAac, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac' # type: str
class AuthenticationBase(msrest.serialization.Model):
"""Base class for access policies authentication methods.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JwtAuthentication.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'}
}
def __init__(
self,
**kwargs
):
super(AuthenticationBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class CertificateSource(msrest.serialization.Model):
"""Base class for certificate sources.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: PemCertificateList.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'}
}
def __init__(
self,
**kwargs
):
super(CertificateSource, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class CheckNameAvailabilityRequest(msrest.serialization.Model):
"""The check availability request body.
:param name: The name of the resource for which availability needs to be checked.
:type name: str
:param type: The resource type.
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CheckNameAvailabilityRequest, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.type = kwargs.get('type', None)
class CheckNameAvailabilityResponse(msrest.serialization.Model):
"""The check availability result.
:param name_available: Indicates if the resource name is available.
:type name_available: bool
:param reason: The reason why the given name is not available. Possible values include:
"Invalid", "AlreadyExists".
:type reason: str or ~video_analyzer.models.CheckNameAvailabilityReason
:param message: Detailed reason why the given name is available.
:type message: str
"""
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CheckNameAvailabilityResponse, self).__init__(**kwargs)
self.name_available = kwargs.get('name_available', None)
self.reason = kwargs.get('reason', None)
self.message = kwargs.get('message', None)
class CredentialsBase(msrest.serialization.Model):
"""Base class for credential objects.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: UsernamePasswordCredentials.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'}
}
def __init__(
self,
**kwargs
):
super(CredentialsBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class TokenKey(msrest.serialization.Model):
"""Key properties for JWT token validation.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EccTokenKey, RsaTokenKey.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param kid: Required. JWT token key id. Validation keys are looked up based on the key id
present on the JWT token header.
:type kid: str
"""
_validation = {
'type': {'required': True},
'kid': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'}
}
def __init__(
self,
**kwargs
):
super(TokenKey, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.kid = kwargs['kid']
class EccTokenKey(TokenKey):
"""Required validation properties for tokens generated with Elliptical Curve algorithm.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param kid: Required. JWT token key id. Validation keys are looked up based on the key id
present on the JWT token header.
:type kid: str
:param alg: Required. Elliptical curve algorithm to be used: ES256, ES384 or ES512. Possible
values include: "ES256", "ES384", "ES512".
:type alg: str or ~video_analyzer.models.AccessPolicyEccAlgo
:param x: Required. X coordinate.
:type x: str
:param y: Required. Y coordinate.
:type y: str
"""
_validation = {
'type': {'required': True},
'kid': {'required': True},
'alg': {'required': True},
'x': {'required': True},
'y': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
'alg': {'key': 'alg', 'type': 'str'},
'x': {'key': 'x', 'type': 'str'},
'y': {'key': 'y', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EccTokenKey, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EccTokenKey' # type: str
self.alg = kwargs['alg']
self.x = kwargs['x']
self.y = kwargs['y']
class EdgeModuleEntity(ProxyResource):
"""The representation of an edge module.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:ivar edge_module_id: Internal ID generated for the instance of the Video Analyzer edge module.
:vartype edge_module_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'edge_module_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'edge_module_id': {'key': 'properties.edgeModuleId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeModuleEntity, self).__init__(**kwargs)
self.edge_module_id = None
class EdgeModuleEntityCollection(msrest.serialization.Model):
"""A collection of EdgeModuleEntity items.
:param value: A collection of EdgeModuleEntity items.
:type value: list[~video_analyzer.models.EdgeModuleEntity]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[EdgeModuleEntity]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeModuleEntityCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class EdgeModuleProvisioningToken(msrest.serialization.Model):
"""Provisioning token properties. A provisioning token allows for a single instance of Azure Video analyzer IoT edge module to be initialized and authorized to the cloud account. The provisioning token itself is short lived and it is only used for the initial handshake between IoT edge module and the cloud. After the initial handshake, the IoT edge module will agree on a set of authentication keys which will be auto-rotated as long as the module is able to periodically connect to the cloud. A new provisioning token can be generated for the same IoT edge module in case the module state lost or reset.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar expiration_date: The expiration date of the registration token. The Azure Video Analyzer
IoT edge module must be initialized and connected to the Internet prior to the token expiration
date.
:vartype expiration_date: ~datetime.datetime
:ivar token: The token blob to be provided to the Azure Video Analyzer IoT edge module through
the Azure IoT Edge module twin properties.
:vartype token: str
"""
_validation = {
'expiration_date': {'readonly': True},
'token': {'readonly': True},
}
_attribute_map = {
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'token': {'key': 'token', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeModuleProvisioningToken, self).__init__(**kwargs)
self.expiration_date = None
self.token = None
class EncoderPresetBase(msrest.serialization.Model):
"""Base type for all encoder presets, which define the recipe or instructions on how the input content should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EncoderCustomPreset, EncoderSystemPreset.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'}
}
def __init__(
self,
**kwargs
):
super(EncoderPresetBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class EncoderCustomPreset(EncoderPresetBase):
"""Describes a custom preset for encoding the input content using the encoder processor.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param audio_encoder: Describes a custom preset for encoding audio.
:type audio_encoder: ~video_analyzer.models.AudioEncoderBase
:param video_encoder: Describes a custom preset for encoding video.
:type video_encoder: ~video_analyzer.models.VideoEncoderBase
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'audio_encoder': {'key': 'audioEncoder', 'type': 'AudioEncoderBase'},
'video_encoder': {'key': 'videoEncoder', 'type': 'VideoEncoderBase'},
}
def __init__(
self,
**kwargs
):
super(EncoderCustomPreset, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset' # type: str
self.audio_encoder = kwargs.get('audio_encoder', None)
self.video_encoder = kwargs.get('video_encoder', None)
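# Construction sketch (illustrative only; this generated model is built from
# keyword arguments, and the values below are placeholders):
#
#   preset = EncoderCustomPreset(
#       audio_encoder=AudioEncoderAac(bitrate_kbps="128"),
#   )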
class NodeBase(msrest.serialization.Model):
"""Base class for nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'}
}
def __init__(
self,
**kwargs
):
super(NodeBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.name = kwargs['name']
class ProcessorNodeBase(NodeBase):
"""Base class for topology processor nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EncoderProcessor.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'}
}
def __init__(
self,
**kwargs
):
super(ProcessorNodeBase, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase' # type: str
self.inputs = kwargs['inputs']
class EncoderProcessor(ProcessorNodeBase):
"""Encoder processor allows for encoding of the input content. For example, it can used to change the resolution from 4K to 1280x720.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
:param preset: Required. The encoder preset, which defines the recipe or instructions on how
the input content should be processed.
:type preset: ~video_analyzer.models.EncoderPresetBase
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
'preset': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
'preset': {'key': 'preset', 'type': 'EncoderPresetBase'},
}
def __init__(
self,
**kwargs
):
super(EncoderProcessor, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor' # type: str
self.preset = kwargs['preset']
class EncoderSystemPreset(EncoderPresetBase):
"""Describes a built-in preset for encoding the input content using the encoder processor.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Name of the built-in encoding preset. Possible values include:
"SingleLayer_540p_H264_AAC", "SingleLayer_720p_H264_AAC", "SingleLayer_1080p_H264_AAC",
"SingleLayer_2160p_H264_AAC".
:type name: str or ~video_analyzer.models.EncoderSystemPresetType
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EncoderSystemPreset, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset' # type: str
self.name = kwargs['name']
class Endpoint(msrest.serialization.Model):
"""The endpoint details.
All required parameters must be populated in order to send to Azure.
:param endpoint_url: The URL of the endpoint.
:type endpoint_url: str
:param type: Required. The type of the endpoint. Possible values include: "ClientApi".
:type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'endpoint_url': {'key': 'endpointUrl', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Endpoint, self).__init__(**kwargs)
self.endpoint_url = kwargs.get('endpoint_url', None)
self.type = kwargs['type']
class EndpointBase(msrest.serialization.Model):
"""Base class for endpoints.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: TlsEndpoint, UnsecuredEndpoint.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param credentials: Required. Credentials to be presented to the endpoint.
:type credentials: ~video_analyzer.models.CredentialsBase
:param url: Required. The endpoint URL for Video Analyzer to connect to.
:type url: str
:param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
URL. This is an optional property, typically used when the endpoint is behind a firewall.
:type tunnel: ~video_analyzer.models.TunnelBase
"""
_validation = {
'type': {'required': True},
'credentials': {'required': True},
'url': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
'url': {'key': 'url', 'type': 'str'},
'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'}
}
def __init__(
self,
**kwargs
):
super(EndpointBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.credentials = kwargs['credentials']
self.url = kwargs['url']
self.tunnel = kwargs.get('tunnel', None)
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~video_analyzer.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~video_analyzer.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class GroupLevelAccessControl(msrest.serialization.Model):
"""Group level network access control.
:param public_network_access: Whether or not public network access is allowed for specified
resources under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
:type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
"""
_attribute_map = {
'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GroupLevelAccessControl, self).__init__(**kwargs)
self.public_network_access = kwargs.get('public_network_access', None)
class IotHub(msrest.serialization.Model):
"""The IoT Hub details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. The IoT Hub resource identifier.
:type id: str
:param identity: Required. The IoT Hub identity.
:type identity: ~video_analyzer.models.ResourceIdentity
:ivar status: The current status of the Iot Hub mapping.
:vartype status: str
"""
_validation = {
'id': {'required': True},
'identity': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHub, self).__init__(**kwargs)
self.id = kwargs['id']
self.identity = kwargs['identity']
self.status = None
class JwtAuthentication(AuthenticationBase):
"""Properties for access validation based on JSON Web Tokens (JWT).
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param issuers: List of expected token issuers. Token issuer is valid if it matches at least
one of the given values.
:type issuers: list[str]
:param audiences: List of expected token audiences. Token audience is valid if it matches at
least one of the given values.
:type audiences: list[str]
:param claims: List of additional token claims to be validated. The token must contain all claims
and their respective values for it to be valid.
:type claims: list[~video_analyzer.models.TokenClaim]
:param keys: List of keys which can be used to validate access tokens. Having multiple keys
allows for seamless rotation of the token signing key. The token signature must match exactly
one key.
:type keys: list[~video_analyzer.models.TokenKey]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'issuers': {'key': 'issuers', 'type': '[str]'},
'audiences': {'key': 'audiences', 'type': '[str]'},
'claims': {'key': 'claims', 'type': '[TokenClaim]'},
'keys': {'key': 'keys', 'type': '[TokenKey]'},
}
def __init__(
self,
**kwargs
):
super(JwtAuthentication, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication' # type: str
self.issuers = kwargs.get('issuers', None)
self.audiences = kwargs.get('audiences', None)
self.claims = kwargs.get('claims', None)
self.keys = kwargs.get('keys', None)
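# Construction sketch (illustrative only; issuer/audience values and key
# coordinates are placeholders):
#
#   auth = JwtAuthentication(
#       issuers=["https://issuer.example.com"],
#       audiences=["https://videoanalyzer.example.com"],
#       keys=[EccTokenKey(kid="key1", alg="ES256", x="<x-coord>", y="<y-coord>")],
#   )
#
# The resulting object can be supplied as the 'authentication' property of an
# AccessPolicyEntity.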
class KeyVaultProperties(msrest.serialization.Model):
"""The details for accessing the encryption keys in Key Vault.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param key_identifier: Required. The URL of the Key Vault key used to encrypt the account. The
key may either be versioned (for example https://vault/keys/mykey/version1) or reference a key
without a version (for example https://vault/keys/mykey).
:type key_identifier: str
:ivar current_key_identifier: The current key used to encrypt the Video Analyzer account, including
the key version.
:vartype current_key_identifier: str
"""
_validation = {
'key_identifier': {'required': True},
'current_key_identifier': {'readonly': True},
}
_attribute_map = {
'key_identifier': {'key': 'keyIdentifier', 'type': 'str'},
'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultProperties, self).__init__(**kwargs)
self.key_identifier = kwargs['key_identifier']
self.current_key_identifier = None
class ListProvisioningTokenInput(msrest.serialization.Model):
"""The input parameters to generate registration token for the Azure Video Analyzer IoT edge module.
All required parameters must be populated in order to send to Azure.
:param expiration_date: Required. The desired expiration date of the registration token. The
Azure Video Analyzer IoT edge module must be initialized and connected to the Internet prior to
the token expiration date.
:type expiration_date: ~datetime.datetime
"""
_validation = {
'expiration_date': {'required': True},
}
_attribute_map = {
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(ListProvisioningTokenInput, self).__init__(**kwargs)
self.expiration_date = kwargs['expiration_date']
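# Construction sketch (illustrative only; the date is a placeholder):
#
#   import datetime
#   token_input = ListProvisioningTokenInput(
#       expiration_date=datetime.datetime(2030, 1, 1),
#   )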
class LivePipeline(ProxyResource):
"""Live pipeline represents a unique instance of a live topology, used for real-time ingestion, archiving and publishing of content for a unique RTSP camera.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: The reference to an existing pipeline topology defined for real-time
content processing. When activated, this live pipeline will process content according to the
pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live pipeline. The
allowed range is from 500 to 3000 Kbps in increments of 100 Kbps. If the RTSP camera exceeds
this capacity, then the service will disconnect temporarily from the camera. It will retry to
re-establish connection (with exponential backoff), checking to see if the camera bitrate is
now below the reserved capacity. Doing so will ensure that one 'noisy neighbor' does not affect
other live pipelines in your account.
:type bitrate_kbps: int
:ivar state: Current state of the pipeline (read-only). Possible values include: "Inactive",
"Activating", "Active", "Deactivating".
:vartype state: str or ~video_analyzer.models.LivePipelineState
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'},
'state': {'key': 'properties.state', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(LivePipeline, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
self.state = None
self.parameters = kwargs.get('parameters', None)
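# Construction sketch (illustrative only; the topology name, description and
# bitrate are placeholders):
#
#   pipeline = LivePipeline(
#       topology_name="my-topology",
#       description="Ingest from the lobby camera",
#       bitrate_kbps=1500,
#   )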
class LivePipelineCollection(msrest.serialization.Model):
"""A collection of LivePipeline items.
:param value: A collection of LivePipeline items.
:type value: list[~video_analyzer.models.LivePipeline]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[LivePipeline]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LivePipelineCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class LivePipelineOperationStatus(msrest.serialization.Model):
"""Used for tracking the status of an operation on the live pipeline.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the live pipeline operation.
:vartype name: str
:ivar status: The status of the live pipeline operation.
:vartype status: str
:ivar error: The error details for the live pipeline operation.
:vartype error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'readonly': True},
'status': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(LivePipelineOperationStatus, self).__init__(**kwargs)
self.name = None
self.status = None
self.error = None
class LivePipelineUpdate(ProxyResource):
"""Live pipeline represents a unique instance of a live topology, used for real-time ingestion, archiving and publishing of content for a unique RTSP camera.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: The reference to an existing pipeline topology defined for real-time
content processing. When activated, this live pipeline will process content according to the
pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live pipeline. The
allowed range is from 500 to 3000 Kbps in increments of 100 Kbps. If the RTSP camera exceeds
this capacity, then the service will temporarily disconnect from the camera. It will retry to
re-establish the connection (with exponential backoff), checking whether the camera bitrate is
now below the reserved capacity. This ensures that one 'noisy neighbor' does not affect
other live pipelines in your account.
:type bitrate_kbps: int
:ivar state: Current state of the pipeline (read-only). Possible values include: "Inactive",
"Activating", "Active", "Deactivating".
:vartype state: str or ~video_analyzer.models.LivePipelineState
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'},
'state': {'key': 'properties.state', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(LivePipelineUpdate, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
self.state = None
self.parameters = kwargs.get('parameters', None)
class LogSpecification(msrest.serialization.Model):
"""A diagnostic log emitted by service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The diagnostic log category name.
:vartype name: str
:ivar display_name: The diagnostic log category display name.
:vartype display_name: str
:ivar blob_duration: The time range for requests in each blob.
:vartype blob_duration: str
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'blob_duration': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'blob_duration': {'key': 'blobDuration', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LogSpecification, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.blob_duration = None
class MetricDimension(msrest.serialization.Model):
"""A metric dimension.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The metric dimension name.
:vartype name: str
:ivar display_name: The display name for the dimension.
:vartype display_name: str
:ivar to_be_exported_for_shoebox: Whether to export metric to shoebox.
:vartype to_be_exported_for_shoebox: bool
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'to_be_exported_for_shoebox': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(MetricDimension, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.to_be_exported_for_shoebox = None
class MetricSpecification(msrest.serialization.Model):
"""A metric emitted by service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The metric name.
:vartype name: str
:ivar display_name: The metric display name.
:vartype display_name: str
:ivar display_description: The metric display description.
:vartype display_description: str
:ivar unit: The metric unit. Possible values include: "Bytes", "Count", "Milliseconds".
:vartype unit: str or ~video_analyzer.models.MetricUnit
:ivar aggregation_type: The metric aggregation type. Possible values include: "Average",
"Count", "Total".
:vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType
:ivar lock_aggregation_type: The metric lock aggregation type. Possible values include:
"Average", "Count", "Total".
:vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType
:param supported_aggregation_types: Supported aggregation types.
:type supported_aggregation_types: list[str]
:ivar dimensions: The metric dimensions.
:vartype dimensions: list[~video_analyzer.models.MetricDimension]
:ivar enable_regional_mdm_account: Indicates whether regional MDM account is enabled.
:vartype enable_regional_mdm_account: bool
:ivar source_mdm_account: The source MDM account.
:vartype source_mdm_account: str
:ivar source_mdm_namespace: The source MDM namespace.
:vartype source_mdm_namespace: str
:ivar supported_time_grain_types: The supported time grain types.
:vartype supported_time_grain_types: list[str]
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'display_description': {'readonly': True},
'unit': {'readonly': True},
'aggregation_type': {'readonly': True},
'lock_aggregation_type': {'readonly': True},
'dimensions': {'readonly': True},
'enable_regional_mdm_account': {'readonly': True},
'source_mdm_account': {'readonly': True},
'source_mdm_namespace': {'readonly': True},
'supported_time_grain_types': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'display_description': {'key': 'displayDescription', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'},
'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'},
'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(MetricSpecification, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.display_description = None
self.unit = None
self.aggregation_type = None
self.lock_aggregation_type = None
self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None)
self.dimensions = None
self.enable_regional_mdm_account = None
self.source_mdm_account = None
self.source_mdm_namespace = None
self.supported_time_grain_types = None
class NetworkAccessControl(msrest.serialization.Model):
"""Network access control for video analyzer account.
:param integration: Public network access for integration group.
:type integration: ~video_analyzer.models.GroupLevelAccessControl
:param ingestion: Public network access for ingestion group.
:type ingestion: ~video_analyzer.models.GroupLevelAccessControl
:param consumption: Public network access for consumption group.
:type consumption: ~video_analyzer.models.GroupLevelAccessControl
"""
_attribute_map = {
'integration': {'key': 'integration', 'type': 'GroupLevelAccessControl'},
'ingestion': {'key': 'ingestion', 'type': 'GroupLevelAccessControl'},
'consumption': {'key': 'consumption', 'type': 'GroupLevelAccessControl'},
}
def __init__(
self,
**kwargs
):
super(NetworkAccessControl, self).__init__(**kwargs)
self.integration = kwargs.get('integration', None)
self.ingestion = kwargs.get('ingestion', None)
self.consumption = kwargs.get('consumption', None)
class NodeInput(msrest.serialization.Model):
"""Describes an input signal to be used on a pipeline node.
All required parameters must be populated in order to send to Azure.
:param node_name: Required. The name of the upstream node in the pipeline whose output is used
as input to the current node.
:type node_name: str
"""
_validation = {
'node_name': {'required': True},
}
_attribute_map = {
'node_name': {'key': 'nodeName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(NodeInput, self).__init__(**kwargs)
self.node_name = kwargs['node_name']
class Operation(msrest.serialization.Model):
"""An operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. The operation name.
:type name: str
:param display: The operation display name.
:type display: ~video_analyzer.models.OperationDisplay
:param origin: Origin of the operation.
:type origin: str
:param properties: Operation properties format.
:type properties: ~video_analyzer.models.Properties
:param is_data_action: Whether the operation applies to data-plane.
:type is_data_action: bool
:param action_type: Indicates the action type. Possible values include: "Internal".
:type action_type: str or ~video_analyzer.models.ActionType
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'Properties'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'action_type': {'key': 'actionType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = kwargs['name']
self.display = kwargs.get('display', None)
self.origin = kwargs.get('origin', None)
self.properties = kwargs.get('properties', None)
self.is_data_action = kwargs.get('is_data_action', None)
self.action_type = kwargs.get('action_type', None)
class OperationCollection(msrest.serialization.Model):
"""A collection of Operation items.
:param value: A collection of Operation items.
:type value: list[~video_analyzer.models.Operation]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
}
def __init__(
self,
**kwargs
):
super(OperationCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class OperationDisplay(msrest.serialization.Model):
"""Operation details.
:param provider: The service provider.
:type provider: str
:param resource: Resource on which the operation is performed.
:type resource: str
:param operation: The operation type.
:type operation: str
:param description: The operation description.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class ParameterDeclaration(msrest.serialization.Model):
"""Single topology parameter declaration. Declared parameters can and must be referenced throughout the topology and can optionally have default values to be used when they are not defined in the pipelines.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the parameter.
:type name: str
:param type: Required. Type of the parameter. Possible values include: "String",
"SecretString", "Int", "Double", "Bool".
:type type: str or ~video_analyzer.models.ParameterType
:param description: Description of the parameter.
:type description: str
:param default: The default value for the parameter to be used if the pipeline does not specify
a value.
:type default: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'default': {'key': 'default', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ParameterDeclaration, self).__init__(**kwargs)
self.name = kwargs['name']
self.type = kwargs['type']
self.description = kwargs.get('description', None)
self.default = kwargs.get('default', None)
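# Illustrative sketch: declaring a topology parameter that can be referenced elsewhere in
# the topology as "${rtspUrlParameter}". The name, description and default value below
# are placeholders.
def _example_parameter_declaration():
    """Return a ParameterDeclaration with placeholder values (illustration only)."""
    return ParameterDeclaration(
        name="rtspUrlParameter",
        type="String",                       # one of: String, SecretString, Int, Double, Bool
        description="RTSP URL of the source camera",
        default="rtsp://localhost:554/stream",
    )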
class ParameterDefinition(msrest.serialization.Model):
"""Defines the parameter value of an specific pipeline topology parameter. See pipeline topology parameters for more information.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the parameter declared in the pipeline topology.
:type name: str
:param value: Parameter value to be applied on this specific pipeline.
:type value: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ParameterDefinition, self).__init__(**kwargs)
self.name = kwargs['name']
self.value = kwargs.get('value', None)
class PemCertificateList(CertificateSource):
"""A list of PEM formatted certificates.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param certificates: Required. PEM formatted public certificates. One certificate per entry.
:type certificates: list[str]
"""
_validation = {
'type': {'required': True},
'certificates': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'certificates': {'key': 'certificates', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(PemCertificateList, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str
self.certificates = kwargs['certificates']
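# Illustrative sketch: a PemCertificateList carrying one PEM-formatted public
# certificate. The certificate body below is a truncated placeholder, not a real
# certificate.
def _example_pem_certificate_list():
    """Return a PemCertificateList with a placeholder certificate (illustration only)."""
    placeholder_pem = (
        "-----BEGIN CERTIFICATE-----\n"
        "MIIB...placeholder...\n"
        "-----END CERTIFICATE-----\n"
    )
    return PemCertificateList(certificates=[placeholder_pem])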
class PipelineJob(ProxyResource):
"""Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: Reference to an existing pipeline topology. When activated, this pipeline
job will process content according to the pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:ivar state: Current state of the pipeline (read-only). Possible values include: "Processing",
"Canceled", "Completed", "Failed".
:vartype state: str or ~video_analyzer.models.PipelineJobState
:ivar expiration: The date-time by which this pipeline job will be automatically deleted from
your account.
:vartype expiration: ~datetime.datetime
:ivar error: Details about the error, in case the pipeline job fails.
:vartype error: ~video_analyzer.models.PipelineJobError
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
'expiration': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'},
'error': {'key': 'properties.error', 'type': 'PipelineJobError'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(PipelineJob, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.state = None
self.expiration = None
self.error = None
self.parameters = kwargs.get('parameters', None)
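# Illustrative sketch: constructing a PipelineJob for offline (batch) processing. The
# topology and parameter names are placeholders; 'state', 'expiration' and 'error' are
# read-only and populated by the service.
def _example_pipeline_job():
    """Return a PipelineJob with placeholder values (illustration only)."""
    return PipelineJob(
        topology_name="recordedExportTopology",
        description="Export a portion of archived video",
        parameters=[
            ParameterDefinition(name="videoNameParameter", value="camera1-archive"),
        ],
    )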
class PipelineJobCollection(msrest.serialization.Model):
"""A collection of PipelineJob items.
:param value: A collection of PipelineJob items.
:type value: list[~video_analyzer.models.PipelineJob]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PipelineJob]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PipelineJobError(msrest.serialization.Model):
"""Details about the error for a failed pipeline job.
:param code: The error code.
:type code: str
:param message: The error message.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobError, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class PipelineJobOperationStatus(msrest.serialization.Model):
"""Used for tracking the status of an operation on the pipeline job.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the pipeline job operation.
:vartype name: str
:ivar status: The status of the pipeline job operation.
:vartype status: str
:ivar error: The error details for the pipeline job operation.
:vartype error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'readonly': True},
'status': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobOperationStatus, self).__init__(**kwargs)
self.name = None
self.status = None
self.error = None
class PipelineJobUpdate(ProxyResource):
"""Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: Reference to an existing pipeline topology. When activated, this pipeline
job will process content according to the pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:ivar state: Current state of the pipeline (read-only). Possible values include: "Processing",
"Canceled", "Completed", "Failed".
:vartype state: str or ~video_analyzer.models.PipelineJobState
:ivar expiration: The date-time by which this pipeline job will be automatically deleted from
your account.
:vartype expiration: ~datetime.datetime
:ivar error: Details about the error, in case the pipeline job fails.
:vartype error: ~video_analyzer.models.PipelineJobError
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
'expiration': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'},
'error': {'key': 'properties.error', 'type': 'PipelineJobError'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobUpdate, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.state = None
self.expiration = None
self.error = None
self.parameters = kwargs.get('parameters', None)
class PipelineTopology(ProxyResource):
"""Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from a RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following:
* Parameters: list of user defined parameters that can be references across the topology nodes.
* Sources: list of one or more data sources nodes such as an RTSP source which allows for content to be ingested from cameras.
* Processors: list of nodes which perform data analysis or transformations.
* Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param kind: Required. Topology kind. Possible values include: "Live", "Batch".
:type kind: str or ~video_analyzer.models.Kind
:param sku: Required. Describes the properties of a SKU.
:type sku: ~video_analyzer.models.Sku
:param description: An optional description of the pipeline topology. It is recommended that
the expected use of the topology be described here.
:type description: str
:param parameters: List of the topology parameter declarations. Parameters declared here can be
referenced throughout the topology nodes through the use of "${PARAMETER_NAME}" string pattern.
Parameters can have optional default values and can later be defined in individual instances of
the pipeline.
:type parameters: list[~video_analyzer.models.ParameterDeclaration]
:param sources: List of the topology source nodes. Source nodes enable external data to be
ingested by the pipeline.
:type sources: list[~video_analyzer.models.SourceNodeBase]
:param processors: List of the topology processor nodes. Processor nodes enable pipeline data
to be analyzed, processed or transformed.
:type processors: list[~video_analyzer.models.ProcessorNodeBase]
:param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or
exported.
:type sinks: list[~video_analyzer.models.SinkNodeBase]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'kind': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'kind': {'key': 'kind', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'description': {'key': 'properties.description', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'},
'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'},
'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'},
'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'},
}
def __init__(
self,
**kwargs
):
super(PipelineTopology, self).__init__(**kwargs)
self.kind = kwargs['kind']
self.sku = kwargs['sku']
self.description = kwargs.get('description', None)
self.parameters = kwargs.get('parameters', None)
self.sources = kwargs.get('sources', None)
self.processors = kwargs.get('processors', None)
self.sinks = kwargs.get('sinks', None)
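# Illustrative sketch: a minimal live PipelineTopology shell with one declared parameter.
# Sources, processors and sinks are left empty here for brevity (see the RTSP source
# sketch after SecureIotDeviceRemoteTunnel below); a deployable topology typically needs
# at least one source and one sink. All names are placeholders.
def _example_pipeline_topology():
    """Return a PipelineTopology with placeholder values (illustration only)."""
    return PipelineTopology(
        kind="Live",
        sku=Sku(name="Live_S1"),
        description="Ingest from an RTSP camera and archive the content",
        parameters=[
            ParameterDeclaration(name="rtspUrlParameter", type="String"),
            ParameterDeclaration(name="rtspPasswordParameter", type="SecretString"),
        ],
        sources=[],        # e.g. an RtspSource referencing "${rtspUrlParameter}"
        sinks=[],          # e.g. a VideoSink referencing the source via NodeInput
    )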
class PipelineTopologyCollection(msrest.serialization.Model):
"""A collection of PipelineTopology items.
:param value: A collection of PipelineTopology items.
:type value: list[~video_analyzer.models.PipelineTopology]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PipelineTopology]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PipelineTopologyCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PipelineTopologyUpdate(ProxyResource):
"""Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from a RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following:
* Parameters: list of user defined parameters that can be references across the topology nodes.
* Sources: list of one or more data sources nodes such as an RTSP source which allows for content to be ingested from cameras.
* Processors: list of nodes which perform data analysis or transformations.
* Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param kind: Topology kind. Possible values include: "Live", "Batch".
:type kind: str or ~video_analyzer.models.Kind
:param sku: Describes the properties of a SKU.
:type sku: ~video_analyzer.models.Sku
:param description: An optional description of the pipeline topology. It is recommended that
the expected use of the topology be described here.
:type description: str
:param parameters: List of the topology parameter declarations. Parameters declared here can be
referenced throughout the topology nodes through the use of "${PARAMETER_NAME}" string pattern.
Parameters can have optional default values and can later be defined in individual instances of
the pipeline.
:type parameters: list[~video_analyzer.models.ParameterDeclaration]
:param sources: List of the topology source nodes. Source nodes enable external data to be
ingested by the pipeline.
:type sources: list[~video_analyzer.models.SourceNodeBase]
:param processors: List of the topology processor nodes. Processor nodes enable pipeline data
to be analyzed, processed or transformed.
:type processors: list[~video_analyzer.models.ProcessorNodeBase]
:param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or
exported.
:type sinks: list[~video_analyzer.models.SinkNodeBase]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'kind': {'key': 'kind', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'description': {'key': 'properties.description', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'},
'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'},
'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'},
'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'},
}
def __init__(
self,
**kwargs
):
super(PipelineTopologyUpdate, self).__init__(**kwargs)
self.kind = kwargs.get('kind', None)
self.sku = kwargs.get('sku', None)
self.description = kwargs.get('description', None)
self.parameters = kwargs.get('parameters', None)
self.sources = kwargs.get('sources', None)
self.processors = kwargs.get('processors', None)
self.sinks = kwargs.get('sinks', None)
class PrivateEndpoint(msrest.serialization.Model):
"""The Private Endpoint resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ARM identifier for Private Endpoint.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = None
class PrivateEndpointConnection(Resource):
"""The Private Endpoint Connection resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param private_endpoint: The private endpoint resource.
:type private_endpoint: ~video_analyzer.models.PrivateEndpoint
:param private_link_service_connection_state: A collection of information about the state of
the connection between service consumer and provider.
:type private_link_service_connection_state:
~video_analyzer.models.PrivateLinkServiceConnectionState
:ivar provisioning_state: The provisioning state of the private endpoint connection resource.
Possible values include: "Succeeded", "Creating", "Deleting", "Failed".
:vartype provisioning_state: str or
~video_analyzer.models.PrivateEndpointConnectionProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.private_endpoint = kwargs.get('private_endpoint', None)
self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None)
self.provisioning_state = None
class PrivateEndpointConnectionListResult(msrest.serialization.Model):
"""List of private endpoint connection associated with the specified storage account.
:param value: Array of private endpoint connections.
:type value: list[~video_analyzer.models.PrivateEndpointConnection]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PrivateLinkResource(Resource):
"""A private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:param required_zone_names: The private link DNS zone names required by the private link resource.
:type required_zone_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'group_id': {'readonly': True},
'required_members': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResource, self).__init__(**kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = kwargs.get('required_zone_names', None)
class PrivateLinkResourceListResult(msrest.serialization.Model):
"""A list of private link resources.
:param value: Array of private link resources.
:type value: list[~video_analyzer.models.PrivateLinkResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResourceListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
"""A collection of information about the state of the connection between service consumer and provider.
:param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Possible values include: "Pending", "Approved", "Rejected".
:type status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus
:param description: The reason for approval/rejection of the connection.
:type description: str
:param actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:type actions_required: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.description = kwargs.get('description', None)
self.actions_required = kwargs.get('actions_required', None)
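# Illustrative sketch: approving a private endpoint connection by setting its connection
# state. The description text is a placeholder.
def _example_connection_state_approved():
    """Return a PrivateLinkServiceConnectionState marking a connection approved (illustration only)."""
    return PrivateLinkServiceConnectionState(
        status="Approved",            # possible values: Pending, Approved, Rejected
        description="Approved by the account administrator",
    )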
class Properties(msrest.serialization.Model):
"""Metric properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar service_specification: The service specifications.
:vartype service_specification: ~video_analyzer.models.ServiceSpecification
"""
_validation = {
'service_specification': {'readonly': True},
}
_attribute_map = {
'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'},
}
def __init__(
self,
**kwargs
):
super(Properties, self).__init__(**kwargs)
self.service_specification = None
class ResourceIdentity(msrest.serialization.Model):
"""The user assigned managed identity to use when accessing a resource.
All required parameters must be populated in order to send to Azure.
:param user_assigned_identity: Required. The user assigned managed identity's resource
identifier to use when accessing a resource.
:type user_assigned_identity: str
"""
_validation = {
'user_assigned_identity': {'required': True},
}
_attribute_map = {
'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceIdentity, self).__init__(**kwargs)
self.user_assigned_identity = kwargs['user_assigned_identity']
class RsaTokenKey(TokenKey):
"""Required validation properties for tokens generated with RSA algorithm.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param kid: Required. JWT token key id. Validation keys are looked up based on the key id
present on the JWT token header.
:type kid: str
:param alg: Required. RSA algorithm to be used: RS256, RS384 or RS512. Possible values include:
"RS256", "RS384", "RS512".
:type alg: str or ~video_analyzer.models.AccessPolicyRsaAlgo
:param n: Required. RSA public key modulus.
:type n: str
:param e: Required. RSA public key exponent.
:type e: str
"""
_validation = {
'type': {'required': True},
'kid': {'required': True},
'alg': {'required': True},
'n': {'required': True},
'e': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
'alg': {'key': 'alg', 'type': 'str'},
'n': {'key': 'n', 'type': 'str'},
'e': {'key': 'e', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RsaTokenKey, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey' # type: str
self.alg = kwargs['alg']
self.n = kwargs['n']
self.e = kwargs['e']
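# Illustrative sketch: an RSA validation key for JWT access tokens. The key id and
# modulus ('n') below are placeholders; real values are the base64url-encoded RSA public
# key components ('AQAB' is the common encoding of exponent 65537).
def _example_rsa_token_key():
    """Return an RsaTokenKey with placeholder key material (illustration only)."""
    return RsaTokenKey(
        kid="example-key-id",
        alg="RS256",                 # one of: RS256, RS384, RS512
        n="placeholder-modulus",
        e="AQAB",
    )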
class SourceNodeBase(NodeBase):
"""Base class for topology source nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: RtspSource, VideoSource.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'}
}
def __init__(
self,
**kwargs
):
super(SourceNodeBase, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase' # type: str
class RtspSource(SourceNodeBase):
"""RTSP source allows for media from an RTSP camera or generic RTSP server to be ingested into a pipeline.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When
using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the
RTSP messages are exchanged through long-lived HTTP connections, and the RTP packets are
interleaved in the HTTP connections alongside the RTSP messages. Possible values include:
"Http", "Tcp".
:type transport: str or ~video_analyzer.models.RtspTransport
:param endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This
contains the required information for Video Analyzer to connect to RTSP cameras and/or generic
RTSP servers.
:type endpoint: ~video_analyzer.models.EndpointBase
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'endpoint': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'transport': {'key': 'transport', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
}
def __init__(
self,
**kwargs
):
super(RtspSource, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.RtspSource' # type: str
self.transport = kwargs.get('transport', None)
self.endpoint = kwargs['endpoint']
class TunnelBase(msrest.serialization.Model):
"""Base class for tunnel objects.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SecureIotDeviceRemoteTunnel.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'}
}
def __init__(
self,
**kwargs
):
super(TunnelBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class SecureIotDeviceRemoteTunnel(TunnelBase):
"""A remote tunnel securely established using IoT Hub device information.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param iot_hub_name: Required. Name of the IoT Hub.
:type iot_hub_name: str
:param device_id: Required. The IoT device id to use when establishing the remote tunnel. This
string is case-sensitive.
:type device_id: str
"""
_validation = {
'type': {'required': True},
'iot_hub_name': {'required': True},
'device_id': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'iot_hub_name': {'key': 'iotHubName', 'type': 'str'},
'device_id': {'key': 'deviceId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' # type: str
self.iot_hub_name = kwargs['iot_hub_name']
self.device_id = kwargs['device_id']
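# Illustrative sketch: an RtspSource whose endpoint is reached through an IoT Hub device
# tunnel, useful when the camera sits behind a firewall. The IoT Hub and device names are
# placeholders, and the credentials class used here (UsernamePasswordCredentials, defined
# further down in this module) and its keyword names are assumptions for the sketch.
def _example_tunneled_rtsp_source():
    """Return an RtspSource that connects through a SecureIotDeviceRemoteTunnel (illustration only)."""
    return RtspSource(
        name="rtspSource",
        transport="Tcp",
        endpoint=UnsecuredEndpoint(
            url="rtsp://localhost:554/stream",
            credentials=UsernamePasswordCredentials(   # assumed keyword argument names
                username="camera-user",
                password="${rtspPasswordParameter}",
            ),
            tunnel=SecureIotDeviceRemoteTunnel(
                iot_hub_name="my-iot-hub",
                device_id="camera-device-01",
            ),
        ),
    )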
class ServiceSpecification(msrest.serialization.Model):
"""The service metric specifications.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar log_specifications: List of log specifications.
:vartype log_specifications: list[~video_analyzer.models.LogSpecification]
:ivar metric_specifications: List of metric specifications.
:vartype metric_specifications: list[~video_analyzer.models.MetricSpecification]
"""
_validation = {
'log_specifications': {'readonly': True},
'metric_specifications': {'readonly': True},
}
_attribute_map = {
'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},
'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
}
def __init__(
self,
**kwargs
):
super(ServiceSpecification, self).__init__(**kwargs)
self.log_specifications = None
self.metric_specifications = None
class SinkNodeBase(NodeBase):
"""Base class for topology sink nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoSink.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'}
}
def __init__(
self,
**kwargs
):
super(SinkNodeBase, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase' # type: str
self.inputs = kwargs['inputs']
class Sku(msrest.serialization.Model):
"""The SKU details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The SKU name. Possible values include: "Live_S1", "Batch_S1".
:type name: str or ~video_analyzer.models.SkuName
:ivar tier: The SKU tier. Possible values include: "Standard".
:vartype tier: str or ~video_analyzer.models.SkuTier
"""
_validation = {
'name': {'required': True},
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = kwargs['name']
self.tier = None
class StorageAccount(msrest.serialization.Model):
"""The details about the associated storage account.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. The ID of the storage account resource. Video Analyzer relies on tables,
queues, and blobs. The primary storage account must be a Standard Storage account (either
Microsoft.ClassicStorage or Microsoft.Storage).
:type id: str
:param identity: A managed identity that Video Analyzer will use to access the storage account.
:type identity: ~video_analyzer.models.ResourceIdentity
:ivar status: The current status of the storage account mapping.
:vartype status: str
"""
_validation = {
'id': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageAccount, self).__init__(**kwargs)
self.id = kwargs['id']
self.identity = kwargs.get('identity', None)
self.status = None
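# Illustrative sketch: associating a storage account with the Video Analyzer account
# using a user-assigned managed identity. Both ARM resource IDs are placeholders.
def _example_storage_account():
    """Return a StorageAccount with placeholder resource IDs (illustration only)."""
    return StorageAccount(
        id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg"
            "/providers/Microsoft.Storage/storageAccounts/mystorageaccount"
        ),
        identity=ResourceIdentity(
            user_assigned_identity=(
                "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg"
                "/providers/Microsoft.ManagedIdentity/userAssignedIdentities/my-identity"
            ),
        ),
    )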
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~video_analyzer.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~video_analyzer.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = kwargs.get('created_by', None)
self.created_by_type = kwargs.get('created_by_type', None)
self.created_at = kwargs.get('created_at', None)
self.last_modified_by = kwargs.get('last_modified_by', None)
self.last_modified_by_type = kwargs.get('last_modified_by_type', None)
self.last_modified_at = kwargs.get('last_modified_at', None)
class TimeSequenceBase(msrest.serialization.Model):
"""A sequence of datetime ranges as a string.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoSequenceAbsoluteTimeMarkers.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'}
}
def __init__(
self,
**kwargs
):
super(TimeSequenceBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class TlsEndpoint(EndpointBase):
"""TLS endpoint describes an endpoint that the pipeline can connect to over TLS transport (data is encrypted in transit).
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param credentials: Required. Credentials to be presented to the endpoint.
:type credentials: ~video_analyzer.models.CredentialsBase
:param url: Required. The endpoint URL for Video Analyzer to connect to.
:type url: str
:param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
URL. This is an optional property, typically used when the endpoint is behind a firewall.
:type tunnel: ~video_analyzer.models.TunnelBase
:param trusted_certificates: List of trusted certificate authorities when authenticating a TLS
connection. A null list designates that Azure Video Analyzer's list of trusted authorities
should be used.
:type trusted_certificates: ~video_analyzer.models.CertificateSource
:param validation_options: Validation options to use when authenticating a TLS connection. By
default, strict validation is used.
:type validation_options: ~video_analyzer.models.TlsValidationOptions
"""
_validation = {
'type': {'required': True},
'credentials': {'required': True},
'url': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
'url': {'key': 'url', 'type': 'str'},
'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'},
'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'},
}
def __init__(
self,
**kwargs
):
super(TlsEndpoint, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str
self.trusted_certificates = kwargs.get('trusted_certificates', None)
self.validation_options = kwargs.get('validation_options', None)
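# Illustrative sketch: a TLS endpoint that trusts a custom certificate authority and
# relaxes hostname validation. The URL and certificate are placeholders, and the
# credentials class used here (UsernamePasswordCredentials, defined further down in this
# module) and its keyword names are assumptions for the sketch.
def _example_tls_endpoint():
    """Return a TlsEndpoint with placeholder values (illustration only)."""
    return TlsEndpoint(
        url="rtsps://camera1.example:322/stream",
        credentials=UsernamePasswordCredentials(   # assumed keyword argument names
            username="camera-user",
            password="${rtspPasswordParameter}",
        ),
        trusted_certificates=PemCertificateList(
            certificates=[
                "-----BEGIN CERTIFICATE-----\nMIIB...placeholder...\n-----END CERTIFICATE-----\n",
            ],
        ),
        validation_options=TlsValidationOptions(ignore_hostname="true", ignore_signature="false"),
    )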
class TlsValidationOptions(msrest.serialization.Model):
"""Options for controlling the validation of TLS endpoints.
:param ignore_hostname: When set to 'true', certificate subject name validation is
skipped. Default is 'false'.
:type ignore_hostname: str
:param ignore_signature: When set to 'true', certificate chain trust validation is
skipped. Default is 'false'.
:type ignore_signature: str
"""
_attribute_map = {
'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'},
'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TlsValidationOptions, self).__init__(**kwargs)
self.ignore_hostname = kwargs.get('ignore_hostname', None)
self.ignore_signature = kwargs.get('ignore_signature', None)
class TokenClaim(msrest.serialization.Model):
"""Properties for expected token claims.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the claim which must be present on the token.
:type name: str
:param value: Required. Expected value of the claim to be present on the token.
:type value: str
"""
_validation = {
'name': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TokenClaim, self).__init__(**kwargs)
self.name = kwargs['name']
self.value = kwargs['value']
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs['location']
class UnsecuredEndpoint(EndpointBase):
"""Unsecured endpoint describes an endpoint that the pipeline can connect to over clear transport (no encryption in transit).
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param credentials: Required. Credentials to be presented to the endpoint.
:type credentials: ~video_analyzer.models.CredentialsBase
:param url: Required. The endpoint URL for Video Analyzer to connect to.
:type url: str
:param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
URL. This is an optional property, typically used when the endpoint is behind a firewall.
:type tunnel: ~video_analyzer.models.TunnelBase
"""
_validation = {
'type': {'required': True},
'credentials': {'required': True},
'url': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
'url': {'key': 'url', 'type': 'str'},
'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
}
def __init__(
self,
**kwargs
):
super(UnsecuredEndpoint, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str
class UserAssignedManagedIdentity(msrest.serialization.Model):
"""The details of the user assigned managed identity used by the Video Analyzer resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar client_id: The client ID.
:vartype client_id: str
:ivar principal_id: The principal ID.
:vartype principal_id: str
"""
_validation = {
'client_id': {'readonly': True},
'principal_id': {'readonly': True},
}
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserAssignedManagedIdentity, self).__init__(**kwargs)
self.client_id = None
self.principal_id = None
class UsernamePasswordCredentials(CredentialsBase):
"""Username and password credentials.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param username: Required. Username to be presented as part of the credentials.
:type username: str
    :param password: Required. Password to be presented as part of the credentials. It is
     recommended that this value be parameterized as a secret string to prevent it from being
     returned as part of the resource on API requests.
:type password: str
"""
_validation = {
'type': {'required': True},
'username': {'required': True},
'password': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UsernamePasswordCredentials, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str
self.username = kwargs['username']
self.password = kwargs['password']
class VideoAnalyzer(TrackedResource):
"""The Video Analyzer account.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param identity: The identities associated to the Video Analyzer resource.
:type identity: ~video_analyzer.models.VideoAnalyzerIdentity
:param storage_accounts: The storage accounts for this resource.
:type storage_accounts: list[~video_analyzer.models.StorageAccount]
:ivar endpoints: The endpoints associated with this resource.
:vartype endpoints: list[~video_analyzer.models.Endpoint]
:param encryption: The account encryption properties.
:type encryption: ~video_analyzer.models.AccountEncryption
:param iot_hubs: The IoT Hubs for this resource.
:type iot_hubs: list[~video_analyzer.models.IotHub]
:param public_network_access: Whether or not public network access is allowed for resources
under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
:type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
:param network_access_control: Network access control for Video Analyzer.
:type network_access_control: ~video_analyzer.models.NetworkAccessControl
:ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible values
include: "Failed", "InProgress", "Succeeded".
:vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState
:ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer
account.
:vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
'endpoints': {'readonly': True},
'provisioning_state': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'},
'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'},
'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzer, self).__init__(**kwargs)
self.identity = kwargs.get('identity', None)
self.storage_accounts = kwargs.get('storage_accounts', None)
self.endpoints = None
self.encryption = kwargs.get('encryption', None)
self.iot_hubs = kwargs.get('iot_hubs', None)
self.public_network_access = kwargs.get('public_network_access', None)
self.network_access_control = kwargs.get('network_access_control', None)
self.provisioning_state = None
self.private_endpoint_connections = None
class VideoAnalyzerCollection(msrest.serialization.Model):
"""A collection of VideoAnalyzer items.
:param value: A collection of VideoAnalyzer items.
:type value: list[~video_analyzer.models.VideoAnalyzer]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[VideoAnalyzer]'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class VideoAnalyzerIdentity(msrest.serialization.Model):
"""The managed identity for the Video Analyzer resource.
All required parameters must be populated in order to send to Azure.
:param type: Required. The identity type.
:type type: str
:param user_assigned_identities: The User Assigned Managed Identities.
:type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerIdentity, self).__init__(**kwargs)
self.type = kwargs['type']
self.user_assigned_identities = kwargs.get('user_assigned_identities', None)
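# Illustrative sketch, not part of the generated models: a minimal account payload. Only
# 'location' is required; the 'UserAssigned' identity type string and the managed-identity
# resource ID below follow the common ARM convention and are assumptions for illustration.
def _example_video_analyzer_account():
    mi_resource_id = ('/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/'
                      'providers/Microsoft.ManagedIdentity/userAssignedIdentities/example-mi')
    identity = VideoAnalyzerIdentity(
        type='UserAssigned',
        user_assigned_identities={mi_resource_id: UserAssignedManagedIdentity()})
    return VideoAnalyzer(location='westus2', tags={'env': 'example'}, identity=identity)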
class VideoAnalyzerOperationStatus(msrest.serialization.Model):
"""Status of video analyzer operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. Operation identifier.
:type name: str
:param id: Operation resource ID.
:type id: str
:param start_time: Operation start time.
:type start_time: str
:param end_time: Operation end time.
:type end_time: str
:param status: Operation status.
:type status: str
:param error: The error detail.
:type error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'str'},
'end_time': {'key': 'endTime', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerOperationStatus, self).__init__(**kwargs)
self.name = kwargs['name']
self.id = kwargs.get('id', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.status = kwargs.get('status', None)
self.error = kwargs.get('error', None)
class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model):
"""Status of private endpoint connection operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. Operation identifier.
:type name: str
:param id: Operation resource ID.
:type id: str
:param start_time: Operation start time.
:type start_time: str
:param end_time: Operation end time.
:type end_time: str
:param status: Operation status.
:type status: str
:param error: The error detail.
:type error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'str'},
'end_time': {'key': 'endTime', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs)
self.name = kwargs['name']
self.id = kwargs.get('id', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.status = kwargs.get('status', None)
self.error = kwargs.get('error', None)
class VideoAnalyzerUpdate(msrest.serialization.Model):
"""The update operation for a Video Analyzer account.
Variables are only populated by the server, and will be ignored when sending a request.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param identity: The identities associated to the Video Analyzer resource.
:type identity: ~video_analyzer.models.VideoAnalyzerIdentity
:param storage_accounts: The storage accounts for this resource.
:type storage_accounts: list[~video_analyzer.models.StorageAccount]
:ivar endpoints: The endpoints associated with this resource.
:vartype endpoints: list[~video_analyzer.models.Endpoint]
:param encryption: The account encryption properties.
:type encryption: ~video_analyzer.models.AccountEncryption
:param iot_hubs: The IoT Hubs for this resource.
:type iot_hubs: list[~video_analyzer.models.IotHub]
:param public_network_access: Whether or not public network access is allowed for resources
under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
:type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
:param network_access_control: Network access control for Video Analyzer.
:type network_access_control: ~video_analyzer.models.NetworkAccessControl
:ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible values
include: "Failed", "InProgress", "Succeeded".
:vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState
:ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer
account.
:vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection]
"""
_validation = {
'endpoints': {'readonly': True},
'provisioning_state': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'},
'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'},
'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerUpdate, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.identity = kwargs.get('identity', None)
self.storage_accounts = kwargs.get('storage_accounts', None)
self.endpoints = None
self.encryption = kwargs.get('encryption', None)
self.iot_hubs = kwargs.get('iot_hubs', None)
self.public_network_access = kwargs.get('public_network_access', None)
self.network_access_control = kwargs.get('network_access_control', None)
self.provisioning_state = None
self.private_endpoint_connections = None
class VideoArchival(msrest.serialization.Model):
"""Video archival properties.
:param retention_period: Video retention period indicates the maximum age of the video archive
segments which are intended to be kept in storage. It must be provided in the ISO8601 duration
     format at a granularity of days, up to a maximum of 10 years. For example, if this is set to
P30D (30 days), content older than 30 days will be periodically deleted. This value can be
updated at any time and the new desired retention period will be effective within 24 hours.
:type retention_period: str
"""
_attribute_map = {
'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoArchival, self).__init__(**kwargs)
self.retention_period = kwargs.get('retention_period', None)
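# Illustrative sketch, not part of the generated models: keep archived content for 30 days,
# matching the P30D example from the docstring above.
def _example_video_archival():
    return VideoArchival(retention_period='P30D')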
class VideoContentToken(msrest.serialization.Model):
    """Video content token grants access to the video content URLs.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar expiration_date: The content token expiration date in ISO8601 format (eg.
2021-01-01T00:00:00Z).
:vartype expiration_date: ~datetime.datetime
:ivar token: The content token value to be added to the video content URL as the value for the
"token" query string parameter. The token is specific to a single video.
:vartype token: str
"""
_validation = {
'expiration_date': {'readonly': True},
'token': {'readonly': True},
}
_attribute_map = {
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'token': {'key': 'token', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoContentToken, self).__init__(**kwargs)
self.expiration_date = None
self.token = None
class VideoContentUrls(msrest.serialization.Model):
"""Set of URLs to the video content.
:param download_url: Video file download URL. This URL can be used in conjunction with the
video content authorization token to download the video MP4 file. The resulting MP4 file can be
played on any standard media player. It is available when the video type is 'file' and video
file is available for consumption.
:type download_url: str
:param archive_base_url: Video archive streaming base URL. The archived content can be
automatically played by the Azure Video Analyzer player widget. Alternatively, this URL can be
used in conjunction with the video content authorization token on any compatible DASH or HLS
players by appending the following to the base URL:
.. code-block::
- HLSv4: /manifest(format=m3u8-aapl).m3u8
- HLS CMAF: /manifest(format=m3u8-cmaf)
- DASH CMAF: /manifest(format=mpd-time-cmaf)
Moreover, an ongoing video recording can be played in "live mode" with latencies which are
     approximately double the chosen video segment length. It is available when the video type is
'archive' and video archiving is enabled.
:type archive_base_url: str
:param rtsp_tunnel_url: Video low-latency streaming URL. The live content can be automatically
played by the Azure Video Analyzer player widget. Alternatively, this URL can be used in
conjunction with the video content authorization token to expose a WebSocket tunneled RTSP
stream. It is available when the video type is 'archive' and a live, low-latency feed is
available from the source.
:type rtsp_tunnel_url: str
:param preview_image_urls: Video preview image URLs. These URLs can be used in conjunction with
the video content authorization token to download the most recent still image from the video
archive in different resolutions. They are available when the video type is 'archive' and
preview images are enabled.
:type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls
"""
_attribute_map = {
'download_url': {'key': 'downloadUrl', 'type': 'str'},
'archive_base_url': {'key': 'archiveBaseUrl', 'type': 'str'},
'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type': 'str'},
'preview_image_urls': {'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'},
}
def __init__(
self,
**kwargs
):
super(VideoContentUrls, self).__init__(**kwargs)
self.download_url = kwargs.get('download_url', None)
self.archive_base_url = kwargs.get('archive_base_url', None)
self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url', None)
self.preview_image_urls = kwargs.get('preview_image_urls', None)
class VideoCreationProperties(msrest.serialization.Model):
"""Optional properties to be used in case a new video resource needs to be created on the service. These will not take effect if the video already exists.
:param title: Optional title provided by the user. Value can be up to 256 characters long.
:type title: str
:param description: Optional description provided by the user. Value can be up to 2048
characters long.
:type description: str
:param segment_length: Segment length indicates the length of individual content files
(segments) which are persisted to storage. Smaller segments provide lower archive playback
latency but generate larger volume of storage transactions. Larger segments reduce the amount
of storage transactions while increasing the archive playback latency. Value must be specified
     in ISO8601 duration format (i.e. "PT30S" equals 30 seconds) and can vary between 30 seconds and
     5 minutes, in 30-second increments. Changing this value after the initial call to create the
video resource can lead to errors when uploading content to the archive. Default value is 30
seconds. This property is only allowed for topologies where "kind" is set to "live".
:type segment_length: str
:param retention_period: Video retention period indicates how long the video is kept in
storage. Value must be specified in ISO8601 duration format (i.e. "P1D" equals 1 day) and can
     vary between 1 day and 10 years, in 1-day increments. When absent (null), all video content is
retained indefinitely. This property is only allowed for topologies where "kind" is set to
"live".
:type retention_period: str
"""
_attribute_map = {
'title': {'key': 'title', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'segment_length': {'key': 'segmentLength', 'type': 'str'},
'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoCreationProperties, self).__init__(**kwargs)
self.title = kwargs.get('title', None)
self.description = kwargs.get('description', None)
self.segment_length = kwargs.get('segment_length', None)
self.retention_period = kwargs.get('retention_period', None)
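# Illustrative sketch, not part of the generated models: creation properties for a live video
# with 30-second segments and a 7-day retention; the title and description are made up.
def _example_video_creation_properties():
    return VideoCreationProperties(
        title='Parking lot camera',
        description='Illustrative example only',
        segment_length='PT30S',
        retention_period='P7D',
    )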
class VideoEncoderBase(msrest.serialization.Model):
"""Base type for all video encoding presets, which define the recipe or instructions on how the input video should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoEncoderH264.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param bitrate_kbps: The maximum bitrate, in kilobits per second or Kbps, at which video should
be encoded. If omitted, encoder sets it automatically to try and match the quality of the input
video.
:type bitrate_kbps: str
:param frame_rate: The frame rate (in frames per second) of the encoded video. The value must
be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average
frame rate of the input video.
:type frame_rate: str
:param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the
resolution of the input video.
:type scale: ~video_analyzer.models.VideoScale
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'scale': {'key': 'scale', 'type': 'VideoScale'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'}
}
def __init__(
self,
**kwargs
):
super(VideoEncoderBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
self.frame_rate = kwargs.get('frame_rate', None)
self.scale = kwargs.get('scale', None)
class VideoEncoderH264(VideoEncoderBase):
"""A custom preset for encoding video with the H.264 (AVC) codec.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param bitrate_kbps: The maximum bitrate, in kilobits per second or Kbps, at which video should
be encoded. If omitted, encoder sets it automatically to try and match the quality of the input
video.
:type bitrate_kbps: str
:param frame_rate: The frame rate (in frames per second) of the encoded video. The value must
be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average
frame rate of the input video.
:type frame_rate: str
:param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the
resolution of the input video.
:type scale: ~video_analyzer.models.VideoScale
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'scale': {'key': 'scale', 'type': 'VideoScale'},
}
def __init__(
self,
**kwargs
):
super(VideoEncoderH264, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264' # type: str
class VideoEntity(ProxyResource):
"""Represents a video resource within Azure Video Analyzer. Videos can be ingested from RTSP cameras through live pipelines or can be created by exporting sequences from existing captured video through a pipeline job. Videos ingested through live pipelines can be streamed through Azure Video Analyzer Player Widget or compatible players. Exported videos can be downloaded as MP4 files.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param title: Optional video title provided by the user. Value can be up to 256 characters
long.
:type title: str
:param description: Optional video description provided by the user. Value can be up to 2048
characters long.
:type description: str
:ivar type_properties_type: Video content type. Different content types are suitable for
different applications and scenarios. Possible values include: "Archive", "File".
:vartype type_properties_type: str or ~video_analyzer.models.VideoType
:ivar flags: Video flags contain information about the available video actions and its dynamic
properties based on the current video state.
:vartype flags: ~video_analyzer.models.VideoFlags
:ivar content_urls: Set of URLs to the video content.
:vartype content_urls: ~video_analyzer.models.VideoContentUrls
:param media_info: Contains information about the video and audio content.
:type media_info: ~video_analyzer.models.VideoMediaInfo
:param archival: Video archival properties.
:type archival: ~video_analyzer.models.VideoArchival
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'type_properties_type': {'readonly': True},
'flags': {'readonly': True},
'content_urls': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'title': {'key': 'properties.title', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'type_properties_type': {'key': 'properties.type', 'type': 'str'},
'flags': {'key': 'properties.flags', 'type': 'VideoFlags'},
'content_urls': {'key': 'properties.contentUrls', 'type': 'VideoContentUrls'},
'media_info': {'key': 'properties.mediaInfo', 'type': 'VideoMediaInfo'},
'archival': {'key': 'properties.archival', 'type': 'VideoArchival'},
}
def __init__(
self,
**kwargs
):
super(VideoEntity, self).__init__(**kwargs)
self.title = kwargs.get('title', None)
self.description = kwargs.get('description', None)
self.type_properties_type = None
self.flags = None
self.content_urls = None
self.media_info = kwargs.get('media_info', None)
self.archival = kwargs.get('archival', None)
class VideoEntityCollection(msrest.serialization.Model):
"""A collection of VideoEntity items.
:param value: A collection of VideoEntity items.
:type value: list[~video_analyzer.models.VideoEntity]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[VideoEntity]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoEntityCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class VideoFlags(msrest.serialization.Model):
"""Video flags contain information about the available video actions and its dynamic properties based on the current video state.
All required parameters must be populated in order to send to Azure.
:param can_stream: Required. Value indicating whether or not the video can be streamed. Only
"archive" type videos can be streamed.
:type can_stream: bool
:param has_data: Required. Value indicating whether or not there has ever been data recorded or
uploaded into the video. Newly created videos have this value set to false.
:type has_data: bool
:param is_in_use: Required. Value indicating whether or not the video is currently being
     referenced by an active pipeline. The fact that it is being referenced doesn't necessarily
     indicate that data is being received. For example, video recording may be gated on events, or
     the camera may not be accessible at the time.
:type is_in_use: bool
"""
_validation = {
'can_stream': {'required': True},
'has_data': {'required': True},
'is_in_use': {'required': True},
}
_attribute_map = {
'can_stream': {'key': 'canStream', 'type': 'bool'},
'has_data': {'key': 'hasData', 'type': 'bool'},
'is_in_use': {'key': 'isInUse', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(VideoFlags, self).__init__(**kwargs)
self.can_stream = kwargs['can_stream']
self.has_data = kwargs['has_data']
self.is_in_use = kwargs['is_in_use']
class VideoMediaInfo(msrest.serialization.Model):
"""Contains information about the video and audio content.
:param segment_length: Video segment length indicates the length of individual video files
(segments) which are persisted to storage. Smaller segments provide lower archive playback
latency but generate larger volume of storage transactions. Larger segments reduce the amount
of storage transactions while increasing the archive playback latency. Value must be specified
     in ISO8601 duration format (i.e. "PT30S" equals 30 seconds) and can vary between 30 seconds and
     5 minutes, in 30-second increments.
:type segment_length: str
"""
_attribute_map = {
'segment_length': {'key': 'segmentLength', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoMediaInfo, self).__init__(**kwargs)
self.segment_length = kwargs.get('segment_length', None)
class VideoPreviewImageUrls(msrest.serialization.Model):
"""Video preview image URLs. These URLs can be used in conjunction with the video content authorization token to download the most recent still image from the video archive in different resolutions. They are available when the video type is 'archive' and preview images are enabled.
:param small: Low resolution preview image URL.
:type small: str
:param medium: Medium resolution preview image URL.
:type medium: str
:param large: High resolution preview image URL.
:type large: str
"""
_attribute_map = {
'small': {'key': 'small', 'type': 'str'},
'medium': {'key': 'medium', 'type': 'str'},
'large': {'key': 'large', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoPreviewImageUrls, self).__init__(**kwargs)
self.small = kwargs.get('small', None)
self.medium = kwargs.get('medium', None)
self.large = kwargs.get('large', None)
class VideoPublishingOptions(msrest.serialization.Model):
"""Optional flags used to change how video is published. These are only allowed for topologies where "kind" is set to "live".
:param disable_archive: When set to 'true' content will not be archived or recorded. This is
used, for example, when the topology is used only for low latency video streaming. Default is
'false'. If set to 'true', then "disableRtspPublishing" must be set to 'false'.
:type disable_archive: str
:param disable_rtsp_publishing: When set to 'true' the RTSP playback URL will not be published,
disabling low latency streaming. This is used, for example, when the topology is used only for
archiving content. Default is 'false'. If set to 'true', then "disableArchive" must be set to
'false'.
:type disable_rtsp_publishing: str
"""
_attribute_map = {
'disable_archive': {'key': 'disableArchive', 'type': 'str'},
'disable_rtsp_publishing': {'key': 'disableRtspPublishing', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoPublishingOptions, self).__init__(**kwargs)
self.disable_archive = kwargs.get('disable_archive', None)
self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing', None)
class VideoScale(msrest.serialization.Model):
"""The video scaling information.
:param height: The desired output video height.
:type height: str
:param width: The desired output video width.
:type width: str
:param mode: Describes the video scaling mode to be applied. Default mode is 'Pad'. If the mode
is 'Pad' or 'Stretch' then both width and height must be specified. Else if the mode is
'PreserveAspectRatio' then only one of width or height need be provided. Possible values
include: "Pad", "PreserveAspectRatio", "Stretch".
:type mode: str or ~video_analyzer.models.VideoScaleMode
"""
_attribute_map = {
'height': {'key': 'height', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoScale, self).__init__(**kwargs)
self.height = kwargs.get('height', None)
self.width = kwargs.get('width', None)
self.mode = kwargs.get('mode', None)
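# Illustrative sketch, not part of the generated models: an H.264 preset that scales the
# output to 1280x720 with padding; the bitrate and frame rate values are made up.
def _example_h264_encoder():
    return VideoEncoderH264(
        bitrate_kbps='2000',
        frame_rate='30',
        scale=VideoScale(height='720', width='1280', mode='Pad'),
    )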
class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase):
    """A sequence of absolute datetime ranges as a string. The datetime values should follow ISO8601, and the sum of the ranges should add up to 24 hours or less. Currently, there can be only one range specified in the sequence.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param ranges: Required. The sequence of datetime ranges. Example: '[["2021-10-05T03:30:00Z",
"2021-10-05T03:40:00Z"]]'.
:type ranges: str
"""
_validation = {
'type': {'required': True},
'ranges': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'ranges': {'key': 'ranges', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers' # type: str
self.ranges = kwargs['ranges']
class VideoSink(SinkNodeBase):
"""Video sink in a live topology allows for video and audio to be captured, optionally archived, and published via a video resource. If archiving is enabled, this results in a video of type 'archive'. If used in a batch topology, this allows for video and audio to be stored as a file, and published via a video resource of type 'file'.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
:param video_name: Required. Name of a new or existing video resource used to capture and
     publish content. Note: if this sink is downstream of an RTSP source and disableArchive is set
     to true, then no content is archived.
:type video_name: str
:param video_creation_properties: Optional video properties to be used in case a new video
resource needs to be created on the service.
:type video_creation_properties: ~video_analyzer.models.VideoCreationProperties
:param video_publishing_options: Options to change how the video sink publishes content via the
video resource. This property is only allowed for topologies where "kind" is set to "live".
:type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
'video_name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
'video_name': {'key': 'videoName', 'type': 'str'},
'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'},
'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'},
}
def __init__(
self,
**kwargs
):
super(VideoSink, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoSink' # type: str
self.video_name = kwargs['video_name']
self.video_creation_properties = kwargs.get('video_creation_properties', None)
self.video_publishing_options = kwargs.get('video_publishing_options', None)
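# Illustrative sketch, not part of the generated models: a video sink that archives content
# from an upstream node. NodeInput is defined elsewhere in this module and is assumed here to
# accept a node_name keyword; the node and video names are made up for illustration.
def _example_video_sink(upstream_node_name):
    return VideoSink(
        name='videoSink',
        inputs=[NodeInput(node_name=upstream_node_name)],
        video_name='sample-camera-001',
        video_creation_properties=VideoCreationProperties(segment_length='PT30S'),
        video_publishing_options=VideoPublishingOptions(disable_archive='false',
                                                        disable_rtsp_publishing='false'),
    )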
class VideoSource(SourceNodeBase):
"""Video source allows for content from a Video Analyzer video resource to be ingested into a pipeline. Currently supported only with batch pipelines.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param video_name: Required. Name of the Video Analyzer video resource to be used as the
source.
:type video_name: str
:param time_sequences: Required. Describes a sequence of datetime ranges. The video source only
picks up recorded media within these ranges.
:type time_sequences: ~video_analyzer.models.TimeSequenceBase
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'video_name': {'required': True},
'time_sequences': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'video_name': {'key': 'videoName', 'type': 'str'},
'time_sequences': {'key': 'timeSequences', 'type': 'TimeSequenceBase'},
}
def __init__(
self,
**kwargs
):
super(VideoSource, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoSource' # type: str
self.video_name = kwargs['video_name']
self.time_sequences = kwargs['time_sequences']
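# Illustrative sketch, not part of the generated models: a batch-pipeline source reading a
# ten-minute window of recorded content, reusing the ranges format documented on
# VideoSequenceAbsoluteTimeMarkers above. The node and video names are made up.
def _example_video_source():
    markers = VideoSequenceAbsoluteTimeMarkers(
        ranges='[["2021-10-05T03:30:00Z", "2021-10-05T03:40:00Z"]]')
    return VideoSource(name='videoSource', video_name='sample-camera-001',
                       time_sequences=markers)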
|
codigo_das_aulas/aula_09/aula_09_03.py | VeirichR/curso-python-selenium | 234 | 5017 | from functools import partial
from selenium.webdriver import Firefox
from selenium.webdriver.support.ui import (
WebDriverWait
)
def esperar_elemento(elemento, webdriver):
print(f'Tentando encontrar "{elemento}"')
if webdriver.find_elements_by_css_selector(elemento):
return True
return False
esperar_botao = partial(esperar_elemento, 'button')
esperar_sucesso = partial(esperar_elemento, '#finished')
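# functools.partial pre-binds the CSS selector, so each callable above only needs the
# webdriver argument that WebDriverWait.until() supplies; esperar_botao(driver) is
# equivalent to esperar_elemento('button', driver).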
url = 'https://selenium.dunossauro.live/aula_09_a.html'
driver = Firefox()
wdw = WebDriverWait(driver, 10)
driver.get(url)
wdw.until(esperar_botao, 'Deu ruim')
driver.find_element_by_css_selector('button').click()
wdw.until(
esperar_sucesso,
'A mensagem de sucesso não apareceu'
)
sucesso = driver.find_element_by_css_selector('#finished')
assert sucesso.text == 'Carregamento concluído'
|
env/lib/python3.6/site-packages/odf/meta.py | anthowen/duplify | 5,079 | 5051 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 <NAME>, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from odf.namespaces import METANS
from odf.element import Element
# Autogenerated
def AutoReload(**args):
return Element(qname = (METANS,'auto-reload'), **args)
def CreationDate(**args):
return Element(qname = (METANS,'creation-date'), **args)
def DateString(**args):
return Element(qname = (METANS,'date-string'), **args)
def DocumentStatistic(**args):
return Element(qname = (METANS,'document-statistic'), **args)
def EditingCycles(**args):
return Element(qname = (METANS,'editing-cycles'), **args)
def EditingDuration(**args):
return Element(qname = (METANS,'editing-duration'), **args)
def Generator(**args):
return Element(qname = (METANS,'generator'), **args)
def HyperlinkBehaviour(**args):
return Element(qname = (METANS,'hyperlink-behaviour'), **args)
def InitialCreator(**args):
return Element(qname = (METANS,'initial-creator'), **args)
def Keyword(**args):
return Element(qname = (METANS,'keyword'), **args)
def PrintDate(**args):
return Element(qname = (METANS,'print-date'), **args)
def PrintedBy(**args):
return Element(qname = (METANS,'printed-by'), **args)
def Template(**args):
args.setdefault('type', 'simple')
return Element(qname = (METANS,'template'), **args)
def UserDefined(**args):
return Element(qname = (METANS,'user-defined'), **args)
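# Illustrative sketch, not part of odfpy: attaching a few of the factory elements above to a
# text document's meta section. Assumes the usual odfpy pattern where elements accept a
# 'text' keyword; the generator string and keyword value are made up.
def _example_meta_usage():
    from odf.opendocument import OpenDocumentText
    doc = OpenDocumentText()
    doc.meta.addElement(Generator(text='ExampleTool/1.0'))
    doc.meta.addElement(Keyword(text='example'))
    return doc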
|
flametree/utils.py | Edinburgh-Genome-Foundry/Flametree | 165 | 5056 | import os
import shutil
from .ZipFileManager import ZipFileManager
from .DiskFileManager import DiskFileManager
from .Directory import Directory
import string
printable = set(string.printable) - set("\x0b\x0c")
def is_hex(s):
return any(c not in printable for c in s)
def file_tree(target, replace=False):
"""Open a connection to a file tree which can be either a disk folder, a
zip archive, or an in-memory zip archive.
Parameters
----------
target
Either the path to a target folder, or a zip file, or '@memory' to write
a zip file in memory (at which case a string of the zip file is returned)
If the target is already a flametree directory, it is returned as-is.
replace
If True, will remove the target if it already exists. If False, new files
will be written inside the target and some files may be overwritten.
"""
if isinstance(target, Directory):
return target
if (not isinstance(target, str)) or is_hex(target):
return Directory(file_manager=ZipFileManager(source=target))
elif target == "@memory":
return Directory("@memory", file_manager=ZipFileManager("@memory"))
elif target.lower().endswith(".zip"):
return Directory(target, file_manager=ZipFileManager(target, replace=replace))
else:
return Directory(target, file_manager=DiskFileManager(target))
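def _example_file_tree_usage(target="@memory"):
    # Illustrative sketch, not part of flametree: the _dir()/_file().write() chain follows the
    # library's documented usage; the folder and file names are made up for illustration.
    root = file_tree(target)
    root._dir("texts")._file("hello.txt").write("Hello, world")
    return root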
|
scripts/kconfig-split.py | Osirium/linuxkit | 7,798 | 5089 | #!/usr/bin/env python
# This is a slightly modified version of ChromiumOS' splitconfig
# https://chromium.googlesource.com/chromiumos/third_party/kernel/+/stabilize-5899.B-chromeos-3.14/chromeos/scripts/splitconfig
"""See this page for more details:
http://dev.chromium.org/chromium-os/how-tos-and-troubleshooting/kernel-configuration
"""
import os
import re
import sys
allconfigs = {}
# Parse config files
for config in sys.argv[1:]:
allconfigs[config] = set()
for line in open(config):
m = re.match("#*\s*CONFIG_(\w+)[\s=](.*)$", line)
if not m:
continue
option, value = m.groups()
allconfigs[config].add((option, value))
# Split out common config options
common = allconfigs.values()[0].copy()
for config in allconfigs.keys():
common &= allconfigs[config]
for config in allconfigs.keys():
allconfigs[config] -= common
allconfigs["common"] = common
# Generate new splitconfigs
for config in allconfigs.keys():
f = open("split-" + config, "w")
for option, value in sorted(list(allconfigs[config])):
if value == "is not set":
print >>f, "# CONFIG_%s %s" % (option, value)
else:
print >>f, "CONFIG_%s=%s" % (option, value)
f.close()
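# Usage: kconfig-split.py CONFIG1 CONFIG2 ...
# Writes "split-common" (options shared by every input) plus one "split-<name>" file per
# input containing only the options that differ from the common set.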
|
plugins/Operations/Crypto/blowfish_encrypt_dialog.py | nmantani/FileInsight-plugins | 120 | 5092 | #
# Blowfish encrypt - Encrypt selected region with Blowfish
#
# Copyright (c) 2019, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import binascii
import re
import sys
import time
import tkinter
import tkinter.ttk
import tkinter.messagebox
try:
import Cryptodome.Cipher.Blowfish
import Cryptodome.Util.Padding
except ImportError:
exit(-1) # PyCryptodome is not installed
# Print selected items
def encrypt(data, root, cm, ckt, ek, cit, ei):
blowfish_mode = {"ECB":Cryptodome.Cipher.Blowfish.MODE_ECB,
"CBC":Cryptodome.Cipher.Blowfish.MODE_CBC,
"CFB":Cryptodome.Cipher.Blowfish.MODE_CFB,
"OFB":Cryptodome.Cipher.Blowfish.MODE_OFB,
"CTR":Cryptodome.Cipher.Blowfish.MODE_CTR}
mode = cm.get()
key_type = ckt.get()
key = ek.get()
iv_type = cit.get()
iv = ei.get()
if key_type == "Hex":
if re.match("^([0-9A-Fa-f]{2})+$", key):
key = binascii.a2b_hex(key)
else:
tkinter.messagebox.showerror("Error:", message="Key is not in hex format.")
return
else:
key = key.encode()
if mode in ["CBC", "CFB", "OFB", "CTR"] and iv_type == "Hex":
if re.match("^([0-9A-Fa-f]{2})+$", iv):
iv = binascii.a2b_hex(iv)
else:
tkinter.messagebox.showerror("Error:", message="IV is not in hex format.")
return
else:
iv = iv.encode()
if mode in ["CBC", "CFB", "OFB", "CTR"] and len(iv) != Cryptodome.Cipher.Blowfish.block_size:
tkinter.messagebox.showerror("Error:", message="IV size must be %d bytes." % Cryptodome.Cipher.Blowfish.block_size)
return
key_length = len(key)
if key_length < 4 or key_length > 56:
        tkinter.messagebox.showerror("Error:", message="Key size must be in the range from 4 bytes to 56 bytes.")
return
try:
if mode == "CFB":
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], iv, segment_size=Cryptodome.Cipher.Blowfish.block_size * 8)
elif mode in ["CBC", "OFB"]:
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], iv)
elif mode == "CTR": # The first seven bytes of IV are used as nonce and the last byte is used as initial_value (compatible with CyberChef).
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], nonce=iv[0:7], initial_value=iv[7])
else:
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode])
if mode in ["ECB", "CBC"]:
data = Cryptodome.Util.Padding.pad(data, Cryptodome.Cipher.Blowfish.block_size)
d = cipher.encrypt(data)
except Exception as e:
tkinter.messagebox.showerror("Error:", message=e)
root.quit()
exit(1) # Not decrypted
sys.stdout.buffer.write(d)
root.quit()
exit(0) # Decrypted successfully
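# Illustrative sketch, not part of the plugin UI: a standalone equivalent of the CTR
# construction used in encrypt() above - the first seven IV bytes become the nonce and the
# last byte the initial counter value. Assumes PyCryptodome and caller-supplied byte strings
# of valid length.
def _example_blowfish_ctr(key, iv, data):
    cipher = Cryptodome.Cipher.Blowfish.new(key, Cryptodome.Cipher.Blowfish.MODE_CTR,
                                            nonce=iv[0:7], initial_value=iv[7])
    return cipher.encrypt(data)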
def combo_mode_selected(root, cm, cit, ei, lc):
mode = cm.get()
if mode == "ECB":
cit.configure(state = "disabled")
ei.configure(state = "disabled")
else:
cit.configure(state = "readonly")
ei.configure(state = "normal")
if mode == "CTR":
lc.grid()
else:
lc.grid_remove()
# Receive data
data = sys.stdin.buffer.read()
# Create input dialog
root = tkinter.Tk()
root.title("Blowfish encrypt")
root.protocol("WM_DELETE_WINDOW", (lambda r=root: r.quit()))
label_mode = tkinter.Label(root, text="Mode:")
label_mode.grid(row=0, column=0, padx=5, pady=5, sticky="w")
combo_mode = tkinter.ttk.Combobox(root, width=5, state="readonly")
combo_mode["values"] = ("ECB", "CBC", "CFB", "OFB", "CTR")
combo_mode.current(0)
combo_mode.grid(row=0, column=1, padx=5, pady=5, sticky="w")
label_key_type = tkinter.Label(root, text="Key type:")
label_key_type.grid(row=1, column=0, padx=5, pady=5, sticky="w")
combo_key_type = tkinter.ttk.Combobox(root, width=5, state="readonly")
combo_key_type["values"] = ("Text", "Hex")
combo_key_type.current(0)
combo_key_type.grid(row=1, column=1, padx=5, pady=5)
label_key = tkinter.Label(root, text="Key:")
label_key.grid(row=1, column=2, padx=5, pady=5, sticky="w")
entry_key = tkinter.Entry(width=32)
entry_key.grid(row=1, column=3, padx=5, pady=5, sticky="w")
entry_key.focus() # Focus to this widget
label_iv_type = tkinter.Label(root, text="IV type:")
label_iv_type.grid(row=2, column=0, padx=5, pady=5, sticky="w")
combo_iv_type = tkinter.ttk.Combobox(root, width=5, state="readonly")
combo_iv_type["values"] = ("Text", "Hex")
combo_iv_type.current(0)
combo_iv_type.grid(row=2, column=1, padx=5, pady=5)
label_iv = tkinter.Label(root, text="IV:")
label_iv.grid(row=2, column=2, padx=5, pady=5, sticky="w")
entry_iv = tkinter.Entry(width=32)
entry_iv.grid(row=2, column=3, padx=5, pady=5, sticky="w")
button = tkinter.Button(root, text="OK", command=(lambda data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei)))
button.grid(row=3, column=0, padx=5, pady=5, columnspan=4)
label_ctr = tkinter.Label(root, text="Note:\nThe first seven bytes of IV are used as the nonce and the last one\nbyte is used as the initial value of the counter (compatible with\nCyberChef).", justify="left")
label_ctr.grid(row=4, column=0, padx=5, pady=5, columnspan=4, sticky="w")
label_ctr.grid_remove()
# Set callback functions
combo_mode.bind('<<ComboboxSelected>>', lambda event, root=root, cm=combo_mode, cit=combo_iv_type, ei=entry_iv, lc=label_ctr: combo_mode_selected(root, cm, cit, ei, lc))
combo_mode.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
combo_key_type.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
entry_key.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
combo_iv_type.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
entry_iv.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
button.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
# These are disabled in the initial state (ECB mode)
combo_iv_type.configure(state = "disabled")
entry_iv.configure(state = "disabled")
# Adjust window position
sw = root.winfo_screenwidth()
sh = root.winfo_screenheight()
root.update_idletasks() # Necessary to get width and height of the window
ww = root.winfo_width()
wh = root.winfo_height()
root.geometry('+%d+%d' % ((sw/2) - (ww/2), (sh/2) - (wh/2)))
root.mainloop()
exit(1) # Not decrypted
|
tools/lib/auth.py | shoes22/openpilot | 121 | 5097 | #!/usr/bin/env python3
"""
Usage::
usage: auth.py [-h] [{google,apple,github,jwt}] [jwt]
Login to your comma account
positional arguments:
{google,apple,github,jwt}
jwt
optional arguments:
-h, --help show this help message and exit
Examples::
./auth.py # Log in with google account
./auth.py github # Log in with GitHub Account
./auth.py jwt ey......hw # Log in with a JWT from https://jwt.comma.ai, for use in CI
"""
import argparse
import sys
import pprint
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any, Dict
from urllib.parse import parse_qs, urlencode
from tools.lib.api import APIError, CommaApi, UnauthorizedError
from tools.lib.auth_config import set_token, get_token
PORT = 3000
class ClientRedirectServer(HTTPServer):
query_params: Dict[str, Any] = {}
class ClientRedirectHandler(BaseHTTPRequestHandler):
def do_GET(self):
if not self.path.startswith('/auth'):
self.send_response(204)
return
query = self.path.split('?', 1)[-1]
query = parse_qs(query, keep_blank_values=True)
self.server.query_params = query
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(b'Return to the CLI to continue')
def log_message(self, format, *args): # pylint: disable=redefined-builtin
pass # this prevent http server from dumping messages to stdout
def auth_redirect_link(method):
provider_id = {
'google': 'g',
'apple': 'a',
'github': 'h',
}[method]
params = {
'redirect_uri': f"https://api.comma.ai/v2/auth/{provider_id}/redirect/",
'state': f'service,localhost:{PORT}',
}
if method == 'google':
params.update({
'type': 'web_server',
'client_id': '45471411055-ornt4svd2miog6dnopve7qtmh5mnu6id.apps.googleusercontent.com',
'response_type': 'code',
'scope': 'https://www.googleapis.com/auth/userinfo.email',
'prompt': 'select_account',
})
return 'https://accounts.google.com/o/oauth2/auth?' + urlencode(params)
elif method == 'github':
params.update({
'client_id': '28c4ecb54bb7272cb5a4',
'scope': 'read:user',
})
return 'https://github.com/login/oauth/authorize?' + urlencode(params)
elif method == 'apple':
params.update({
'client_id': 'ai.comma.login',
'response_type': 'code',
'response_mode': 'form_post',
'scope': 'name email',
})
return 'https://appleid.apple.com/auth/authorize?' + urlencode(params)
else:
raise NotImplementedError(f"no redirect implemented for method {method}")
def login(method):
oauth_uri = auth_redirect_link(method)
web_server = ClientRedirectServer(('localhost', PORT), ClientRedirectHandler)
print(f'To sign in, use your browser and navigate to {oauth_uri}')
webbrowser.open(oauth_uri, new=2)
while True:
web_server.handle_request()
if 'code' in web_server.query_params:
break
elif 'error' in web_server.query_params:
print('Authentication Error: "%s". Description: "%s" ' % (
web_server.query_params['error'],
web_server.query_params.get('error_description')), file=sys.stderr)
break
try:
auth_resp = CommaApi().post('v2/auth/', data={'code': web_server.query_params['code'], 'provider': web_server.query_params['provider']})
set_token(auth_resp['access_token'])
except APIError as e:
print(f'Authentication Error: {e}', file=sys.stderr)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Login to your comma account')
parser.add_argument('method', default='google', const='google', nargs='?', choices=['google', 'apple', 'github', 'jwt'])
parser.add_argument('jwt', nargs='?')
args = parser.parse_args()
if args.method == 'jwt':
if args.jwt is None:
print("method JWT selected, but no JWT was provided")
exit(1)
set_token(args.jwt)
else:
login(args.method)
try:
me = CommaApi(token=get_token()).get('/v1/me')
print("Authenticated!")
pprint.pprint(me)
except UnauthorizedError:
print("Got invalid JWT")
exit(1)
|
src/xmltollvm.py | Tejvinder/thesis-ghidra | 101 | 5112 | from llvmlite import ir
import xml.etree.ElementTree as et
int32 = ir.IntType(32)
int64 = ir.IntType(64)
int1 = ir.IntType(1)
void_type = ir.VoidType()
function_names = []
registers, functions, uniques, extracts = {}, {}, {}, {}
internal_functions = {}
memory = {}
flags = ["ZF", "CF", "OF", "SF"]
pointers = ["RSP", "RIP", "RBP", "EBP", "ESP"]
def lift(filename):
root = et.parse(filename).getroot()
module = ir.Module(name="lifted")
for register in root.find('globals').findall('register'):
if register.get('name') in flags:
var = ir.GlobalVariable(module, ir.IntType(1), register.get('name'))
var.initializer = ir.Constant(ir.IntType(1), None)
var.linkage = 'internal'
registers[register.get('name')] = var
elif register.get('name') in pointers:
var = ir.GlobalVariable(module, ir.PointerType(ir.IntType(8)), register.get('name'))
var.initializer = ir.Constant(ir.PointerType(ir.IntType(8)), None)
var.linkage = 'internal'
registers[register.get('name')] = var
else:
var = ir.GlobalVariable(module, ir.IntType(8 * int(register.get('size'))), register.get('name'))
var.initializer = ir.Constant(ir.IntType(8 * int(register.get('size'))), None)
var.linkage = 'internal'
registers[register.get('name')] = var
for memory_location in root.find('memory').findall('memory'):
var = ir.GlobalVariable(module, ir.IntType(8 * int(memory_location.get('size'))), memory_location.get('name'))
var.initializer = ir.Constant(ir.IntType(8 * int(memory_location.get('size'))), None)
var.linkage = 'internal'
memory[memory_location.get('name')] = var
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "intra_function_branch")
internal_functions["intra_function_branch"] = ir_func
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "call_indirect")
internal_functions["call_indirect"] = ir_func
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "bit_extraction")
internal_functions["bit_extraction"] = ir_func
for function in root.findall('function'):
name = function.get('name')
x = 1
while name in function_names:
name = name + "_" + str(x)
x += 1
function_names.append(name)
address = function.get('address')
functions[address] = [build_function(name, module), function]
for address in functions:
ir_func, function = functions[address]
populate_func(ir_func, function)
return module
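# Illustrative sketch (assumption: "program.xml" is a hypothetical path to an
# XML file exported from Ghidra; it is not shipped with this module). It shows
# the intended entry point: lift the XML and print the textual LLVM IR.
def _demo_lift(xml_path="program.xml"):
    module = lift(xml_path)
    print(str(module))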
def populate_func(ir_func, function):
builders, blocks = build_cfg(function, ir_func)
if blocks == {}:
return
populate_cfg(function, builders, blocks)
def build_function(name, module):
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, name)
return ir_func
def build_cfg(function, ir_func):
builders, blocks = {}, {}
instructions = function.find("instructions")
if instructions:
block = ir_func.append_basic_block("entry")
blocks["entry"] = block
builders["entry"] = ir.IRBuilder(block)
for instruction in instructions:
address = instruction.find("address").text
block = ir_func.append_basic_block(address)
blocks[address] = block
builders[address] = ir.IRBuilder(block)
return builders, blocks
# noinspection DuplicatedCode
def populate_cfg(function, builders, blocks):
builder = builders["entry"]
stack_size = 10 * 1024 * 1024
stack = builder.alloca(ir.IntType(8), stack_size, name="stack")
stack_top = builder.gep(stack, [ir.Constant(int64, stack_size - 8)], name="stack_top")
builder.store(stack_top, registers["RSP"])
builder.branch(list(blocks.values())[1])
block_iterator = 1
instr = 0
quiter = False
for instruction in function.find("instructions"):
if quiter:
break
address = instruction.find("address").text
if address in builders:
builder = builders[address]
pcodes = instruction.find("pcodes")
pc = 0
no_branch = True
for pcode in pcodes:
pc += 1
mnemonic = pcode.find("name")
if mnemonic.text == "COPY":
output = pcode.find("output")
if output.text in flags and pcode.find("input_0").get("storage") == "constant":
source = ir.Constant(ir.IntType(1), int(pcode.find("input_0").text, 0))
else:
source = fetch_input_varnode(builder, pcode.find("input_0"))
update_output(builder, pcode.find("output"), source)
elif mnemonic.text == "LOAD":
input_1 = pcode.find("input_1")
output = pcode.find("output")
rhs = fetch_input_varnode(builder, input_1)
if input_1.get("storage") == "unique" and output.get("storage") == "unique":
                    # This is incorrect: it treats the operation as a copy, but it should load from the memory address held in input_1
update_output(builder, output, rhs)
else:
if input_1.text in pointers:
rhs = builder.gep(rhs, [ir.Constant(int64, 0)])
result = builder.load(rhs)
update_output(builder, output, result)
elif mnemonic.text == "STORE":
input_1 = pcode.find("input_1") # target
input_2 = pcode.find("input_2") # source
rhs = fetch_input_varnode(builder, input_2)
lhs = fetch_output_varnode(input_1)
lhs2 = builder.gep(lhs, [ir.Constant(int64, 0)])
if lhs2.type != rhs.type.as_pointer():
lhs2 = builder.bitcast(lhs2, rhs.type.as_pointer())
builder.store(rhs, lhs2)
elif mnemonic.text == "BRANCH":
value = pcode.find("input_0").text[2:-2]
if value in functions:
target = functions[value][0]
builder.call(target, [])
elif value in blocks:
target = blocks[value]
builder.branch(target)
no_branch = False
else:
                    # Weird jump to a label inside another function;
                    # it might be modeled with LLVM's callbr instruction.
builder.call(internal_functions["intra_function_branch"], [])
elif mnemonic.text == "CBRANCH":
true_target = blocks[pcode.find("input_0").text[2:-2]]
false_target = list(blocks.values())[block_iterator + 1]
condition = fetch_input_varnode(builder, pcode.find("input_1"))
no_branch = False
builder.cbranch(condition, true_target, false_target)
elif mnemonic.text == "BRANCHIND":
no_branch = False
target = fetch_input_varnode(builder, pcode.find("input_0"))
if not target.type.is_pointer:
target = builder.inttoptr(target, target.type.as_pointer())
builder.branch_indirect(target)
elif mnemonic.text == "CALL":
target = functions[pcode.find("input_0").text[2:-2]][0]
builder.call(target, [])
elif mnemonic.text == "CALLIND":
# target = pcode.find("input_0").text[2:-2]
builder.call(internal_functions["call_indirect"], [])
elif mnemonic.text == "USERDEFINED":
raise Exception("Not implemented")
elif mnemonic.text == "RETURN":
input_1 = pcode.find("input_1")
no_branch = False
if input_1 is None:
builder.ret_void()
else:
raise Exception("Return value being passed")
elif mnemonic.text == "PIECE":
raise Exception("PIECE operation needs to be tested")
elif mnemonic.text == "SUBPIECE":
output = pcode.find("output")
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
if input_1.text == "0x0":
val = fetch_input_varnode(builder, input_0)
result = builder.trunc(val, ir.IntType(int(output.get("size")) * 8))
update_output(builder, output, result)
else:
builder.call(internal_functions['bit_extraction'], [])
elif mnemonic.text == "INT_EQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('==', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_NOTEQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('!=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_LESS":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('<', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SLESS":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_signed('<', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_LESSEQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('<=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SLESS_EQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_signed('<=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_ZEXT":
rhs = fetch_input_varnode(builder, pcode.find("input_0"))
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, rhs.type.pointee)
output = builder.zext(rhs, ir.IntType(int(pcode.find("output").get("size")) * 8))
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SEXT":
rhs = fetch_input_varnode(builder, pcode.find("input_0"))
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, rhs.type.pointee)
output = builder.sext(rhs, ir.IntType(int(pcode.find("output").get("size")) * 8))
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_ADD":
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
lhs = fetch_input_varnode(builder, input_0)
rhs = fetch_input_varnode(builder, input_1)
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
if input_0.text in pointers and input_1.get("storage") == "constant":
result = builder.gep(lhs, [ir.Constant(int64, int(input_1.text, 16))])
else:
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
result = builder.add(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SUB":
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
lhs = fetch_input_varnode(builder, input_0)
rhs = fetch_input_varnode(builder, input_1)
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
if input_0.text in pointers and input_1.get("storage") == "constant":
result = builder.gep(lhs, [ir.Constant(int64, -int(input_1.text, 16))])
else:
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
result = builder.sub(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_CARRY":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.uadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SCARRY":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.sadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SBORROW":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.sadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
            elif mnemonic.text == "INT_2COMP":
                # p-code INT_2COMP is twos-complement (arithmetic) negation
                val = fetch_input_varnode(builder, pcode.find("input_0"))
                result = builder.neg(val)
                update_output(builder, pcode.find("output"), result)
            elif mnemonic.text == "INT_NEGATE":
                # p-code INT_NEGATE is bitwise negation (NOT)
                val = fetch_input_varnode(builder, pcode.find("input_0"))
                result = builder.not_(val)
                update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_XOR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.xor(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_AND":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.and_(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_OR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.or_(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_LEFT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.shl(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_RIGHT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.lshr(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SRIGHT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.ashr(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_MULT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.mul(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_DIV":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
                # INT_DIV is unsigned division in p-code; llvmlite's IRBuilder provides udiv/sdiv, not div
                output = builder.udiv(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_REM":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.urem(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SDIV":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.sdiv(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SREM":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.srem(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "BOOL_NEGATE":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
result = builder.neg(lhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_XOR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.xor(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_AND":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.and_(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_OR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.or_(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "FLOAT_EQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NOTEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_LESS":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_LESSEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ADD":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_SUB":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_MULT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_DIV":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NEG":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ABS":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_SQRT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_CEIL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_FLOOR":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ROUND":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NAN":
raise Exception("Not implemented")
elif mnemonic.text == "INT2FLOAT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT2FLOAT":
raise Exception("Not implemented")
elif mnemonic.text == "TRUNC":
raise Exception("Not implemented")
elif mnemonic.text == "CPOOLREF":
raise Exception("Not implemented")
elif mnemonic.text == "NEW":
raise Exception("Not implemented")
elif mnemonic.text == "MULTIEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "INDIRECT":
raise Exception("Not implemented")
elif mnemonic.text == "PTRADD":
raise Exception("Not implemented")
elif mnemonic.text == "PTRSUB":
raise Exception("Not implemented")
elif mnemonic.text == "CAST":
raise Exception("Not implemented")
else:
raise Exception("Not a standard pcode instruction")
block_iterator += 1
instr += 1
if block_iterator < len(blocks) and no_branch:
builder.branch(list(blocks.values())[block_iterator])
def fetch_input_varnode(builder, name):
var_type = name.get("storage")
var_size = int(name.get("size")) * 8
if var_type == "register":
return builder.load(registers[name.text])
elif var_type == "unique":
if name.text not in list(uniques.keys()):
raise Exception("Temporary variable referenced before defined")
return uniques[name.text]
elif var_type == "constant":
var = ir.Constant(ir.IntType(var_size), int(name.text, 0))
return var
elif var_type == "memory":
return memory[name.text]
def update_output(builder, name, output):
var_type = name.get("storage")
if var_type == "register":
reg = registers[name.text]
if reg.type != output.type.as_pointer():
reg = builder.bitcast(reg, output.type.as_pointer())
builder.store(output, reg)
elif var_type == "unique":
uniques[name.text] = output
def fetch_output_varnode(name):
var_type = name.get("storage")
if var_type == "register":
return registers[name.text]
elif var_type == "unique":
if name.text not in uniques:
uniques[name.text] = None
return uniques[name.text]
def int_check_inputs(builder, lhs, rhs, target):
if lhs.type != target:
if lhs.type.is_pointer:
lhs2 = lhs
lhs = builder.ptrtoint(lhs, target)
if lhs2 == rhs:
rhs = lhs
if rhs.type != target and lhs != rhs:
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, target)
return lhs, rhs
def check_shift_inputs(builder, lhs, rhs, target):
if lhs.type != target:
if lhs.type.is_pointer:
lhs = builder.ptrtoint(lhs, target)
else:
lhs = builder.zext(lhs, target)
if rhs.type != target:
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, target)
else:
rhs = builder.zext(rhs, target)
return lhs, rhs
def int_comparison_check_inputs(builder, lhs, rhs):
# For integer comparison operations. We assume rhs is the correct type.
if lhs.type.is_pointer:
lhs = builder.ptrtoint(lhs, rhs.type)
return lhs, rhs |
example/shovel/bar.py | demiurgestudios/shovel | 202 | 5123 | from shovel import task
@task
def hello(name='Foo'):
'''Prints "Hello, " followed by the provided name.
Examples:
shovel bar.hello
shovel bar.hello --name=Erin
http://localhost:3000/bar.hello?Erin'''
print('Hello, %s' % name)
@task
def args(*args):
    '''Echoes back all the args you give it.
This exists mostly to demonstrate the fact that shovel
is compatible with variable argument functions.
Examples:
shovel bar.args 1 2 3 4
http://localhost:3000/bar.args?1&2&3&4'''
for arg in args:
print('You said "%s"' % arg)
@task
def kwargs(**kwargs):
    '''Echoes back all the kwargs you give it.
This exists mostly to demonstrate that shovel is
    compatible with keyword argument functions.
Examples:
shovel bar.kwargs --foo=5 --bar 5 --howdy hey
http://localhost:3000/bar.kwargs?foo=5&bar=5&howdy=hey'''
for key, val in kwargs.items():
print('You said "%s" => "%s"' % (key, val)) |
scripts/external_libs/scapy-2.4.3/scapy/config.py | timgates42/trex-core | 956 | 5124 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
"""
Implementation of the configuration object.
"""
from __future__ import absolute_import
from __future__ import print_function
import functools
import os
import re
import time
import socket
import sys
from scapy import VERSION, base_classes
from scapy.consts import DARWIN, WINDOWS, LINUX, BSD, SOLARIS
from scapy.error import log_scapy, warning, ScapyInvalidPlatformException
from scapy.modules import six
from scapy.themes import NoTheme, apply_ipython_style
############
# Config #
############
class ConfClass(object):
def configure(self, cnf):
self.__dict__ = cnf.__dict__.copy()
def __repr__(self):
return str(self)
def __str__(self):
s = ""
keys = self.__class__.__dict__.copy()
keys.update(self.__dict__)
keys = sorted(keys)
for i in keys:
if i[0] != "_":
r = repr(getattr(self, i))
r = " ".join(r.split())
wlen = 76 - max(len(i), 10)
if len(r) > wlen:
r = r[:wlen - 3] + "..."
s += "%-10s = %s\n" % (i, r)
return s[:-1]
class Interceptor(object):
def __init__(self, name=None, default=None,
hook=None, args=None, kargs=None):
self.name = name
self.intname = "_intercepted_%s" % name
self.default = default
self.hook = hook
self.args = args if args is not None else []
self.kargs = kargs if kargs is not None else {}
def __get__(self, obj, typ=None):
if not hasattr(obj, self.intname):
setattr(obj, self.intname, self.default)
return getattr(obj, self.intname)
@staticmethod
def set_from_hook(obj, name, val):
int_name = "_intercepted_%s" % name
setattr(obj, int_name, val)
def __set__(self, obj, val):
setattr(obj, self.intname, val)
self.hook(self.name, val, *self.args, **self.kargs)
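def _interceptor_demo():
    """Minimal sketch (not Scapy API): assigning to an Interceptor attribute
    stores the value on the instance and fires the hook. The demo class and
    hook below are hypothetical examples."""
    class _Demo(ConfClass):
        verbosity = Interceptor("verbosity", 0,
                                lambda name, val: log_scapy.debug("%s -> %r", name, val))
    d = _Demo()
    d.verbosity = 3  # the hook fires here
    return d.verbosity  # 3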
def _readonly(name):
default = Conf.__dict__[name].default
Interceptor.set_from_hook(conf, name, default)
raise ValueError("Read-only value !")
ReadOnlyAttribute = functools.partial(
Interceptor,
hook=(lambda name, *args, **kwargs: _readonly(name))
)
ReadOnlyAttribute.__doc__ = "Read-only class attribute"
class ProgPath(ConfClass):
universal_open = "open" if DARWIN else "xdg-open"
pdfreader = universal_open
psreader = universal_open
svgreader = universal_open
dot = "dot"
display = "display"
tcpdump = "tcpdump"
tcpreplay = "tcpreplay"
hexedit = "hexer"
tshark = "tshark"
wireshark = "wireshark"
ifconfig = "ifconfig"
class ConfigFieldList:
def __init__(self):
self.fields = set()
self.layers = set()
@staticmethod
def _is_field(f):
return hasattr(f, "owners")
def _recalc_layer_list(self):
self.layers = {owner for f in self.fields for owner in f.owners}
def add(self, *flds):
self.fields |= {f for f in flds if self._is_field(f)}
self._recalc_layer_list()
def remove(self, *flds):
self.fields -= set(flds)
self._recalc_layer_list()
def __contains__(self, elt):
if isinstance(elt, base_classes.Packet_metaclass):
return elt in self.layers
return elt in self.fields
def __repr__(self):
return "<%s [%s]>" % (self.__class__.__name__, " ".join(str(x) for x in self.fields)) # noqa: E501
class Emphasize(ConfigFieldList):
pass
class Resolve(ConfigFieldList):
pass
class Num2Layer:
def __init__(self):
self.num2layer = {}
self.layer2num = {}
def register(self, num, layer):
self.register_num2layer(num, layer)
self.register_layer2num(num, layer)
def register_num2layer(self, num, layer):
self.num2layer[num] = layer
def register_layer2num(self, num, layer):
self.layer2num[layer] = num
def __getitem__(self, item):
if isinstance(item, base_classes.Packet_metaclass):
return self.layer2num[item]
return self.num2layer[item]
def __contains__(self, item):
if isinstance(item, base_classes.Packet_metaclass):
return item in self.layer2num
return item in self.num2layer
def get(self, item, default=None):
return self[item] if item in self else default
def __repr__(self):
lst = []
for num, layer in six.iteritems(self.num2layer):
if layer in self.layer2num and self.layer2num[layer] == num:
dir = "<->"
else:
dir = " ->"
lst.append((num, "%#6x %s %-20s (%s)" % (num, dir, layer.__name__,
layer._name)))
for layer, num in six.iteritems(self.layer2num):
if num not in self.num2layer or self.num2layer[num] != layer:
lst.append((num, "%#6x <- %-20s (%s)" % (num, layer.__name__,
layer._name)))
lst.sort()
return "\n".join(y for x, y in lst)
class LayersList(list):
def __init__(self):
list.__init__(self)
self.ldict = {}
def __repr__(self):
return "\n".join("%-20s: %s" % (l.__name__, l.name) for l in self)
def register(self, layer):
self.append(layer)
if layer.__module__ not in self.ldict:
self.ldict[layer.__module__] = []
self.ldict[layer.__module__].append(layer)
def layers(self):
result = []
# This import may feel useless, but it is required for the eval below
import scapy # noqa: F401
for lay in self.ldict:
doc = eval(lay).__doc__
result.append((lay, doc.strip().split("\n")[0] if doc else lay))
return result
class CommandsList(list):
def __repr__(self):
s = []
for l in sorted(self, key=lambda x: x.__name__):
doc = l.__doc__.split("\n")[0] if l.__doc__ else "--"
s.append("%-20s: %s" % (l.__name__, doc))
return "\n".join(s)
def register(self, cmd):
self.append(cmd)
return cmd # return cmd so that method can be used as a decorator
def lsc():
"""Displays Scapy's default commands"""
print(repr(conf.commands))
class CacheInstance(dict, object):
__slots__ = ["timeout", "name", "_timetable", "__dict__"]
def __init__(self, name="noname", timeout=None):
self.timeout = timeout
self.name = name
self._timetable = {}
def flush(self):
self.__init__(name=self.name, timeout=self.timeout)
def __getitem__(self, item):
if item in self.__slots__:
return object.__getattribute__(self, item)
val = dict.__getitem__(self, item)
if self.timeout is not None:
t = self._timetable[item]
if time.time() - t > self.timeout:
raise KeyError(item)
return val
def get(self, item, default=None):
# overloading this method is needed to force the dict to go through
# the timetable check
try:
return self[item]
except KeyError:
return default
def __setitem__(self, item, v):
if item in self.__slots__:
return object.__setattr__(self, item, v)
self._timetable[item] = time.time()
dict.__setitem__(self, item, v)
def update(self, other):
for key, value in six.iteritems(other):
# We only update an element from `other` either if it does
# not exist in `self` or if the entry in `self` is older.
if key not in self or self._timetable[key] < other._timetable[key]:
dict.__setitem__(self, key, value)
self._timetable[key] = other._timetable[key]
def iteritems(self):
if self.timeout is None:
return six.iteritems(self.__dict__)
t0 = time.time()
return ((k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def iterkeys(self):
if self.timeout is None:
return six.iterkeys(self.__dict__)
t0 = time.time()
return (k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def __iter__(self):
return six.iterkeys(self.__dict__)
def itervalues(self):
if self.timeout is None:
return six.itervalues(self.__dict__)
t0 = time.time()
return (v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def items(self):
if self.timeout is None:
return dict.items(self)
t0 = time.time()
return [(k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def keys(self):
if self.timeout is None:
return dict.keys(self)
t0 = time.time()
return [k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def values(self):
if self.timeout is None:
return list(six.itervalues(self))
t0 = time.time()
return [v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def __len__(self):
if self.timeout is None:
return dict.__len__(self)
return len(self.keys())
def summary(self):
return "%s: %i valid items. Timeout=%rs" % (self.name, len(self), self.timeout) # noqa: E501
def __repr__(self):
s = []
if self:
mk = max(len(k) for k in six.iterkeys(self.__dict__))
fmt = "%%-%is %%s" % (mk + 1)
for item in six.iteritems(self.__dict__):
s.append(fmt % item)
return "\n".join(s)
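def _cache_demo():
    """Minimal sketch (not Scapy API): CacheInstance behaves like a dict whose
    entries expire `timeout` seconds after they are inserted."""
    c = CacheInstance(name="demo", timeout=10.0)
    c["a"] = 1
    return c.get("a")  # 1 for the next 10 seconds, then None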
class NetCache:
def __init__(self):
self._caches_list = []
def add_cache(self, cache):
self._caches_list.append(cache)
setattr(self, cache.name, cache)
def new_cache(self, name, timeout=None):
c = CacheInstance(name=name, timeout=timeout)
self.add_cache(c)
def __delattr__(self, attr):
raise AttributeError("Cannot delete attributes")
def update(self, other):
for co in other._caches_list:
if hasattr(self, co.name):
getattr(self, co.name).update(co)
else:
self.add_cache(co.copy())
def flush(self):
for c in self._caches_list:
c.flush()
def __repr__(self):
return "\n".join(c.summary() for c in self._caches_list)
def _version_checker(module, minver):
    """Checks that module has a higher version than minver.
params:
- module: a module to test
- minver: a tuple of versions
"""
# We could use LooseVersion, but distutils imports imp which is deprecated
version_regexp = r'[a-z]?((?:\d|\.)+\d+)(?:\.dev[0-9]+)?'
version_tags = re.match(version_regexp, module.__version__)
if not version_tags:
return False
version_tags = version_tags.group(1).split(".")
version_tags = tuple(int(x) for x in version_tags)
return version_tags >= minver
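def _version_checker_demo():
    """Illustrative sketch (not Scapy API): _version_checker compares a module's
    __version__ string against a minimum version tuple. The bundled six module
    is used here only as a convenient example."""
    return _version_checker(six, (1, 0))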
def isCryptographyValid():
"""
Check if the cryptography library is present, and if it is recent enough
for most usages in scapy (v1.7 or later).
"""
try:
import cryptography
except ImportError:
return False
return _version_checker(cryptography, (1, 7))
def isCryptographyRecent():
"""
Check if the cryptography library is recent (2.0 and later)
"""
try:
import cryptography
except ImportError:
return False
return _version_checker(cryptography, (2, 0))
def isCryptographyAdvanced():
"""
Check if the cryptography library is present, and if it supports X25519,
ChaCha20Poly1305 and such (v2.0 or later).
"""
try:
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey # noqa: E501
X25519PrivateKey.generate()
except Exception:
return False
else:
return True
def isPyPy():
    """Returns whether Scapy is running under PyPy or not"""
try:
import __pypy__ # noqa: F401
return True
except ImportError:
return False
def _prompt_changer(attr, val):
"""Change the current prompt theme"""
try:
sys.ps1 = conf.color_theme.prompt(conf.prompt)
except Exception:
pass
try:
apply_ipython_style(get_ipython())
except NameError:
pass
def _set_conf_sockets():
"""Populate the conf.L2Socket and conf.L3Socket
according to the various use_* parameters
"""
from scapy.main import _load
if conf.use_bpf and not BSD:
Interceptor.set_from_hook(conf, "use_bpf", False)
raise ScapyInvalidPlatformException("BSD-like (OSX, *BSD...) only !")
if not conf.use_pcap and SOLARIS:
Interceptor.set_from_hook(conf, "use_pcap", True)
raise ScapyInvalidPlatformException(
"Scapy only supports libpcap on Solaris !"
)
# we are already in an Interceptor hook, use Interceptor.set_from_hook
if conf.use_pcap or conf.use_dnet:
try:
from scapy.arch.pcapdnet import L2pcapListenSocket, L2pcapSocket, \
L3pcapSocket
except (OSError, ImportError):
warning("No libpcap provider available ! pcap won't be used")
Interceptor.set_from_hook(conf, "use_pcap", False)
else:
conf.L3socket = L3pcapSocket
conf.L3socket6 = functools.partial(L3pcapSocket, filter="ip6")
conf.L2socket = L2pcapSocket
conf.L2listen = L2pcapListenSocket
# Update globals
_load("scapy.arch.pcapdnet")
return
if conf.use_bpf:
from scapy.arch.bpf.supersocket import L2bpfListenSocket, \
L2bpfSocket, L3bpfSocket
conf.L3socket = L3bpfSocket
conf.L3socket6 = functools.partial(L3bpfSocket, filter="ip6")
conf.L2socket = L2bpfSocket
conf.L2listen = L2bpfListenSocket
# Update globals
_load("scapy.arch.bpf")
return
if LINUX:
from scapy.arch.linux import L3PacketSocket, L2Socket, L2ListenSocket
conf.L3socket = L3PacketSocket
conf.L3socket6 = functools.partial(L3PacketSocket, filter="ip6")
conf.L2socket = L2Socket
conf.L2listen = L2ListenSocket
# Update globals
_load("scapy.arch.linux")
return
if WINDOWS:
from scapy.arch.windows import _NotAvailableSocket
from scapy.arch.windows.native import L3WinSocket, L3WinSocket6
conf.L3socket = L3WinSocket
conf.L3socket6 = L3WinSocket6
conf.L2socket = _NotAvailableSocket
conf.L2listen = _NotAvailableSocket
# No need to update globals on Windows
return
from scapy.supersocket import L3RawSocket
from scapy.layers.inet6 import L3RawSocket6
conf.L3socket = L3RawSocket
conf.L3socket6 = L3RawSocket6
def _socket_changer(attr, val):
if not isinstance(val, bool):
raise TypeError("This argument should be a boolean")
dependencies = { # Things that will be turned off
"use_pcap": ["use_bpf"],
"use_bpf": ["use_pcap"],
}
restore = {k: getattr(conf, k) for k in dependencies}
del restore[attr] # This is handled directly by _set_conf_sockets
if val: # Only if True
for param in dependencies[attr]:
Interceptor.set_from_hook(conf, param, False)
try:
_set_conf_sockets()
except (ScapyInvalidPlatformException, ImportError) as e:
for key, value in restore.items():
Interceptor.set_from_hook(conf, key, value)
if isinstance(e, ScapyInvalidPlatformException):
raise
def _loglevel_changer(attr, val):
"""Handle a change of conf.logLevel"""
log_scapy.setLevel(val)
class Conf(ConfClass):
"""This object contains the configuration of Scapy.
session : filename where the session will be saved
interactive_shell : can be "ipython", "python" or "auto". Default: Auto
    stealth : if 1, prevents any unwanted packet from going out (ARP, DNS, ...)
checkIPID: if 0, doesn't check that IPID matches between IP sent and ICMP IP citation received # noqa: E501
if 1, checks that they either are equal or byte swapped equals (bug in some IP stacks) # noqa: E501
if 2, strictly checks that they are equals
checkIPsrc: if 1, checks IP src in IP and ICMP IP citation match (bug in some NAT stacks) # noqa: E501
checkIPinIP: if True, checks that IP-in-IP layers match. If False, do not
check IP layers that encapsulates another IP layer
check_TCPerror_seqack: if 1, also check that TCP seq and ack match the ones in ICMP citation # noqa: E501
    iff : selects the default output interface for srp() and sendp(). default: "eth0"  # noqa: E501
verb : level of verbosity, from 0 (almost mute) to 3 (verbose)
promisc : default mode for listening socket (to get answers if you spoof on a lan) # noqa: E501
sniff_promisc : default mode for sniff()
filter : bpf filter added to every sniffing socket to exclude traffic from analysis # noqa: E501
histfile : history file
padding : includes padding in disassembled packets
except_filter : BPF filter for packets to ignore
debug_match : when 1, store received packet that are not matched into debug.recv # noqa: E501
route : holds the Scapy routing table and provides methods to manipulate it
warning_threshold : how much time between warnings from the same place
ASN1_default_codec: Codec used by default for ASN1 objects
mib : holds MIB direct access dictionary
resolve : holds list of fields for which resolution should be done
noenum : holds list of enum fields for which conversion to string should NOT be done # noqa: E501
AS_resolver: choose the AS resolver class to use
extensions_paths: path or list of paths where extensions are to be looked for
contribs : a dict which can be used by contrib layers to store local configuration # noqa: E501
    debug_tls: When 1, print some TLS session secrets when they are computed.
recv_poll_rate: how often to check for new packets. Defaults to 0.05s.
"""
version = ReadOnlyAttribute("version", VERSION)
session = ""
interactive = False
interactive_shell = ""
stealth = "not implemented"
iface = None
iface6 = None
layers = LayersList()
commands = CommandsList()
dot15d4_protocol = None # Used in dot15d4.py
logLevel = Interceptor("logLevel", log_scapy.level, _loglevel_changer)
checkIPID = False
checkIPsrc = True
checkIPaddr = True
checkIPinIP = True
check_TCPerror_seqack = False
verb = 2
prompt = Interceptor("prompt", ">>> ", _prompt_changer)
promisc = True
sniff_promisc = 1
raw_layer = None
raw_summary = False
default_l2 = None
l2types = Num2Layer()
l3types = Num2Layer()
L3socket = None
L3socket6 = None
L2socket = None
L2listen = None
BTsocket = None
USBsocket = None
min_pkt_size = 60
bufsize = 2**16
histfile = os.getenv('SCAPY_HISTFILE',
os.path.join(os.path.expanduser("~"),
".scapy_history"))
padding = 1
except_filter = ""
debug_match = False
debug_tls = False
wepkey = ""
cache_iflist = {}
route = None # Filed by route.py
route6 = None # Filed by route6.py
auto_fragment = True
debug_dissector = False
color_theme = Interceptor("color_theme", NoTheme(), _prompt_changer)
warning_threshold = 5
prog = ProgPath()
resolve = Resolve()
noenum = Resolve()
emph = Emphasize()
use_pypy = ReadOnlyAttribute("use_pypy", isPyPy())
use_pcap = Interceptor(
"use_pcap",
os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y"),
_socket_changer
)
# XXX use_dnet is deprecated
use_dnet = os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y")
use_bpf = Interceptor("use_bpf", False, _socket_changer)
use_npcap = False
ipv6_enabled = socket.has_ipv6
extensions_paths = "."
stats_classic_protocols = []
stats_dot11_protocols = []
temp_files = []
netcache = NetCache()
geoip_city = None
# can, tls, http are not loaded by default
load_layers = ['bluetooth', 'bluetooth4LE', 'dhcp', 'dhcp6', 'dns',
'dot11', 'dot15d4', 'eap', 'gprs', 'hsrp', 'inet',
'inet6', 'ipsec', 'ir', 'isakmp', 'l2', 'l2tp',
'llmnr', 'lltd', 'mgcp', 'mobileip', 'netbios',
'netflow', 'ntp', 'ppi', 'ppp', 'pptp', 'radius', 'rip',
'rtp', 'sctp', 'sixlowpan', 'skinny', 'smb', 'snmp',
'tftp', 'vrrp', 'vxlan', 'x509', 'zigbee']
contribs = dict()
crypto_valid = isCryptographyValid()
crypto_valid_recent = isCryptographyRecent()
crypto_valid_advanced = crypto_valid_recent and isCryptographyAdvanced()
fancy_prompt = True
auto_crop_tables = True
recv_poll_rate = 0.05
def __getattr__(self, attr):
# Those are loaded on runtime to avoid import loops
if attr == "manufdb":
from scapy.data import MANUFDB
return MANUFDB
if attr == "ethertypes":
from scapy.data import ETHER_TYPES
return ETHER_TYPES
if attr == "protocols":
from scapy.data import IP_PROTOS
return IP_PROTOS
if attr == "services_udp":
from scapy.data import UDP_SERVICES
return UDP_SERVICES
if attr == "services_tcp":
from scapy.data import TCP_SERVICES
return TCP_SERVICES
return object.__getattr__(self, attr)
if not Conf.ipv6_enabled:
log_scapy.warning("IPv6 support disabled in Python. Cannot load Scapy IPv6 layers.") # noqa: E501
for m in ["inet6", "dhcp6"]:
if m in Conf.load_layers:
Conf.load_layers.remove(m)
conf = Conf()
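def _conf_usage_sketch():
    """Illustrative sketch (not part of Scapy): typical tweaks applied to the
    global `conf` object after import. The values are examples, not defaults
    recommended by Scapy."""
    conf.verb = 0                  # silence send/receive summaries
    conf.recv_poll_rate = 0.1      # poll for new packets less often
    conf.color_theme = NoTheme()   # disable colored prompt/output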
def crypto_validator(func):
"""
    This is a decorator to be used for any method relying on the cryptography library.  # noqa: E501
Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'.
"""
def func_in(*args, **kwargs):
if not conf.crypto_valid:
raise ImportError("Cannot execute crypto-related method! "
"Please install python-cryptography v1.7 or later.") # noqa: E501
return func(*args, **kwargs)
return func_in
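@crypto_validator
def _crypto_validator_demo():
    """Minimal sketch (hypothetical helper, not Scapy API): calling this while
    python-cryptography is missing or too old raises the ImportError above."""
    import cryptography
    return cryptography.__version__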
|
tools/ldbc_benchmark/neo4j/load_scripts/time_index.py | carlboudreau007/ecosys | 245 | 5132 | from datetime import datetime
with open('/home/neo4j/neo4j-community-3.5.1/logs/debug.log', 'r') as log:
begin = []
end = []
for line in log:
if 'Index population started' in line:
begin.append(line[:23])
elif 'Index creation finished' in line:
end.append(line[:23])
if len(begin) == 0 or len(begin) > 9:
print("Something went wrong. Please check debug.log")
elif len(begin) != len(end):
print("{}/{} Done. Please come back later.".format(len(end), len(begin)))
else:
elapsed_time = 0
        for i in range(len(begin)):
begin_tmp = datetime.strptime(begin[i], '%Y-%m-%d %H:%M:%S.%f')
end_tmp = datetime.strptime(end[i],'%Y-%m-%d %H:%M:%S.%f')
elapsed_time += (end_tmp-begin_tmp).total_seconds()
print("Done in {} s".format(elapsed_time))
|
examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_terrain_randomizer.py | felipeek/bullet3 | 9,136 | 5145 | """Generates a random terrain at Minitaur gym environment reset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
parentdir = os.path.dirname(os.path.dirname(parentdir))
os.sys.path.insert(0, parentdir)
import itertools
import math
import enum
import numpy as np
from pybullet_envs.minitaur.envs import env_randomizer_base
_GRID_LENGTH = 15
_GRID_WIDTH = 10
_MAX_SAMPLE_SIZE = 30
_MIN_BLOCK_DISTANCE = 0.7
_MAX_BLOCK_LENGTH = _MIN_BLOCK_DISTANCE
_MIN_BLOCK_LENGTH = _MAX_BLOCK_LENGTH / 2
_MAX_BLOCK_HEIGHT = 0.05
_MIN_BLOCK_HEIGHT = _MAX_BLOCK_HEIGHT / 2
class PoissonDisc2D(object):
"""Generates 2D points using Poisson disk sampling method.
Implements the algorithm described in:
http://www.cs.ubc.ca/~rbridson/docs/bridson-siggraph07-poissondisk.pdf
  Unlike uniform sampling, which tends to create small clusters of points, the
  Poisson disk method enforces a minimum distance between points and is more
  suitable for generating a spatial distribution of non-overlapping objects.
"""
def __init__(self, grid_length, grid_width, min_radius, max_sample_size):
"""Initializes the algorithm.
Args:
grid_length: The length of the bounding square in which points are
sampled.
grid_width: The width of the bounding square in which points are
sampled.
min_radius: The minimum distance between any pair of points.
      max_sample_size: The maximum number of sample points around an active site.
See details in the algorithm description.
"""
self._cell_length = min_radius / math.sqrt(2)
self._grid_length = grid_length
self._grid_width = grid_width
self._grid_size_x = int(grid_length / self._cell_length) + 1
self._grid_size_y = int(grid_width / self._cell_length) + 1
self._min_radius = min_radius
self._max_sample_size = max_sample_size
    # Flatten the 2D grid into a 1D array. The grid is used for fast nearest-
    # point searches.
self._grid = [None] * self._grid_size_x * self._grid_size_y
# Generate the first sample point and set it as an active site.
first_sample = np.array(np.random.random_sample(2)) * [grid_length, grid_width]
self._active_list = [first_sample]
# Also store the sample point in the grid.
self._grid[self._point_to_index_1d(first_sample)] = first_sample
def _point_to_index_1d(self, point):
"""Computes the index of a point in the grid array.
Args:
point: A 2D point described by its coordinates (x, y).
Returns:
The index of the point within the self._grid array.
"""
return self._index_2d_to_1d(self._point_to_index_2d(point))
def _point_to_index_2d(self, point):
"""Computes the 2D index (aka cell ID) of a point in the grid.
Args:
point: A 2D point (list) described by its coordinates (x, y).
Returns:
x_index: The x index of the cell the point belongs to.
y_index: The y index of the cell the point belongs to.
"""
x_index = int(point[0] / self._cell_length)
y_index = int(point[1] / self._cell_length)
return x_index, y_index
def _index_2d_to_1d(self, index2d):
"""Converts the 2D index to the 1D position in the grid array.
Args:
index2d: The 2D index of a point (aka the cell ID) in the grid.
Returns:
The 1D position of the cell within the self._grid array.
"""
return index2d[0] + index2d[1] * self._grid_size_x
def _is_in_grid(self, point):
"""Checks if the point is inside the grid boundary.
Args:
point: A 2D point (list) described by its coordinates (x, y).
Returns:
Whether the point is inside the grid.
"""
return (0 <= point[0] < self._grid_length) and (0 <= point[1] < self._grid_width)
def _is_in_range(self, index2d):
"""Checks if the cell ID is within the grid.
Args:
index2d: The 2D index of a point (aka the cell ID) in the grid.
Returns:
Whether the cell (2D index) is inside the grid.
"""
return (0 <= index2d[0] < self._grid_size_x) and (0 <= index2d[1] < self._grid_size_y)
def _is_close_to_existing_points(self, point):
"""Checks if the point is close to any already sampled (and stored) points.
Args:
point: A 2D point (list) described by its coordinates (x, y).
Returns:
True iff the distance of the point to any existing points is smaller than
the min_radius
"""
px, py = self._point_to_index_2d(point)
# Now we can check nearby cells for existing points
    for neighbor_cell in itertools.product(range(px - 1, px + 2), range(py - 1, py + 2)):
if not self._is_in_range(neighbor_cell):
continue
maybe_a_point = self._grid[self._index_2d_to_1d(neighbor_cell)]
if maybe_a_point is not None and np.linalg.norm(maybe_a_point - point) < self._min_radius:
return True
return False
def sample(self):
"""Samples new points around some existing point.
    Removes the sampling base point and also stores the newly sampled points if
they are far enough from all existing points.
"""
active_point = self._active_list.pop()
    for _ in range(self._max_sample_size):
# Generate random points near the current active_point between the radius
random_radius = np.random.uniform(self._min_radius, 2 * self._min_radius)
random_angle = np.random.uniform(0, 2 * math.pi)
# The sampled 2D points near the active point
sample = random_radius * np.array([np.cos(random_angle),
np.sin(random_angle)]) + active_point
if not self._is_in_grid(sample):
continue
if self._is_close_to_existing_points(sample):
continue
self._active_list.append(sample)
self._grid[self._point_to_index_1d(sample)] = sample
def generate(self):
"""Generates the Poisson disc distribution of 2D points.
Although the while loop looks scary, the algorithm is in fact O(N), where N
is the number of cells within the grid. When we sample around a base point
(in some base cell), new points will not be pushed into the base cell
because of the minimum distance constraint. Once the current base point is
removed, all future searches cannot start from within the same base cell.
Returns:
      All sampled points. The points are inside the square [0, grid_length] x [0,
grid_width]
"""
while self._active_list:
self.sample()
all_sites = []
for p in self._grid:
if p is not None:
all_sites.append(p)
return all_sites
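def _poisson_disc_demo():
  """Minimal sketch (not part of the gym env): samples non-overlapping block
  centers on the module-level grid; every pair of returned points is at least
  _MIN_BLOCK_DISTANCE apart."""
  sampler = PoissonDisc2D(_GRID_LENGTH, _GRID_WIDTH, _MIN_BLOCK_DISTANCE, _MAX_SAMPLE_SIZE)
  return sampler.generate()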
class TerrainType(enum.Enum):
"""The randomzied terrain types we can use in the gym env."""
RANDOM_BLOCKS = 1
TRIANGLE_MESH = 2
class MinitaurTerrainRandomizer(env_randomizer_base.EnvRandomizerBase):
"""Generates an uneven terrain in the gym env."""
def __init__(self,
terrain_type=TerrainType.TRIANGLE_MESH,
mesh_filename="robotics/reinforcement_learning/minitaur/envs/testdata/"
"triangle_mesh_terrain/terrain9735.obj",
mesh_scale=None):
"""Initializes the randomizer.
Args:
terrain_type: Whether to generate random blocks or load a triangle mesh.
mesh_filename: The mesh file to be used. The mesh will only be loaded if
terrain_type is set to TerrainType.TRIANGLE_MESH.
mesh_scale: the scaling factor for the triangles in the mesh file.
"""
self._terrain_type = terrain_type
self._mesh_filename = mesh_filename
self._mesh_scale = mesh_scale if mesh_scale else [1.0, 1.0, 0.3]
def randomize_env(self, env):
"""Generate a random terrain for the current env.
Args:
env: A minitaur gym environment.
"""
if self._terrain_type is TerrainType.TRIANGLE_MESH:
self._load_triangle_mesh(env)
if self._terrain_type is TerrainType.RANDOM_BLOCKS:
self._generate_convex_blocks(env)
def _load_triangle_mesh(self, env):
"""Represents the random terrain using a triangle mesh.
    It is possible for a Minitaur leg to get stuck at the common edge of two
    triangle pieces. To prevent this from happening, we recommend using hard
    contacts (or high stiffness values) for the Minitaur foot in simulation.
Args:
env: A minitaur gym environment.
"""
env.pybullet_client.removeBody(env.ground_id)
terrain_collision_shape_id = env.pybullet_client.createCollisionShape(
shapeType=env.pybullet_client.GEOM_MESH,
fileName=self._mesh_filename,
flags=1,
meshScale=self._mesh_scale)
env.ground_id = env.pybullet_client.createMultiBody(
baseMass=0, baseCollisionShapeIndex=terrain_collision_shape_id, basePosition=[0, 0, 0])
def _generate_convex_blocks(self, env):
"""Adds random convex blocks to the flat ground.
    We use the Poisson disk algorithm to add some random blocks on the ground.
    The Poisson disk algorithm sets a minimum distance between two sampling
    points, thus avoiding the clustering effect of a uniform N-D distribution.
Args:
env: A minitaur gym environment.
"""
poisson_disc = PoissonDisc2D(_GRID_LENGTH, _GRID_WIDTH, _MIN_BLOCK_DISTANCE, _MAX_SAMPLE_SIZE)
block_centers = poisson_disc.generate()
for center in block_centers:
# We want the blocks to be in front of the robot.
shifted_center = np.array(center) - [2, _GRID_WIDTH / 2]
# Do not place blocks near the point [0, 0], where the robot will start.
if abs(shifted_center[0]) < 1.0 and abs(shifted_center[1]) < 1.0:
continue
half_length = np.random.uniform(_MIN_BLOCK_LENGTH, _MAX_BLOCK_LENGTH) / (2 * math.sqrt(2))
half_height = np.random.uniform(_MIN_BLOCK_HEIGHT, _MAX_BLOCK_HEIGHT) / 2
box_id = env.pybullet_client.createCollisionShape(
env.pybullet_client.GEOM_BOX, halfExtents=[half_length, half_length, half_height])
env.pybullet_client.createMultiBody(
baseMass=0,
baseCollisionShapeIndex=box_id,
basePosition=[shifted_center[0], shifted_center[1], half_height])
|