code
stringlengths 10
805k
| def_use_chains
sequencelengths 0
667
|
---|---|
# NOTE: A place for helper utilities and decorators.
from wtoolzexceptions import exceptions
import flask
import marshmallow
def parse(schema, location):
if location == "args":
p = flask.request.args
elif location == "json":
p = flask.request.json
elif location == "view_args":
p = flask.request.view_args
else:
raise ValueError("location not args, json, or view_args.")
try:
return schema.load(p)
except marshmallow.ValidationError:
exceptions.ohoh(400)
| [
[
[
84,
94
],
[
510,
520
]
],
[
[
102,
107
],
[
197,
202
],
[
257,
262
],
[
322,
327
]
],
[
[
115,
126
],
[
473,
484
]
],
[
[
133,
138
]
]
] |
#!/usr/bin/python
# Victor del Pino
import sys
import re
linea = 0
ignorar = ""
contador = 100
impreso=0
"""
f1 = open('../ext/movies/movies.csv', 'r')
for line in f1:
if linea == 0:
re.sub(r'^\W+|\W+$', '', ignorar)
linea = linea + 1
else:
line = re.sub(r'^\W+|\W+$', '', line) # parsear linea
numbers = line.split(',', 4) # se trocea la linea para obtener los datos
print(numbers[0] + "\t" + numbers[1] + "\t-1")
f1.close()
f2 = open('../ext/movies/ratings.csv', 'r')
for line in f2:
if linea == 0:
re.sub(r'^\W+|\W+$', '', ignorar)
linea = linea + 1
else:
line = re.sub(r'^\W+|\W+$', '', line) # parsear linea
numbers = line.split(',', 4) # se trocea la linea para obtener los datos
print(numbers[1] + "\t-1" + "\t" + numbers[2])
f2.close()
"""
"""
"""
f1 = open('../ext/movies/movies.csv', 'r')
f2 = open('../ext/movies/ratings.csv', 'r')
for line in f1:
if linea == 0:
re.sub(r'^\W+|\W+$', '', ignorar)
linea = linea + 1
else:
line = re.sub(r'^\W+|\W+$', '', line) # parsear linea
numbers = line.split(',', 4) # se trocea la linea para obtener los datos
linea2 = 0
for line2 in f2:
if linea2 == 0:
re.sub(r'^\W+|\W+$', '', ignorar)
linea2 = linea2 + 1
else:
line2 = re.sub(r'^\W+|\W+$', '', line2) # parsear linea
numbers2 = line2.split(',', 4) # se trocea la linea para obtener los datos
if numbers[0] == numbers2[1]:
print(numbers[1] + "\t" + numbers2[2])
impreso = 1
f2.seek(0)
if impreso == 0:
print(numbers[0] + "\t" + numbers[1] + "\t-1")
impreso = 0
f1.close()
f2.close()
"""
"""
"""
for line in sys.stdin:
if linea == 0:
re.sub(r'^\W+|\W+$', '', ignorar)
linea = linea + 1
else:
line = re.sub(r'^\W+|\W+$', '', line) # parsear linea
numbers = line.split(',', 4) # se trocea la linea para obtener los datos
print(numbers[1] + "\t" + numbers[2])
""" | [
[
[
44,
47
]
],
[
[
55,
57
],
[
995,
997
],
[
1080,
1082
],
[
1299,
1301
],
[
1411,
1413
]
],
[
[
59,
64
],
[
975,
980
],
[
1045,
1050
]
],
[
[
69,
76
],
[
1020,
1027
],
[
1324,
1331
]
],
[
[
83,
91
]
],
[
[
98,
105
],
[
1720,
1727
]
],
[
[
865,
867
],
[
964,
966
],
[
1814,
1816
]
],
[
[
908,
910
],
[
1251,
1253
],
[
1698,
1700
],
[
1825,
1827
]
],
[
[
956,
960
],
[
1105,
1109
]
],
[
[
1037,
1042
],
[
975,
980
],
[
1045,
1050
]
],
[
[
1073,
1077
],
[
1146,
1150
]
],
[
[
1136,
1143
],
[
1572,
1579
],
[
1625,
1632
],
[
1752,
1759
],
[
1772,
1779
]
],
[
[
1219,
1225
],
[
1270,
1276
],
[
1358,
1364
]
],
[
[
1242,
1247
],
[
1436,
1441
]
],
[
[
1349,
1355
],
[
1270,
1276
],
[
1358,
1364
]
],
[
[
1403,
1408
],
[
1487,
1492
]
],
[
[
1476,
1484
],
[
1586,
1594
],
[
1645,
1653
]
],
[
[
1678,
1685
],
[
1720,
1727
]
],
[
[
1801,
1808
],
[
1720,
1727
]
]
] |
"""
Functions for calculating LOFAR hardware specific properties.
"""
import tkp.telescope.lofar.antennaarrays
import tkp.telescope.lofar.beam
import tkp.telescope.lofar.noise
import tkp.telescope.lofar.quality
| [
[
[
77,
110
]
],
[
[
118,
142
]
],
[
[
150,
175
]
],
[
[
183,
210
]
]
] |
from collections import namedtuple
Item = namedtuple('Item', ['item', 'at'])
ObsItem = namedtuple('ObsItem', ['at'])
Link = namedtuple('Link', ['from_x', 'from_y', 'to_x', 'to_y'])
class Observable(object):
def __init__(self, start, is_child=False):
self.label = None
self.start = start
self.end = start
self.is_child = is_child
self.items = []
self.completed = None
self.error = None
def set_label(self, label):
self.label = label
def on_next_at(self, item, at):
self.items.append(Item(item, at))
def on_observable_at(self, at):
self.items.append(ObsItem(at))
def on_completed_at(self, at):
self.completed = at
self.end = at
def on_error_at(self, at):
self.error = at
self.end = at
def on_continued_at(self, at):
self.end = at
class Operator(object):
def __init__(self, start, end, text):
self.start = start
self.end = end
self.text = text
class Marble(object):
def __init__(self):
self.layers = []
self.higher_order_links = []
self.item_links = []
self.label_links = []
return
def add_observable(self, observable):
self.layers.append(observable)
def add_operator(self, operator):
self.layers.append(operator)
def _compute_higher_order_links(self):
def nearest_links(parents, children):
links = []
for parent in parents:
dist = None
nearest = None
for child in children:
d = abs(parent[0] - child[0])
if nearest is None or d < dist:
dist = d
nearest = child
if nearest is not None:
links.append(Link(
from_x=parent[0], from_y=parent[1],
to_x=nearest[0], to_y=nearest[1],
))
return links
children = []
parents = []
links = []
for layer_index, layer in enumerate(self.layers):
if type(layer) is Operator:
links.extend(nearest_links(parents, children))
children.clear()
parents.clear()
elif type(layer) is Observable:
if layer.is_child is True:
children.append((layer.start, layer_index))
else:
for item in layer.items:
if type(item) is ObsItem:
parents.append((item.at, layer_index))
links.extend(nearest_links(parents, children))
return links
@staticmethod
def _append_links(links, top_layer, bottom_layer, items):
for item in items:
if top_layer is not None:
links.append(Link(
from_x=item[0], from_y=top_layer,
to_x=item[0], to_y=item[1],
))
if bottom_layer is not None:
links.append(Link(
from_x=item[0], from_y=item[1],
to_x=item[0], to_y=bottom_layer,
))
return links
def _compute_item_links(self):
top_layer = None
items = []
links = []
for layer_index, layer in enumerate(self.layers):
if type(layer) is Operator:
Marble._append_links(links, top_layer, layer_index, items)
items.clear()
top_layer = layer_index
elif type(layer) is Observable:
if layer.label is None:
for item in layer.items:
items.append((item.at, layer_index))
Marble._append_links(links, top_layer, None, items)
return links
def _compute_label_links(self):
top_layer = None
items = []
links = []
for layer_index, layer in enumerate(self.layers):
if type(layer) is Operator:
Marble._append_links(links, top_layer, layer_index, items)
items.clear()
top_layer = layer_index
elif type(layer) is Observable:
if layer.label is not None:
items.append((layer.start, layer_index))
Marble._append_links(links, top_layer, None, items)
return links
def build(self):
self.higher_order_links = self._compute_higher_order_links()
self.item_links = self._compute_item_links()
self.label_links = self._compute_label_links()
| [
[
[
24,
34
],
[
43,
53
],
[
88,
98
],
[
125,
135
]
],
[
[
36,
40
],
[
571,
575
]
],
[
[
78,
85
],
[
650,
657
],
[
2586,
2593
]
],
[
[
118,
122
],
[
2914,
2918
],
[
3111,
3115
],
[
1865,
1869
]
],
[
[
190,
200
],
[
2359,
2369
],
[
3636,
3646
],
[
4251,
4261
]
],
[
[
893,
901
],
[
2189,
2197
],
[
3449,
3457
],
[
4064,
4072
]
],
[
[
1036,
1042
],
[
3475,
3481
],
[
3803,
3809
],
[
4090,
4096
],
[
4377,
4383
]
]
] |
# RUN WITH /usr/bin/python3 minet.py (python 3.6)
import sys
import numpy as np
from sklearn.metrics import roc_curve, auc
import pandas as pd
def compute_aggregated_matrix(matrixfiles_num, matrixfiles, savematrixfile, saveresultfile, coeffs=[1, 1, 1, 1]):
# matrixfiles_num = int(sys.argv[1])
# matrixfiles = [sys.argv[i] for i in range(2, matrixfiles_num + 2)]
# savematrixfile = sys.argv[matrixfiles_num + 2]
# saveresultfile = sys.argv[matrixfiles_num + 3]
matrices = [pd.read_csv(f, index_col=0, sep='\t') for f in matrixfiles]
genes = matrices[0].index
# print(genes)
# print(matrices)
sz = len(matrices[0])
for matrix in matrices:
assert len(matrix) == sz
for matrix in matrices:
for column in matrix:
temp = matrix[column].argsort()
ranks = np.empty_like(temp)
ranks[temp] = np.arange(len(matrix[column]))
matrix[column] = ranks
res = np.zeros(shape=(sz, sz))
for s in range(sz):
for i, matrix in enumerate(matrices):
res[s] += matrix.iloc[:, s].values * coeffs[i]
res[s] /= len(matrices)
for row in res:
row /= row.sum()
result_df = pd.DataFrame(res, columns=genes, index=genes)
result_df.to_csv(saveresultfile, index=True, header=True, sep='\t')
# print(result_df)
return result_df
matricesdirname = "/home/user/Sirius/gene_network_sirius_2019/Matrices_1"
savematricesdirname = "/home/user/Sirius/gene_network_sirius_2019/Matrices_6"
predictedfilename = matricesdirname + "/{1}_{0}_predicted.txt"
truefilename = matricesdirname + "/{1}_{0}_true.txt"
savematricesfilename = savematricesdirname + "/{0}_predicted.txt"
# datalist = ['exps_10', 'exps_10_2', 'exps_10_bgr', 'exps_50', 'exps_50_2', 'exps_50_bgr', 'exps_100', 'exps_100_2', 'exps_100_bgr', 'genes_200_exps_10_bgr', 'genes_400_exps_10_bgr', 'genes_600_exps_10_bgr', 'genes_700_exps_10_bgr', 'genes_1000_exps_10_bgr']
datalist = ['genes_200_exps_10_bgr', 'genes_200_exps_20_bgr', 'genes_200_exps_40_bgr', 'genes_400_exps_10_bgr', 'genes_400_exps_40_bgr', 'genes_400_exps_80_bgr', 'genes_500_exps_10_bgr', 'genes_500_exps_50_bgr', 'genes_500_exps_100_bgr']
algolist = ['aracne', 'mrnet', 'mrnetb']
saveresultsfile = "/home/user/Sirius/gene_network_sirius_2019/RankAggregation/res_arrgeg_on_petr_big_data_many_exps.txt"
tmpfile = "/home/user/Sirius/gene_network_sirius_2019/RankAggregation/data/tmp5.txt"
if __name__ == "__main__":
results = np.zeros(shape=(len(datalist)))
for i, dataname in enumerate(datalist):
true_df = pd.read_csv(truefilename.format(dataname, algolist[1]), index_col=0, sep='\t')
predicted_df = compute_aggregated_matrix(len(algolist), [predictedfilename.format(dataname, algo) for algo in algolist], tmpfile, savematricesfilename.format(dataname))
true_df.to_csv(savematricesdirname + "/{0}_true.txt".format(dataname), index=True, header=True, sep='\t')
# print(true_df)
true_array = true_df.values[np.triu_indices(true_df.values.shape[0], k=1)]
predicted_array = predicted_df.values[np.triu_indices(predicted_df.values.shape[0], k=1)]
roc_auc = 0
# try:
# fpr, tpr, thresholds = roc_curve(true_array, predicted_array)
# roc_auc = auc(fpr, tpr)
# except:
# print("error", dataname, algo)
fpr, tpr, thresholds = roc_curve(true_array, predicted_array)
roc_auc = auc(fpr, tpr)
results[i] = roc_auc
with open(savematricesdirname + "/{0}_auc.txt".format(dataname), 'w') as f:
f.write(str(roc_auc) + '\n')
print("done", dataname, results[i])
with open(saveresultsfile, "a") as f:
f.write("done " + dataname + str(results[i]))
# print("done", dataname, algo)
print(results)
| [
[
[
58,
61
]
],
[
[
69,
80
],
[
2505,
2507
],
[
3033,
3035
],
[
3126,
3128
],
[
838,
840
],
[
884,
886
],
[
965,
967
]
],
[
[
109,
118
],
[
3430,
3439
]
],
[
[
120,
123
],
[
3487,
3490
]
],
[
[
131,
143
],
[
2601,
2603
],
[
496,
498
],
[
1214,
1216
]
],
[
[
150,
175
],
[
2703,
2728
]
],
[
[
1383,
1398
],
[
1555,
1570
],
[
1613,
1628
]
],
[
[
1457,
1476
],
[
1674,
1693
],
[
2880,
2899
],
[
3549,
3568
]
],
[
[
1535,
1552
],
[
2745,
2762
]
],
[
[
1598,
1610
],
[
2613,
2625
]
],
[
[
1651,
1671
],
[
2818,
2838
]
],
[
[
1977,
1985
],
[
2525,
2533
],
[
2571,
2579
]
],
[
[
2215,
2223
],
[
2643,
2651
],
[
2733,
2741
],
[
2798,
2806
]
],
[
[
2256,
2271
],
[
3718,
3733
]
],
[
[
2377,
2384
],
[
2809,
2816
]
],
[
[
2495,
2502
],
[
3509,
3516
],
[
3688,
3695
],
[
3791,
3798
],
[
3864,
3871
]
],
[
[
2546,
2547
],
[
3517,
3518
],
[
3696,
3697
],
[
3799,
3800
]
],
[
[
2549,
2557
],
[
2633,
2641
],
[
2770,
2778
],
[
2846,
2854
],
[
2925,
2933
],
[
3593,
3601
],
[
3678,
3686
],
[
3776,
3784
]
],
[
[
2591,
2598
],
[
2865,
2872
],
[
3018,
3025
],
[
3049,
3056
]
],
[
[
2688,
2700
],
[
3106,
3118
],
[
3142,
3154
]
],
[
[
3005,
3015
],
[
3440,
3450
]
],
[
[
3088,
3103
],
[
3452,
3467
]
],
[
[
3195,
3202
]
],
[
[
3407,
3410
],
[
3491,
3494
]
],
[
[
3412,
3415
],
[
3496,
3499
]
],
[
[
3417,
3427
]
],
[
[
3477,
3484
],
[
3522,
3529
],
[
3639,
3646
]
],
[
[
3612,
3613
],
[
3627,
3628
]
],
[
[
3743,
3744
],
[
3758,
3759
]
]
] |
from django.core.exceptions import ObjectDoesNotExist
from django.http import JsonResponse
from rest_framework import status, permissions
from rest_framework.decorators import api_view, permission_classes
from task.models import TaskModel, Status
from worker import WORKER_LIST
from worker.settings import NEW_TASK_EVENT
@api_view(['POST'])
@permission_classes((permissions.IsAuthenticated,))
def create_task(request):
task = TaskModel(status=Status.IN_QUEUE)
task.save()
if NEW_TASK_EVENT.is_set():
NEW_TASK_EVENT.clear()
NEW_TASK_EVENT.set()
return JsonResponse({"task_id": task.pk})
@api_view(['GET'])
def get_info(request, task_id):
try:
task = TaskModel.objects.get(pk=task_id) # type: TaskModel
return JsonResponse({
'status': task.status.label,
'create_time': task.create_time,
'start_time': task.start_time,
'time_to_execute': str(task.exec_time - task.start_time) if task.exec_time is not None else None,
})
except ObjectDoesNotExist:
return JsonResponse({"message": "Task #%s does not exists" % task_id}, status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
@permission_classes((permissions.IsAdminUser,))
def start_workers(request):
try:
for x in WORKER_LIST:
x.start()
return JsonResponse({"message": "workers are running"})
except Exception as e:
return JsonResponse({"message": str(e)}, status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
@permission_classes((permissions.IsAdminUser,))
def disable_workers(request):
try:
for x in WORKER_LIST:
x.disable()
return JsonResponse({"message": "workers are disabled"})
except Exception as e:
return JsonResponse({"message": str(e)}, status=status.HTTP_400_BAD_REQUEST)
| [
[
[
35,
53
],
[
1039,
1057
]
],
[
[
78,
90
],
[
583,
595
],
[
763,
775
],
[
1074,
1086
],
[
1348,
1360
],
[
1439,
1451
],
[
1687,
1699
],
[
1779,
1791
]
],
[
[
118,
124
],
[
1145,
1151
],
[
1480,
1486
],
[
1820,
1826
]
],
[
[
126,
137
],
[
365,
376
],
[
1217,
1228
],
[
1552,
1563
]
],
[
[
176,
184
],
[
325,
333
],
[
621,
629
],
[
1177,
1185
],
[
1512,
1520
]
],
[
[
186,
204
],
[
345,
363
],
[
1197,
1215
],
[
1532,
1550
]
],
[
[
230,
239
],
[
433,
442
],
[
695,
704
]
],
[
[
241,
247
],
[
450,
456
]
],
[
[
267,
278
],
[
1298,
1309
],
[
1635,
1646
]
],
[
[
307,
321
],
[
491,
505
],
[
524,
538
],
[
551,
565
]
],
[
[
400,
411
]
],
[
[
643,
651
]
],
[
[
1248,
1261
]
],
[
[
1583,
1598
]
]
] |
import logging
import json
from typing import List, Type, Union
from keras.models import Model
from keras.layers.merge import Concatenate
from keras.layers import (
Dense, LSTM, Bidirectional, Embedding, Input, Dropout,
TimeDistributed
)
import delft.sequenceLabelling.wrapper
from delft.utilities.layers import ChainCRF
from delft.sequenceLabelling.models import BaseModel
from delft.sequenceLabelling.models import get_model as _get_model, BidLSTM_CRF_FEATURES
from sciencebeam_trainer_delft.sequence_labelling.config import ModelConfig
LOGGER = logging.getLogger(__name__)
class CustomModel(BaseModel):
def __init__(
self, config, ntags,
require_casing: bool = False,
use_crf: bool = False,
supports_features: bool = False,
require_features_indices_input: bool = False,
stateful: bool = False):
super().__init__(config, ntags)
self.require_casing = require_casing
self.use_crf = use_crf
self.supports_features = supports_features
self.require_features_indices_input = require_features_indices_input
self.stateful = stateful
def _concatenate_inputs(inputs: list, **kwargs):
if len(inputs) == 1:
return inputs[0]
return Concatenate(**kwargs)(inputs)
# renamed copy of BidLSTM_CRF to demonstrate a custom model
class CustomBidLSTM_CRF(CustomModel):
"""
A Keras implementation of BidLSTM-CRF for sequence labelling.
References
--
Guillaume Lample, Miguel Ballesteros, Sandeep Subramanian, Kazuya Kawakami, Chris Dyer.
"Neural Architectures for Named Entity Recognition". Proceedings of NAACL 2016.
https://arxiv.org/abs/1603.01360
"""
def __init__(self, config: ModelConfig, ntags=None):
super().__init__(
config, ntags,
require_casing=False, use_crf=True, supports_features=True,
stateful=config.stateful
)
stateful = self.stateful
# stateful RNNs require the batch size to be passed in
input_batch_size = config.batch_size if stateful else None
model_inputs = []
lstm_inputs = []
# build input, directly feed with word embedding by the data generator
word_input = Input(
shape=(None, config.word_embedding_size),
batch_shape=(input_batch_size, None, config.word_embedding_size),
name='word_input'
)
model_inputs.append(word_input)
lstm_inputs.append(word_input)
# build character based embedding
char_input = Input(
shape=(None, config.max_char_length),
batch_shape=(input_batch_size, None, config.max_char_length),
dtype='int32',
name='char_input'
)
model_inputs.append(char_input)
if config.char_embedding_size:
assert config.char_vocab_size, 'config.char_vocab_size required'
char_embeddings = TimeDistributed(Embedding(
input_dim=config.char_vocab_size,
output_dim=config.char_embedding_size,
mask_zero=config.char_input_mask_zero,
name='char_embeddings_embedding'
), name='char_embeddings')(char_input)
chars = TimeDistributed(
Bidirectional(LSTM(
config.num_char_lstm_units,
dropout=config.char_input_dropout,
recurrent_dropout=config.char_lstm_dropout,
return_sequences=False
)),
name='char_lstm'
)(char_embeddings)
lstm_inputs.append(chars)
# length of sequence not used for the moment (but used for f1 communication)
length_input = Input(batch_shape=(None, 1), dtype='int32', name='length_input')
# combine characters and word embeddings
LOGGER.debug('model, config.use_features: %s', config.use_features)
if config.use_features:
LOGGER.info('model using features')
assert config.max_feature_size > 0
features_input = Input(
batch_shape=(input_batch_size, None, config.max_feature_size),
name='features_input'
)
model_inputs.append(features_input)
features = features_input
if config.features_embedding_size:
features = TimeDistributed(Dense(
config.features_embedding_size,
name='features_embeddings_dense'
), name='features_embeddings')(features)
LOGGER.info(
'word_input=%s, chars=%s, features=%s',
word_input, chars, features
)
lstm_inputs.append(features)
x = _concatenate_inputs(lstm_inputs, name='word_lstm_input')
x = Dropout(config.dropout, name='word_lstm_input_dropout')(x)
x = Bidirectional(LSTM(
units=config.num_word_lstm_units,
return_sequences=True,
recurrent_dropout=config.recurrent_dropout,
stateful=stateful,
), name='word_lstm')(x)
x = Dropout(config.dropout, name='word_lstm_output_dropout')(x)
x = Dense(
config.num_word_lstm_units, name='word_lstm_dense', activation='tanh'
)(x)
x = Dense(ntags, name='dense_ntags')(x)
self.crf = ChainCRF(name='crf')
pred = self.crf(x)
model_inputs.append(length_input)
self.model = Model(inputs=model_inputs, outputs=[pred])
self.config = config
# copied from
# https://github.com/kermitt2/delft/blob/d2f8390ac01779cab959f57aa6e1a8f1d2723505/
# delft/sequenceLabelling/models.py
class CustomBidLSTM_CRF_FEATURES(CustomModel):
"""
A Keras implementation of BidLSTM-CRF for sequence labelling which create features
from additional orthogonal information generated by GROBID.
References
--
Guillaume Lample, Miguel Ballesteros, Sandeep Subramanian, Kazuya Kawakami, Chris Dyer.
"Neural Architectures for Named Entity Recognition". Proceedings of NAACL 2016.
https://arxiv.org/abs/1603.01360
"""
name = 'CustomBidLSTM_CRF_FEATURES'
def __init__(self, config, ntags=None):
super().__init__(
config, ntags,
require_casing=False, use_crf=True, supports_features=True,
require_features_indices_input=True
)
# build input, directly feed with word embedding by the data generator
word_input = Input(shape=(None, config.word_embedding_size), name='word_input')
# build character based embedding
char_input = Input(shape=(None, config.max_char_length), dtype='int32', name='char_input')
char_embeddings = TimeDistributed(Embedding(
input_dim=config.char_vocab_size,
output_dim=config.char_embedding_size,
mask_zero=True,
name='char_embeddings'
))(char_input)
chars = TimeDistributed(Bidirectional(LSTM(
config.num_char_lstm_units,
return_sequences=False
)))(char_embeddings)
# layout features input and embeddings
features_input = Input(
shape=(None, len(config.features_indices)),
dtype='float32',
name='features_input'
)
assert config.features_vocabulary_size, "config.features_vocabulary_size required"
assert config.features_embedding_size, "config.features_embedding_size required"
# features_vocabulary_size (default 12) * number_of_features + 1
# (the zero is reserved for masking / padding)
features_embedding = TimeDistributed(
Embedding(
input_dim=config.features_vocabulary_size * len(config.features_indices) + 1,
output_dim=config.features_embedding_size,
mask_zero=True,
trainable=True,
name='features_embedding'),
name="features_embedding_td_1"
)(features_input)
assert config.features_lstm_units, "config.features_lstm_units required"
features_embedding_bd = TimeDistributed(
Bidirectional(LSTM(config.features_lstm_units, return_sequences=False)),
name="features_embedding_td_2"
)(features_embedding)
features_embedding_out = Dropout(config.dropout)(features_embedding_bd)
# length of sequence not used for the moment (but used for f1 communication)
length_input = Input(batch_shape=(None, 1), dtype='int32', name='length_input')
# combine characters and word embeddings
x = Concatenate()([word_input, chars, features_embedding_out])
x = Dropout(config.dropout)(x)
x = Bidirectional(LSTM(
units=config.num_word_lstm_units,
return_sequences=True,
recurrent_dropout=config.recurrent_dropout
))(x)
x = Dropout(config.dropout)(x)
x = Dense(config.num_word_lstm_units, activation='tanh')(x)
x = Dense(ntags)(x)
self.crf = ChainCRF()
pred = self.crf(x)
self.model = Model(
inputs=[word_input, char_input, features_input, length_input],
outputs=[pred]
)
self.config = config
DEFAULT_MODEL_NAMES = [
'BidLSTM_CRF', 'BidLSTM_CNN', 'BidLSTM_CNN_CRF', 'BidGRU_CRF', 'BidLSTM_CRF_CASING',
BidLSTM_CRF_FEATURES.name
]
MODEL_MAP = {
'CustomBidLSTM_CRF': CustomBidLSTM_CRF,
CustomBidLSTM_CRF_FEATURES.name: CustomBidLSTM_CRF_FEATURES
}
IMPLICIT_MODEL_CONFIG_PROPS_MAP = {
BidLSTM_CRF_FEATURES.name: dict(
use_features=True,
use_features_indices_input=True
),
CustomBidLSTM_CRF_FEATURES.name: dict(
use_features=True,
use_features_indices_input=True
)
}
def register_model(name: str, model_class: Type[CustomModel]):
MODEL_MAP[name] = model_class
def updated_implicit_model_config_props(model_config: ModelConfig):
implicit_model_config_props = IMPLICIT_MODEL_CONFIG_PROPS_MAP.get(model_config.model_type)
if not implicit_model_config_props:
return
for key, value in implicit_model_config_props.items():
setattr(model_config, key, value)
def _create_model(
model_class: Type[CustomModel],
config: ModelConfig,
ntags=None) -> CustomModel:
return model_class(config, ntags=ntags)
def is_model_stateful(model: Union[BaseModel, CustomModel]) -> bool:
try:
return model.stateful
except AttributeError:
return False
def get_model(config, preprocessor, ntags=None):
LOGGER.info(
'get_model, config: %s, ntags=%s',
json.dumps(vars(config), indent=4),
ntags
)
model_class = MODEL_MAP.get(config.model_type)
if not model_class:
return _get_model(config, preprocessor, ntags=ntags)
model = _create_model(model_class, config, ntags=ntags)
config.use_crf = model.use_crf
preprocessor.return_casing = model.require_casing
if config.use_features and not model.supports_features:
LOGGER.warning('features enabled but not supported by model (disabling)')
config.use_features = False
preprocessor.return_features = config.use_features
return model
def get_model_names() -> List[str]:
return sorted(set(DEFAULT_MODEL_NAMES) | set(MODEL_MAP.keys()))
def patch_get_model():
delft.sequenceLabelling.wrapper.get_model = get_model
| [
[
[
7,
14
],
[
561,
568
]
],
[
[
22,
26
],
[
10730,
10734
]
],
[
[
46,
50
],
[
11350,
11354
]
],
[
[
52,
56
],
[
9908,
9912
],
[
10325,
10329
]
],
[
[
58,
63
],
[
10484,
10489
]
],
[
[
90,
95
],
[
5531,
5536
],
[
9177,
9182
]
],
[
[
127,
138
],
[
1278,
1289
],
[
8682,
8693
]
],
[
[
170,
175
],
[
4442,
4447
],
[
5249,
5254
],
[
5363,
5368
],
[
9014,
9019
],
[
9082,
9087
]
],
[
[
177,
181
],
[
3334,
3338
],
[
4959,
4963
],
[
7050,
7054
],
[
8234,
8238
],
[
8807,
8811
]
],
[
[
183,
196
],
[
3320,
3333
],
[
4945,
4958
],
[
7036,
7049
],
[
8220,
8233
],
[
8793,
8806
]
],
[
[
198,
207
],
[
2995,
3004
],
[
6809,
6818
],
[
7736,
7745
]
],
[
[
209,
214
],
[
2272,
2277
],
[
2594,
2599
],
[
3781,
3786
],
[
4128,
4133
],
[
6558,
6563
],
[
6689,
6694
],
[
7233,
7238
],
[
8555,
8560
]
],
[
[
216,
223
],
[
4873,
4880
],
[
5177,
5184
],
[
8400,
8407
],
[
8753,
8760
],
[
8975,
8982
]
],
[
[
229,
244
],
[
2979,
2994
],
[
3287,
3302
],
[
4426,
4441
],
[
6793,
6808
],
[
7020,
7035
],
[
7707,
7722
],
[
8191,
8206
]
],
[
[
255,
286
],
[
11458,
11463
]
],
[
[
322,
330
],
[
5418,
5426
],
[
9117,
9125
]
],
[
[
374,
383
],
[
609,
618
],
[
10490,
10499
]
],
[
[
427,
450
],
[
10877,
10887
]
],
[
[
452,
472
],
[
9444,
9464
],
[
9638,
9658
]
],
[
[
538,
549
],
[
1758,
1769
],
[
10018,
10029
],
[
10360,
10371
]
],
[
[
552,
558
],
[
3904,
3910
],
[
4016,
4022
],
[
4623,
4629
],
[
10666,
10672
],
[
11141,
11147
]
],
[
[
597,
608
],
[
1394,
1405
],
[
5771,
5782
],
[
9913,
9924
],
[
10396,
10407
],
[
10330,
10341
],
[
10501,
10512
]
],
[
[
1172,
1191
],
[
4804,
4823
]
],
[
[
1376,
1393
],
[
9512,
9529
]
],
[
[
5744,
5770
],
[
9535,
9561
],
[
9568,
9594
],
[
9749,
9775
]
],
[
[
9327,
9346
],
[
11383,
11402
]
],
[
[
9473,
9482
],
[
9932,
9941
],
[
10805,
10814
],
[
11410,
11419
]
],
[
[
9598,
9629
],
[
10066,
10097
]
],
[
[
9869,
9883
]
],
[
[
9968,
10003
]
],
[
[
10289,
10302
],
[
10936,
10949
]
],
[
[
10459,
10476
]
],
[
[
10617,
10626
],
[
11502,
11511
]
],
[
[
11329,
11344
]
],
[
[
11435,
11450
]
]
] |
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
from rqalpha.utils.datetime_func import TimeRange
TRADING_PERIOD_DICT = dict()
STOCK_TRADING_PERIOD = [
TimeRange(start=time(9, 31), end=time(11, 30)),
TimeRange(start=time(13, 1), end=time(15, 0)),
]
# | 商品期货 WR, FU, CS, C, L, V, PP, BB, FB, JD, WH, PM, RI, SF, SM, RS, JR, LR, AP | 09:01~10:15, 10:31~11:30, 13:31~15:00 |
time_period1 = [
TimeRange(start=time(9, 1), end=time(10, 15)),
TimeRange(start=time(10, 31), end=time(11, 30)),
TimeRange(start=time(13, 31), end=time(15, 0)),
]
TRADING_PERIOD_DICT.update({
underlying_symbol: time_period1
for underlying_symbol in
["WR", "FU", "CS", "C", "L", "V", "PP", "BB", "FB", "JD", "WH", "PM", "RI", "SF", "SM", "RS", "JR", "LR", "AP"]
})
# | 商品期货 Y, M, A, B, P, J, JM, I, CF, SR, OI, TA, MA, ZC, FG, RM, CY | 21:01~23:30, 09:01~10:15, 10:31~11:30, 13:31~15:00 |
time_period2 = [
TimeRange(start=time(21, 1), end=time(23, 30)),
TimeRange(start=time(9, 1), end=time(10, 15)),
TimeRange(start=time(10, 31), end=time(11, 30)),
TimeRange(start=time(13, 31), end=time(15, 0)),
]
TRADING_PERIOD_DICT.update({
underlying_symbol: time_period2
for underlying_symbol in ["Y", "M", "A", "B", "P", "J", "JM", "I", "CF", "SR", "OI", "TA", "MA", "ZC", "FG", "RM", "CY"]
})
# | 商品期货 CU, AL, ZN, PB, SN, NI | 21:01~1:00, 09:01~10:15, 10:31~11:30, 13:31~15:00 |
time_period3 = [
TimeRange(start=time(21, 1), end=time(23, 59)),
TimeRange(start=time(0, 0), end=time(1, 0)),
TimeRange(start=time(9, 1), end=time(10, 15)),
TimeRange(start=time(10, 31), end=time(11, 30)),
TimeRange(start=time(13, 31), end=time(15, 0)),
]
TRADING_PERIOD_DICT.update(
{underlying_symbol: time_period3
for underlying_symbol in ["CU", "AL", "ZN", "PB", "SN", "NI"]})
# | 商品期货 RB, HC, BU, RU | 21:01~23:00, 09:01~10:15, 10:31~11:30, 13:31~15:00 |
time_period4 = [
TimeRange(start=time(21, 1), end=time(23, 0)),
TimeRange(start=time(9, 1), end=time(10, 15)),
TimeRange(start=time(10, 31), end=time(11, 30)),
TimeRange(start=time(13, 31), end=time(15, 0)),
]
TRADING_PERIOD_DICT.update({underlying_symbol: time_period4 for underlying_symbol in ["RB", "HC", "BU", "RU"]})
# | 商品期货 AU, AG | 21:01~2:30, 09:01~10:15, 10:31~11:30, 13:31~15:00 |
time_period5 = [
TimeRange(start=time(21, 1), end=time(23, 59)),
TimeRange(start=time(0, 0), end=time(2, 30)),
TimeRange(start=time(9, 1), end=time(10, 15)),
TimeRange(start=time(10, 31), end=time(11, 30)),
TimeRange(start=time(13, 31), end=time(15, 0)),
]
TRADING_PERIOD_DICT.update({underlying_symbol: time_period5 for underlying_symbol in ["AU", "AG", "SC"]})
# | 股指期货 product='Index' | 09:31~11:30, 13:01~15:00 |
time_period6 = [
TimeRange(start=time(9, 31), end=time(11, 30)),
TimeRange(start=time(13, 1), end=time(15, 0)),
]
TRADING_PERIOD_DICT.update({underlying_symbol: time_period6 for underlying_symbol in ["IF", "IH", "IC"]})
# | 国债期货 product='Government' | 09:16~11:30, 13:01~15:15|
time_period7 = [
TimeRange(start=time(9, 16), end=time(11, 30)),
TimeRange(start=time(13, 1), end=time(15, 15)),
]
TRADING_PERIOD_DICT.update({underlying_symbol: time_period7 for underlying_symbol in ["T", "TF", "TS"]})
| [
[
[
626,
630
],
[
758,
762
],
[
775,
779
],
[
810,
814
],
[
827,
831
],
[
1005,
1009
],
[
1021,
1025
],
[
1056,
1060
],
[
1074,
1078
],
[
1109,
1113
],
[
1127,
1131
],
[
1518,
1522
],
[
1535,
1539
],
[
1570,
1574
],
[
1586,
1590
],
[
1621,
1625
],
[
1639,
1643
],
[
1674,
1678
],
[
1692,
1696
],
[
2025,
2029
],
[
2042,
2046
],
[
2077,
2081
],
[
2093,
2097
],
[
2126,
2130
],
[
2142,
2146
],
[
2177,
2181
],
[
2195,
2199
],
[
2230,
2234
],
[
2248,
2252
],
[
2515,
2519
],
[
2532,
2536
],
[
2566,
2570
],
[
2582,
2586
],
[
2617,
2621
],
[
2635,
2639
],
[
2670,
2674
],
[
2688,
2692
],
[
2924,
2928
],
[
2941,
2945
],
[
2976,
2980
],
[
2992,
2996
],
[
3026,
3030
],
[
3042,
3046
],
[
3077,
3081
],
[
3095,
3099
],
[
3130,
3134
],
[
3148,
3152
],
[
3362,
3366
],
[
3379,
3383
],
[
3414,
3418
],
[
3431,
3435
],
[
3649,
3653
],
[
3666,
3670
],
[
3701,
3705
],
[
3718,
3722
]
],
[
[
672,
681
],
[
742,
751
],
[
794,
803
],
[
989,
998
],
[
1040,
1049
],
[
1093,
1102
],
[
1502,
1511
],
[
1554,
1563
],
[
1605,
1614
],
[
1658,
1667
],
[
2009,
2018
],
[
2061,
2070
],
[
2110,
2119
],
[
2161,
2170
],
[
2214,
2223
],
[
2499,
2508
],
[
2550,
2559
],
[
2601,
2610
],
[
2654,
2663
],
[
2908,
2917
],
[
2960,
2969
],
[
3010,
3019
],
[
3061,
3070
],
[
3114,
3123
],
[
3346,
3355
],
[
3398,
3407
],
[
3633,
3642
],
[
3685,
3694
]
],
[
[
683,
702
],
[
1143,
1162
],
[
1708,
1727
],
[
2264,
2283
],
[
2704,
2723
],
[
3164,
3183
],
[
3447,
3466
],
[
3735,
3754
]
],
[
[
713,
733
]
],
[
[
968,
980
],
[
1195,
1207
]
],
[
[
1481,
1493
],
[
1760,
1772
]
],
[
[
1988,
2000
],
[
2316,
2328
]
],
[
[
2478,
2490
],
[
2751,
2763
]
],
[
[
2887,
2899
],
[
3211,
3223
]
],
[
[
3325,
3337
],
[
3494,
3506
]
],
[
[
3612,
3624
],
[
3782,
3794
]
]
] |
from django.apps import AppConfig
from django.db import connections as djcs
from django.core.exceptions import ImproperlyConfigured
class ExplorerAppConfig(AppConfig):
name = 'explorer'
def ready(self):
from explorer.schema import build_async_schemas
_validate_connections()
build_async_schemas()
def _get_default():
from explorer.app_settings import EXPLORER_DEFAULT_CONNECTION
return EXPLORER_DEFAULT_CONNECTION
def _get_explorer_connections():
from explorer.app_settings import EXPLORER_CONNECTIONS
return EXPLORER_CONNECTIONS
def _validate_connections():
# Validate connections
if _get_default() not in _get_explorer_connections().values():
raise ImproperlyConfigured(
'EXPLORER_DEFAULT_CONNECTION is %s, but that alias is not present in the values of EXPLORER_CONNECTIONS'
% _get_default())
for name, conn_name in _get_explorer_connections().items():
if conn_name not in djcs:
raise ImproperlyConfigured(
'EXPLORER_CONNECTIONS contains (%s, %s), but %s is not a valid Django DB connection.'
% (name, conn_name, conn_name))
| [
[
[
24,
33
],
[
158,
167
]
],
[
[
56,
75
],
[
988,
992
]
],
[
[
111,
131
],
[
726,
746
],
[
1012,
1032
]
],
[
[
140,
157
]
],
[
[
339,
351
],
[
652,
664
],
[
879,
891
]
],
[
[
466,
491
],
[
674,
699
],
[
923,
948
]
],
[
[
592,
613
],
[
279,
300
]
]
] |
# #beautifulsoup does not work with dynamically created sites
# import requests
# from bs4 import BeautifulSoup
# # The url to scrape added in tasks.py
# # URL = "https://www.opendatani.gov.uk/dataset?q=defib|AED|defibrilator"
#
#
# def scrape(url):
# # set the page and use the requests page on the url
# page = requests.get(url)
# # create a BeatifulSoup object looking and load the page content and parse
# soup = BeautifulSoup(page.content, "html.parser")
# # id="", _class=""
# results = soup.find(_class="dataset-resources unstyled")
# print(results)
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
def scrape(url):
options = webdriver.ChromeOptions()
options.add_argument(" - incognito")
browser = webdriver.Chrome(executable_path='./chromedriver', chrome_options=options)
browser.get(url)
timeout = 10
try:
WebDriverWait(browser, timeout).until(
EC.visibility_of_element_located(
(By.XPATH, "//ul[@class='dataset-resources unstyled']")
)
)
except TimeoutException:
print("Timed out")
browser.quit()
data_elements = browser.find_element_by_xpath("//ul[@class='dataset-resources unstyled']")
for data_element in data_elements:
result = data_element.find_element_by_xpath(".//ul[@class='dataset-resources unstyled']")
defib_location = result.get_attribute('ul')
print(defib_location)
| [
[
[
607,
616
],
[
869,
878
],
[
950,
959
]
],
[
[
659,
672
],
[
1081,
1094
]
],
[
[
712,
737
],
[
1132,
1134
]
],
[
[
777,
793
],
[
1273,
1289
]
],
[
[
835,
837
],
[
1183,
1185
]
],
[
[
842,
848
]
]
] |
import dateparser
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class MgContagemSpider(BaseGazetteSpider):
TERRITORY_ID = "3118601"
name = "mg_contagem"
allowed_domains = ["contagem.mg.gov.br"]
start_urls = ["http://www.contagem.mg.gov.br/?se=doc"]
def parse(self, response):
"""
@url http://www.contagem.mg.gov.br/?se=doc&pagina=2
@returns items 15 15
@scrapes date file_urls is_extra_edition power
"""
anchor_elements = response.css(".texto11pt a")
urls = [
response.urljoin(url)
for url in anchor_elements.css("::attr(href)").re(".+pdf")
]
extra_editions = ["complementar" in url for url in urls]
dates_in_sentence = anchor_elements.css("p span:last-child ::text").re(
"(\d{1,2}\s+de\s+\w+\s+de\s+\d{4})"
)
dates = [
dateparser.parse(date, languages=["pt"]).date()
for date in dates_in_sentence
]
for url, date, is_extra_edition in zip(urls, dates, extra_editions):
yield Gazette(
date=date,
file_urls=[url],
is_extra_edition=is_extra_edition,
power="executive_legislature",
)
number_of_pages = int(
response.css("table.subtitulo12pt tr:first-child td ::text").extract()[-1]
)
for next_page in range(2, number_of_pages + 1):
next_page_url = f"{self.start_urls[0]}&pagina={next_page}"
yield response.follow(next_page_url, callback=self.parse)
| [
[
[
7,
17
],
[
928,
938
]
],
[
[
44,
51
],
[
1124,
1131
]
],
[
[
85,
102
],
[
128,
145
]
],
[
[
111,
127
]
]
] |
"""Source code for distributed attentional actor architecture (DA3) model.
Author: Yoshinari Motokawa <yoshinari.moto@fuji.waseda.jp>
"""
from typing import List
import torch
from core.utils.logging import initialize_logging
from omegaconf import DictConfig
from torch import nn
from ..hard_shrink_attention import HardShrinkBlock
from ..vit import Block, PatchEmbed
logger = initialize_logging(__name__)
class DA3(nn.Module):
def __init__(self, config: DictConfig, input_shape: List[int], output_size: int):
super().__init__()
patched_size_x = input_shape[1] // config.model.patch_size
patched_size_y = input_shape[2] // config.model.patch_size
self.view_method = config.observation_area_mask
self.patch_embed = PatchEmbed(
patch_size=config.model.patch_size,
in_chans=input_shape[0],
embed_dim=config.model.embed_dim,
)
self.saliency_vector = nn.Parameter(torch.zeros(1, 1, config.model.embed_dim))
self.pos_embed = nn.Parameter(
torch.zeros(1, patched_size_x * patched_size_y + 1, config.model.embed_dim)
)
block = HardShrinkBlock if config.model.attention == "hard" else Block
self.blocks = nn.ModuleList(
[
block(
dim=config.model.embed_dim,
num_heads=config.model.num_heads,
mlp_ratio=config.model.mlp_ratio,
**{"af_lambd": config.model.af_lambd}
)
for _ in range(config.model.block_loop)
]
)
self.norm = nn.LayerNorm(config.model.embed_dim)
self.head = nn.Linear(config.model.embed_dim, output_size)
def forward(self, state):
x = self.state_encoder(state)
out = self.patch_embed(x)
saliency_vector = self.saliency_vector.expand(out.shape[0], -1, -1)
out = torch.cat((saliency_vector, out), dim=1)
out = out + self.pos_embed
for blk in self.blocks:
out = blk(out)
out = self.norm(out)
out = out[:, 0]
out = self.head(out)
return out
def forward_attn(self, state):
x = self.state_encoder(state)
out = self.patch_embed(x)
saliency_vector = self.saliency_vector.expand(out.shape[0], -1, -1)
out = torch.cat((saliency_vector, out), dim=1)
out = out + self.pos_embed
attns = list()
for blk in self.blocks:
out, attn = blk.forward_attn(out)
attns.append(attn.detach())
out = self.norm(out)
out = out[:, 0]
out = self.head(out)
return out, [attns]
def state_encoder(self, state):
return state[self.view_method]
| [
[
[
158,
162
],
[
489,
493
]
],
[
[
171,
176
],
[
962,
967
],
[
1056,
1061
],
[
1927,
1932
],
[
2366,
2371
]
],
[
[
208,
226
],
[
380,
398
]
],
[
[
249,
259
],
[
464,
474
]
],
[
[
278,
280
],
[
421,
423
],
[
949,
951
],
[
1030,
1032
],
[
1244,
1246
],
[
1629,
1631
],
[
1686,
1688
]
],
[
[
318,
333
],
[
1159,
1174
]
],
[
[
352,
357
],
[
1216,
1221
]
],
[
[
359,
369
],
[
764,
774
]
],
[
[
371,
377
]
],
[
[
417,
420
]
]
] |
import logging
import shelve
from ftplib import FTP
import requests
import requests_cache
from io import BytesIO
_cache_file_path = None
def set_cache_http(cache_file_path):
requests_cache.install_cache(cache_file_path)
def open_url(url):
return requests.get(url).text
def set_cache_ftp(cache_file_path):
global _cache_file_path
_cache_file_path = cache_file_path
def ftp_retrieve(server, path, filename):
logging.info('loading: ftp://%s/%s/%s' % (server, path, filename))
ftp = FTP(server)
ftp.login()
ftp.cwd(path)
buffer = BytesIO()
ftp.retrbinary('RETR %s' % filename, buffer.write)
return buffer
def download_ftp(server, path, filename, refresh_cache=False):
"""
TODO: drop shelve (too unstable) and use a simple filesystem implementation.
:param server:
:param path:
:param filename:
:param refresh_cache:
:return:
"""
if _cache_file_path:
with shelve.open(_cache_file_path) as url_cache:
location = '/'.join([server, path, filename])
if location not in url_cache or refresh_cache:
url_cache[location] = ftp_retrieve(server, path, filename)
try:
output = url_cache[location]
except KeyError:
del url_cache[location]
raise
except EOFError:
del url_cache[location]
raise
else:
output = ftp_retrieve(server, path, filename)
return output
| [
[
[
7,
14
],
[
436,
443
]
],
[
[
22,
28
],
[
951,
957
]
],
[
[
48,
51
],
[
513,
516
]
],
[
[
60,
68
],
[
260,
268
]
],
[
[
76,
90
],
[
182,
196
]
],
[
[
106,
113
],
[
572,
579
]
],
[
[
115,
131
],
[
920,
936
],
[
963,
979
]
],
[
[
145,
159
]
],
[
[
234,
242
]
],
[
[
289,
302
]
],
[
[
394,
406
],
[
1150,
1162
],
[
1462,
1474
]
],
[
[
661,
673
]
],
[
[
353,
369
]
]
] |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import os
import torch.nn as nn
from yolox.exp import Exp as MyExp
class Exp(MyExp):
def __init__(self):
super(Exp, self).__init__()
self.depth = 0.33
self.width = 0.25
self.scale = (0.5, 1.5)
self.random_size = (10, 20)
self.test_size = (416, 416)
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
self.enable_mixup = False
def get_model(self, sublinear=False):
def init_yolo(M):
for m in M.modules():
if isinstance(m, nn.BatchNorm2d):
m.eps = 1e-3
m.momentum = 0.03
if "model" not in self.__dict__:
from yolox.models import YOLOX, YOLOPAFPN, YOLOXHead
in_channels = [256, 512, 1024]
# NANO model use depthwise = True, which is main difference.
backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels, depthwise=True)
head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels, depthwise=True)
self.model = YOLOX(backbone, head)
self.model.apply(init_yolo)
self.model.head.initialize_biases(1e-2)
return self.model
| [
[
[
107,
109
],
[
444,
446
],
[
458,
460
]
],
[
[
120,
134
],
[
681,
683
]
],
[
[
160,
172
],
[
188,
193
]
],
[
[
184,
187
],
[
236,
239
]
]
] |
import dsz
MENU_TEXT = 'Run shares commands'
def main():
dsz.ui.Echo('Running shares -list and shares -query...', dsz.GOOD)
dsz.control.echo.Off()
dsz.cmd.Run('background log shares -list', dsz.RUN_FLAG_RECORD)
dsz.cmd.Run('background log shares -query', dsz.RUN_FLAG_RECORD)
dsz.control.echo.On()
if (__name__ == '__main__'):
main() | [
[
[
9,
12
],
[
68,
71
],
[
125,
128
],
[
140,
143
],
[
168,
171
],
[
211,
214
],
[
237,
240
],
[
281,
284
],
[
307,
310
]
],
[
[
14,
23
]
],
[
[
55,
59
],
[
364,
368
]
]
] |
from leapp.actors import Actor
from leapp.libraries.actor.library import remove_boot_files
from leapp.models import BootContent
from leapp.tags import IPUWorkflowTag, PreparationPhaseTag
class RemoveBootFiles(Actor):
"""
Remove Leapp provided initramfs from boot partition.
Since Leapp provided initramfs and kernel are already loaded into RAM at this phase, remove
them to have as little space requirements for boot partition as possible.
"""
name = 'remove_boot_files'
consumes = (BootContent,)
produces = ()
tags = (IPUWorkflowTag, PreparationPhaseTag)
def process(self):
remove_boot_files()
| [
[
[
25,
30
],
[
211,
216
]
],
[
[
73,
90
],
[
628,
645
]
],
[
[
116,
127
],
[
515,
526
]
],
[
[
151,
165
],
[
559,
573
]
],
[
[
167,
186
],
[
575,
594
]
],
[
[
195,
210
]
]
] |
# Generated by Django 2.2.6 on 2019-10-13 23:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Core_sample',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('global_id', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('name', models.CharField(max_length=50, verbose_name='Название')),
('deposit', models.PositiveIntegerField(verbose_name='Месторождение')),
('hole', models.PositiveIntegerField(verbose_name='Скважина')),
('top', models.FloatField(verbose_name='Вверх')),
('bottom', models.FloatField(verbose_name='Низ')),
('status', models.IntegerField(choices=[(1, 'notAnalysed'), (2, 'analysed'), (3, 'inProcess'), (4, 'error')], default=1, verbose_name='Статус')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь')),
],
options={
'verbose_name': 'Керн',
'verbose_name_plural': 'Керны',
},
),
migrations.CreateModel(
name='Fragment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dl_src', models.FilePathField(verbose_name='ДС изображение')),
('uv_src', models.FilePathField(verbose_name='УФ изображение')),
('top', models.FloatField(verbose_name='Вверх')),
('bottom', models.FloatField(verbose_name='Низ')),
('cs', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core_sample.Core_sample', verbose_name='Керн')),
],
options={
'verbose_name': 'Фрагмент керна',
'verbose_name_plural': 'Фрагменты керна',
},
),
]
| [
[
[
73,
81
],
[
291,
299
],
[
1280,
1288
]
],
[
[
104,
114
],
[
186,
196
],
[
259,
269
],
[
352,
362
],
[
1496,
1506
]
],
[
[
116,
122
],
[
452,
458
],
[
574,
580
],
[
667,
673
],
[
754,
760
],
[
839,
845
],
[
918,
924
],
[
987,
993
],
[
1054,
1060
],
[
1214,
1220
],
[
1593,
1599
],
[
1712,
1718
],
[
1793,
1799
],
[
1871,
1877
],
[
1940,
1946
],
[
2003,
2009
]
],
[
[
130,
155
],
[
1242,
1248
],
[
2031,
2037
]
],
[
[
163,
167
],
[
599,
603
]
],
[
[
176,
185
]
]
] |
"""Defines basic light string data and functions."""
import os
import sys
import atexit
import inspect
import time
import logging
from typing import Any, Optional, Sequence, Union, overload
from nptyping import NDArray
import numpy as np
from LightBerries.LightBerryExceptions import LightStringException
from LightBerries.RpiWS281xPatch import rpi_ws281x
from LightBerries.LightPixels import Pixel, PixelColors
LOGGER = logging.getLogger("LightBerries")
class LightString(Sequence[np.int_]):
"""Defines basic LED array data and functions."""
def __init__(
self,
ledCount: Optional[int] = None,
pixelStrip: rpi_ws281x.PixelStrip = None,
simulate: bool = False,
) -> None:
"""Creates a pixel array using the rpipixelStrip library and Pixels.
Args:
ledCount: the number of LEDs desired in the LightString
pixelStrip: the ws281x object that actually controls the LED signaling
simulate: dont use GPIO
Raises:
Warning: if something unexpected could happen
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightStringException: if something bad happens
"""
# cant run GPIO stuff without root, tell the user if they forgot
# linux check is just for debugging with fake GPIO on windows
if sys.platform == "linux" and not os.getuid() == 0: # pylint: disable = no-member
raise LightStringException(
"GPIO functionality requires root privilege. Please run command again as root"
)
# catch error cases first
if ledCount is None and pixelStrip is None and simulate is False:
raise LightStringException(
"Cannot create LightString object without ledCount or " + "pixelStrip object being specified"
)
# catch error cases first
# if ledCount is not None and pixelStrip is not None:
# raise Warning(
# "ledCount is overridden when pixelStrip is and ledcount "
# + "are both passed to LightString constructor"
# )
try:
self.simulate = simulate
# use passed led count if it is valid
if ledCount is not None:
self._ledCount = ledCount
# used passed pixel strip if it is not none
if pixelStrip is not None:
self.pixelStrip = pixelStrip
self.pixelStrip.begin()
self._ledCount = self.pixelStrip.numPixels()
LOGGER.debug(
"%s.%s Created WS281X object",
self.__class__.__name__,
inspect.stack()[0][3],
)
except SystemExit: # pylint:disable=try-except-raise
raise
except KeyboardInterrupt: # pylint:disable=try-except-raise
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
inspect.stack()[0][3],
ex,
)
raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
try:
# validate led count
if not isinstance(self._ledCount, int):
raise LightStringException(
f'Cannot create LightString object with LED count "{self._ledCount}"',
)
# if led count is good, create our pixel sequence
self.rgbArray: NDArray[(3, Any), np.int32] = np.zeros((self._ledCount, 3))
self.rgbArray[:] = np.array([Pixel().array for i in range(self._ledCount)])
LOGGER.debug(
"%s.%s Created Numpy Light array",
self.__class__.__name__,
inspect.stack()[0][3],
)
except SystemExit: # pylint:disable=try-except-raise
raise
except KeyboardInterrupt: # pylint:disable=try-except-raise
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
inspect.stack()[0][3],
ex,
)
raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
# try to force cleanup of underlying c objects when user exits
atexit.register(self.__del__)
def __del__(
self,
) -> None:
"""Properly disposes of the rpipixelStrip object.
Prevents memory leaks (hopefully) that were happening in the rpi.PixelStrip module.
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightStringException: if something bad happens
"""
# check if pixel strip has been created
if isinstance(self.pixelStrip, rpi_ws281x.PixelStrip):
# turn off leds
self.off()
# cleanup c memory usage
try:
self.pixelStrip._cleanup()
except SystemExit: # pylint:disable=try-except-raise
raise
except KeyboardInterrupt: # pylint:disable=try-except-raise
raise
except Exception as ex:
LOGGER.exception("Failed to clean up WS281X object: %s", str(ex))
raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
def __len__(
self,
) -> int:
"""Return length of the light string (the number of LEDs).
Returns:
the number of LEDs in the array
"""
if self.rgbArray is not None:
return len(self.rgbArray)
else:
return 0
@overload
def __getitem__( # noqa D105
self,
idx: int,
) -> NDArray[(3,), np.int32]:
... # pylint: disable=pointless-statement
@overload
def __getitem__( # noqa D105 # pylint: disable=function-redefined
self,
s: slice,
) -> NDArray[(3, Any), np.int32]:
... # pylint: disable=pointless-statement
def __getitem__( # pylint: disable=function-redefined
self, key: Union[int, slice]
) -> Union[NDArray[(3,), np.int32], NDArray[(3, Any), np.int32]]:
"""Return a LED index or slice from LED array.
Args:
key: an index of a single LED, or a slice specifying a range of LEDs
Returns:
the LED value or values as requested
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightStringException: if something bad happens
"""
try:
if isinstance(self.rgbArray, np.ndarray):
return self.rgbArray[key].array
else:
raise LightStringException("Cannot index into uninitialized LightString object")
except SystemExit: # pylint:disable=try-except-raise
raise
except KeyboardInterrupt: # pylint:disable=try-except-raise
raise
except Exception as ex:
LOGGER.exception('Failed to get key "%s" from %s: %s', key, self.rgbArray, ex)
raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
def __setitem__(
self,
key: Union[int, slice],
value: Union[NDArray[(3,), np.int32], NDArray[(3, Any), np.int32]],
) -> None:
"""Set LED value(s) in the array.
Args:
key: the index or slice specifying one or more LED indices
value: the RGB value or values to assign to the given LED indices
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightStringException: if something bad happens
"""
try:
if isinstance(self.rgbArray, np.ndarray):
if isinstance(key, slice):
if isinstance(value, np.ndarray):
self.rgbArray.__setitem__(key, value)
elif isinstance(value, Sequence):
self.rgbArray.__setitem__(key, [Pixel(v).array for v in value])
else:
raise LightStringException(
"Cannot assign multiple indices of LightString using a single value"
)
else:
if isinstance(value, np.ndarray):
self.rgbArray.__setitem__(key, value)
elif isinstance(value, Pixel):
self.rgbArray.__setitem__(key, Pixel(value).array)
else:
raise LightStringException(
"Cannot assign single index of LightString using multiple values"
)
else:
raise LightStringException("Cannot index into uninitialized LightString object")
except SystemExit: # pylint:disable=try-except-raise
raise
except KeyboardInterrupt: # pylint:disable=try-except-raise
raise
except Exception as ex:
LOGGER.exception("Failed to set light %s to value %s: %s", key, value, ex)
raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
def __enter__(
self,
) -> "LightString":
"""Get an instance of this object object.
Returns:
an instance of LightString
"""
return self
def __exit__(
self,
*args,
) -> None:
"""Cleanup the instance of this object.
Args:
args: ignored
"""
self.__del__()
def off(
self,
) -> None:
"""Turn all of the LEDs in the LightString off.
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightStringException: if something bad happens
"""
for index in range(len(self.rgbArray)):
try:
self[index] = PixelColors.OFF.array
except SystemExit: # pylint:disable=try-except-raise
raise
except KeyboardInterrupt: # pylint:disable=try-except-raise
raise
except Exception as ex:
LOGGER.exception(
"Failed to set pixel %s in WS281X to value %s: %s",
index,
LightString(0),
ex,
)
raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
self.refresh()
def refresh(
self,
) -> None:
"""Update the ws281x signal using the numpy array.
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightStringException: if something bad happens
"""
try:
# define callback for map method (fast iterator)
if self.simulate is False:
def SetPixel(irgb):
try:
i = irgb[0]
rgb = irgb[1]
value = (int(rgb[0]) << 16) + (int(rgb[1]) << 8) + int(rgb[2])
self.pixelStrip.setPixelColor(i, value)
except SystemExit: # pylint:disable=try-except-raise
raise
except KeyboardInterrupt: # pylint:disable=try-except-raise
raise
except Exception as ex:
LOGGER.exception(
"Failed to set pixel %d in WS281X to value %d: %s",
i,
value,
str(ex),
)
raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
# copy this class's array into the ws281x array
if self.simulate is False:
list(
map(
SetPixel,
enumerate(self.rgbArray),
)
)
# send the signal out
self.pixelStrip.show()
except SystemExit: # pylint:disable=try-except-raise
raise
except KeyboardInterrupt: # pylint:disable=try-except-raise
raise
except Exception as ex:
LOGGER.exception('Function call "show" in WS281X object failed: %s', str(ex))
raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
if __name__ == "__main__":
LOGGER.info("Running LightString")
# the number of pixels in the light string
PIXEL_COUNT = 100
# GPIO pin to use for PWM signal
GPIO_PWM_PIN = 18
# DMA channel
DMA_CHANNEL = 5
# frequency to run the PWM signal at
PWM_FREQUENCY = 800000
GAMMA = None
LED_STRIP_TYPE = None
INVERT = False
PWM_CHANNEL = 0
with LightString(
pixelStrip=rpi_ws281x.PixelStrip(
num=PIXEL_COUNT,
pin=GPIO_PWM_PIN,
dma=DMA_CHANNEL,
freq_hz=PWM_FREQUENCY,
channel=PWM_CHANNEL,
invert=INVERT,
gamma=GAMMA,
strip_type=LED_STRIP_TYPE,
),
) as liteStr:
liteStr.refresh()
p = Pixel((255, 0, 0))
liteStr[4] = PixelColors.RED
liteStr.refresh()
time.sleep(1)
| [
[
[
60,
62
],
[
1411,
1413
]
],
[
[
70,
73
],
[
1379,
1382
]
],
[
[
81,
87
],
[
4395,
4401
]
],
[
[
95,
102
],
[
2692,
2699
],
[
3054,
3061
],
[
3815,
3822
],
[
4177,
4184
]
],
[
[
110,
114
],
[
13460,
13464
]
],
[
[
122,
129
],
[
422,
429
]
],
[
[
149,
152
],
[
3545,
3548
],
[
6039,
6042
],
[
6256,
6259
],
[
7387,
7390
]
],
[
[
154,
162
],
[
601,
609
]
],
[
[
164,
172
],
[
476,
484
],
[
8063,
8071
]
],
[
[
174,
179
],
[
6213,
6218
],
[
6186,
6191
],
[
7310,
7315
],
[
7344,
7349
]
],
[
[
181,
189
],
[
5740,
5748
],
[
5906,
5914
]
],
[
[
211,
218
],
[
3533,
3540
],
[
5824,
5831
],
[
6027,
6034
],
[
6219,
6226
],
[
6244,
6251
],
[
7350,
7357
],
[
7375,
7382
]
],
[
[
226,
237
],
[
485,
487
],
[
3563,
3565
],
[
3551,
3553
],
[
3624,
3626
],
[
5838,
5840
],
[
6045,
6047
],
[
6233,
6235
],
[
6262,
6264
],
[
6714,
6716
],
[
7364,
7366
],
[
7393,
7395
],
[
7848,
7850
],
[
7945,
7947
],
[
8426,
8428
]
],
[
[
284,
304
],
[
1478,
1498
],
[
1736,
1756
],
[
3129,
3149
],
[
3313,
3333
],
[
4252,
4272
],
[
5373,
5393
],
[
6815,
6835
],
[
7198,
7218
],
[
8218,
8238
],
[
8683,
8703
],
[
8865,
8885
],
[
9244,
9264
],
[
10527,
10547
],
[
12545,
12565
],
[
11827,
11847
]
],
[
[
345,
355
],
[
13033,
13043
],
[
643,
653
],
[
4878,
4888
]
],
[
[
393,
398
],
[
13370,
13375
],
[
3634,
3639
],
[
8130,
8135
],
[
8544,
8549
],
[
8607,
8612
]
],
[
[
400,
411
],
[
13410,
13421
],
[
10053,
10064
]
],
[
[
413,
419
],
[
12641,
12647
],
[
2574,
2580
],
[
2940,
2946
],
[
3693,
3699
],
[
4063,
4069
],
[
5285,
5291
],
[
7101,
7107
],
[
9151,
9157
],
[
10310,
10316
],
[
12449,
12455
],
[
11570,
11576
]
],
[
[
464,
475
],
[
13001,
13012
],
[
10447,
10458
]
],
[
[
12727,
12738
],
[
13072,
13083
]
],
[
[
12786,
12798
],
[
13101,
13113
]
],
[
[
12826,
12837
],
[
13131,
13142
]
],
[
[
12887,
12900
],
[
13164,
13177
]
],
[
[
12914,
12919
],
[
13257,
13262
]
],
[
[
12931,
12945
],
[
13287,
13301
]
],
[
[
12957,
12963
],
[
13231,
13237
]
],
[
[
12976,
12987
],
[
13199,
13210
]
],
[
[
13323,
13330
],
[
13340,
13347
],
[
13397,
13404
],
[
13434,
13441
]
],
[
[
13366,
13367
]
]
] |
# Generated by Django 2.0.5 on 2018-07-05 16:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('aventuras', '0012_auto_20180705_1244'),
]
operations = [
migrations.AddField(
model_name='evento',
name='ourEvent',
field=models.BooleanField(default=False),
),
]
| [
[
[
71,
81
],
[
108,
118
],
[
237,
247
]
],
[
[
83,
89
],
[
338,
344
]
],
[
[
98,
107
]
]
] |
# -*- encoding: utf-8 -*-
# pylint: disable=E0203,E1101,C0111
"""
@file
@brief Runtime operator.
"""
from scipy.linalg import solve
from ._op import OpRunBinaryNum
from ._new_ops import OperatorSchema
class Solve(OpRunBinaryNum):
atts = {'lower': False,
'transposed': False}
def __init__(self, onnx_node, desc=None, **options):
OpRunBinaryNum.__init__(self, onnx_node, desc=desc,
expected_attributes=Solve.atts,
**options)
def _find_custom_operator_schema(self, op_name):
if op_name == "Solve":
return SolveSchema()
raise RuntimeError( # pragma: no cover
"Unable to find a schema for operator '{}'.".format(op_name))
def _run(self, a, b): # pylint: disable=W0221
if self.inplaces.get(1, False):
return (solve(a, b, overwrite_b=True, lower=self.lower,
transposed=self.transposed), )
return (solve(a, b, lower=self.lower, transposed=self.transposed), )
def _infer_shapes(self, a, b): # pylint: disable=W0221
"""
Returns the shapes.
"""
return (b, )
def to_python(self, inputs):
return ('from scipy.linalg import solve',
"return solve({}, {}, lower={}, transposed={})".format(
inputs[0], inputs[1], self.lower, self.transposed))
class SolveSchema(OperatorSchema):
"""
Defines a schema for operators added in this package
such as @see cl TreeEnsembleClassifierDouble.
"""
def __init__(self):
OperatorSchema.__init__(self, 'Solve')
self.attributes = Solve.atts
| [
[
[
126,
131
],
[
871,
876
],
[
992,
997
]
],
[
[
149,
163
],
[
215,
229
],
[
360,
374
]
],
[
[
186,
200
],
[
1435,
1449
],
[
1608,
1622
]
],
[
[
209,
214
],
[
464,
469
],
[
1673,
1678
]
],
[
[
1423,
1434
],
[
623,
634
]
]
] |
from os.path import join,exists,realpath,dirname,basename
from os import makedirs,listdir, system
import numpy as np, _pickle as cPickle, editdistance, seaborn as sns
import matplotlib.pyplot as plt, pandas as pd, itertools, glob, h5py
from scipy.stats import entropy
from matplotlib.font_manager import FontProperties
from IPython.display import display
from collections import defaultdict
from IPython.display import display
# from itertools import izip
from scipy.stats import ranksums
import multiprocessing as mp
from PIL import Image
import inception_score
rundir = 'cifar10/'
e = 100
def get_score(improved_keras_dir, t_n_epoch):
score = []
for i in range(t_n_epoch-9, t_n_epoch):
print(i)
# scorefile = join(improved_keras_dir, 'epoch_{}.score'.format(i))
# if not exists(scorefile):
datafile = join(improved_keras_dir, 'epoch_{}.pkl'.format(i))
if not exists(datafile):
break
with open(datafile, 'rb') as f:
sample = cPickle.load(f)
print(len(list(sample)))
t_score = inception_score.get_inception_score(list(sample), 1)[0]
# with open(scorefile, 'w') as f:
# f.write('%f\n' % t_score)l
# else:
# with open(scorefile) as f:
# t_score = float(f.readline())
score.append(t_score)
return max(score)
expt2plot = ['optimAdam_ratio1']
for expt in expt2plot:
score = get_score(join(rundir, expt), e)
print(expt, score)
| [
[
[
20,
24
],
[
1468,
1472
],
[
849,
853
]
],
[
[
25,
31
],
[
915,
921
]
],
[
[
32,
40
]
],
[
[
41,
48
]
],
[
[
49,
57
]
],
[
[
73,
81
]
],
[
[
82,
89
]
],
[
[
91,
97
]
],
[
[
105,
116
]
],
[
[
118,
136
],
[
1012,
1019
]
],
[
[
138,
150
]
],
[
[
152,
166
]
],
[
[
174,
198
]
],
[
[
200,
212
]
],
[
[
214,
223
]
],
[
[
225,
229
]
],
[
[
231,
235
]
],
[
[
260,
267
]
],
[
[
304,
318
]
],
[
[
347,
354
]
],
[
[
379,
390
]
],
[
[
419,
426
]
],
[
[
480,
488
]
],
[
[
496,
517
]
],
[
[
534,
539
]
],
[
[
548,
563
],
[
1087,
1102
]
],
[
[
565,
571
],
[
1473,
1479
]
],
[
[
585,
586
],
[
1488,
1489
]
],
[
[
598,
607
],
[
1458,
1467
]
],
[
[
1389,
1398
],
[
1435,
1444
]
],
[
[
1427,
1431
],
[
1481,
1485
],
[
1501,
1505
]
],
[
[
1450,
1455
],
[
1507,
1512
]
]
] |
from geco.mips.loading.miplib import *
| [
[
[
37,
38
]
]
] |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vision Related User-defined Types:
- :py:class:`Image`
"""
from __future__ import annotations
from io import IOBase
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Union
from urllib.parse import urlparse
# Third-party libraries
import numpy as np
from PIL import Image as PILImage
# Rikai
from rikai.internal.uri_utils import normalize_uri
from rikai.io import copy
from rikai.mixin import Asset, Displayable, ToNumpy, ToPIL
from rikai.spark.types import ImageType
__all__ = ["Image"]
class Image(ToNumpy, ToPIL, Asset, Displayable):
"""An external Image Asset.
It contains a reference URI to an image stored on the remote system.
Parameters
----------
image : bytes, file-like object, str or :py:class:`~pathlib.Path`
It can be the content of image, or a URI / Path of an image.
"""
__UDT__ = ImageType()
def __init__(
self,
image: Union[bytes, bytearray, IOBase, str, Path],
):
data, uri = None, None
if isinstance(image, IOBase):
data = image.read()
elif isinstance(image, (bytes, bytearray)):
data = image
else:
uri = image
super().__init__(data=data, uri=uri)
@classmethod
def from_array(
cls,
array: np.ndarray,
uri: Union[str, Path],
mode: str = None,
format: str = None,
**kwargs,
) -> Image:
"""Create an image in memory from numpy array.
Parameters
----------
array : np.ndarray
Array data
uri : str or Path
The external URI to store the data.
mode : str, optional
The mode which PIL used to create image. See supported
`modes on PIL document <https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes>`_.
format : str, optional
The image format to save as. See
`supported formats <https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save>`_ for details.
kwargs : dict, optional
Optional arguments to pass to `PIL.Image.save <https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save>`_.
See Also
--------
:py:class:`PIL.Image.fromarray`
:py:func:`~rikai.spark.functions.vision.numpy_to_image`
""" # noqa: E501
assert array is not None
img = PILImage.fromarray(array, mode=mode)
return cls.from_pil(img, uri, format=format, **kwargs)
@staticmethod
def from_pil(
img: PILImage, uri: Union[str, Path], format: str = None, **kwargs
) -> Image:
"""Create an image in memory from a :py:class:`PIL.Image`.
Parameters
----------
img : :py:class:`PIL.Image`
An PIL Image instance
uri : str or Path
The URI to store the image externally.
format : str, optional
The image format to save as. See
`supported formats <https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save>`_ for details.
kwargs : dict, optional
Optional arguments to pass to `PIL.Image.save <https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save>`_.
""" # noqa: E501
parsed = urlparse(normalize_uri(uri))
if parsed.scheme == "file":
img.save(uri, format=format, **kwargs)
else:
with NamedTemporaryFile() as fobj:
img.save(fobj, format=format, **kwargs)
fobj.flush()
copy(fobj.name, uri)
return Image(uri)
def display(self, **kwargs):
"""
Custom visualizer for this image in jupyter notebook
Parameters
----------
kwargs: dict
Optional display arguments
Returns
-------
img: IPython.display.Image
"""
from IPython.display import Image
with self.open() as fobj:
return Image(fobj.read(), **kwargs)
def __repr__(self) -> str:
return f"Image(uri={self.uri})"
def _repr_html_(self):
"""Default visualizer for remote ref (or local ref under cwd)"""
return self.display()._repr_html_()
def _repr_mimebundle_(self, include=None, exclude=None):
"""default visualizer for embedded mime bundle"""
return self.display()._repr_mimebundle_(
include=include, exclude=exclude
)
def _repr_jpeg_(self):
"""default visualizer for embedded jpeg"""
return self.display()._repr_jpeg_()
def _repr_png_(self):
"""default visualizer for embedded png"""
return self.display()._repr_png_()
def __eq__(self, other) -> bool:
return isinstance(other, Image) and super().__eq__(other)
def to_pil(self) -> PILImage:
"""Return an PIL image.
Note
----
The caller should close the image.
https://pillow.readthedocs.io/en/stable/reference/open_files.html#image-lifecycle
"""
return PILImage.open(self.open())
def to_numpy(self) -> np.ndarray:
"""Convert this image into an :py:class:`numpy.ndarray`."""
with self.to_pil() as pil_img:
return np.asarray(pil_img)
| [
[
[
641,
652
]
],
[
[
669,
675
],
[
1518,
1524
],
[
1605,
1611
]
],
[
[
696,
700
],
[
1531,
1535
],
[
1908,
1912
],
[
3207,
3211
]
],
[
[
722,
740
],
[
4085,
4103
]
],
[
[
760,
765
],
[
1494,
1499
],
[
1897,
1902
],
[
3196,
3201
]
],
[
[
791,
799
],
[
3938,
3946
]
],
[
[
832,
843
],
[
1872,
1874
],
[
5770,
5772
],
[
5908,
5910
]
],
[
[
860,
877
],
[
3031,
3039
],
[
3181,
3189
],
[
5487,
5495
],
[
5716,
5724
]
],
[
[
924,
937
],
[
3947,
3960
]
],
[
[
959,
963
],
[
4216,
4220
]
],
[
[
988,
993
],
[
1114,
1119
]
],
[
[
995,
1006
],
[
1121,
1132
]
],
[
[
1008,
1015
],
[
1098,
1105
]
],
[
[
1017,
1022
],
[
1107,
1112
]
],
[
[
1053,
1062
],
[
1434,
1443
]
],
[
[
1064,
1071
]
],
[
[
1092,
1097
],
[
1996,
2001
],
[
3252,
3257
],
[
4252,
4257
],
[
5429,
5434
]
]
] |
#------------------------------------------------------------------------------
# Copyright (c) 2008 Richard W. Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#------------------------------------------------------------------------------
""" Model view menus, menu items and toolbars. """
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
from os.path import join, dirname
from enthought.pyface.api import ImageResource
from enthought.traits.ui.menu import MenuBar, ToolBar, Menu, Action
#------------------------------------------------------------------------------
# File actions:
#------------------------------------------------------------------------------
new_action = Action(name="&New", accelerator="Ctrl+N", action="new_model",
image=ImageResource("new"), tooltip="New (Ctrl+N)")
open_action = Action(name="&Open", accelerator="Ctrl+O", action="open_file",
image=ImageResource("open"), tooltip="Open (Ctrl+O)")
save_action = Action(name="&Save", accelerator="Ctrl+S",
action="save", image=ImageResource("save"), tooltip="Save (Ctrl+S)")
save_as_action = Action(name="Save &As", accelerator="Ctrl+Shift+S",
action="save_as", image=ImageResource("save"),
tooltip="Save As (Ctrl+Shift+S)")
# Action to revert all changes.
revert_action = Action(name="Revert", action="_on_revert",
defined_when="ui.history is not None", enabled_when="ui.history.can_undo")
# Action to close the view window.
close_action = Action(name="E&xit", accelerator="Alt+X", action="on_exit",
image=ImageResource("exit"), tooltip="Exit (Alt+X)")
#------------------------------------------------------------------------------
# Edit actions:
#------------------------------------------------------------------------------
# Action to undo last change.
undo_action = Action(name="Undo", action="_on_undo", accelerator="Ctrl+Z",
defined_when="ui.history is not None", enabled_when="ui.history.can_undo",
# image=ImageResource("undo"),
tooltip="Undo (Ctrl+Z)")
# Action to redo last undo.
redo_action = Action(name="Redo", action="_on_redo", accelerator="Ctrl+Y",
defined_when="ui.history is not None", enabled_when="ui.history.can_redo",
# image=ImageResource("redo.png"),
tooltip="Redo (Ctrl+Y)")
options_action = Action(name="Prefere&nces", action="godot_options")
#------------------------------------------------------------------------------
# View actions:
#------------------------------------------------------------------------------
tree_view_action = Action(
name="Tree", accelerator="Ctrl+T", action="toggle_tree",
tooltip="Tree view (Ctrl+T)", #image=ImageResource("tree"),
style="toggle", checked=True
)
configure_graph_action = Action(name="&Graph Attributes",
accelerator="Ctrl+G",
action="configure_graph", image=ImageResource("graph"),
tooltip="Graph Attributes (Ctrl+G)")
configure_nodes_action = Action(name="&Node Table",
accelerator="Ctrl+Shift+N",
action="configure_nodes", image=ImageResource("node"),
tooltip="Nodes (Ctrl+Shift+N)")
configure_edges_action = Action(name="&Edge Table",
accelerator="Ctrl+Shift+E",
action="configure_edges", image=ImageResource("edge"),
tooltip="Edges (Ctrl+Shift+E)")
configure_dot_code_action = Action(name="&Dot Editor", accelerator="Ctrl+D",
action="configure_dot_code", image=ImageResource("graph"),
tooltip="Dot Editor (Ctrl+D)")
#------------------------------------------------------------------------------
# Graph actions:
#------------------------------------------------------------------------------
node_action = Action(name="&Node", accelerator="Alt+N", action="add_node",
image=ImageResource("node"), tooltip="Node (Alt+N)")
edge_action = Action(name="&Edge", accelerator="Alt+E", action="add_edge",
image=ImageResource("edge"), tooltip="Edge (Alt+E)")
subgraph_action = Action(name="&Subgraph", accelerator="Alt+S",
action="add_subgraph", image=ImageResource("subgraph"),
tooltip="Subgraph (Alt+S)")
cluster_action = Action(name="&Cluster", accelerator="Alt+C",
action="add_cluster", image=ImageResource("cluster"),
tooltip="Cluster (Alt+C)")
#------------------------------------------------------------------------------
# Help actions:
#------------------------------------------------------------------------------
# Action to show help for the graph.
help_action = Action(name="Help", action="show_help",
image=ImageResource("help.png"), tooltip="Help")
about_action = Action(name="About Godot", action="about_godot",
image=ImageResource("about"), tooltip="About Godot")
#------------------------------------------------------------------------------
# Menus:
#------------------------------------------------------------------------------
file_menu = Menu(
"|", # Hack suggested by Brennan Williams to achieve correct ordering
new_action, open_action, "_",
save_action, save_as_action, revert_action, "_",
close_action, name="&File"
)
edit_menu = Menu("|", undo_action, redo_action, "_", options_action,
name="&Edit")
view_menu = Menu("|", tree_view_action, "_", configure_graph_action,
configure_nodes_action, configure_edges_action, configure_dot_code_action,
name="&View")
graph_menu = Menu("|", node_action, edge_action, subgraph_action,
cluster_action, name="&Graph")
help_menu = Menu("|", #help_action, "_",
about_action, name="&Help")
menubar = MenuBar(file_menu, edit_menu, view_menu, graph_menu, help_menu)
#------------------------------------------------------------------------------
# Godot "ToolBar" instance:
#------------------------------------------------------------------------------
toolbar = ToolBar(
"|", #close_action, "_",
new_action, open_action, save_action, save_as_action, "_",
undo_action, redo_action, "_",
node_action, edge_action,
configure_graph_action,
configure_nodes_action,
configure_edges_action,
show_tool_names=False, #show_divider=False
)
# EOF -------------------------------------------------------------------------
| [
[
[
1519,
1523
]
],
[
[
1525,
1532
]
],
[
[
1567,
1580
],
[
1913,
1926
],
[
2047,
2060
],
[
2178,
2191
],
[
2324,
2337
],
[
2677,
2690
],
[
3957,
3970
],
[
4143,
4156
],
[
4323,
4336
],
[
4499,
4512
],
[
4823,
4836
],
[
4956,
4969
],
[
5101,
5114
],
[
5255,
5268
],
[
5592,
5605
],
[
5710,
5723
]
],
[
[
1618,
1625
],
[
6583,
6590
]
],
[
[
1627,
1634
],
[
6848,
6855
]
],
[
[
1636,
1640
],
[
5941,
5945
],
[
6154,
6158
],
[
6242,
6246
],
[
6410,
6414
],
[
6511,
6515
]
],
[
[
1642,
1648
],
[
1841,
1847
],
[
1974,
1980
],
[
2110,
2116
],
[
2244,
2250
],
[
2434,
2440
],
[
2607,
2613
],
[
2947,
2953
],
[
3193,
3199
],
[
3418,
3424
],
[
3668,
3674
],
[
3862,
3868
],
[
4048,
4054
],
[
4228,
4234
],
[
4411,
4417
],
[
4752,
4758
],
[
4885,
4891
],
[
5022,
5028
],
[
5178,
5184
],
[
5542,
5548
],
[
5651,
5657
]
],
[
[
1828,
1838
],
[
6025,
6035
],
[
6890,
6900
]
],
[
[
1960,
1971
],
[
6037,
6048
],
[
6902,
6913
]
],
[
[
2096,
2107
],
[
6059,
6070
],
[
6915,
6926
]
],
[
[
2227,
2241
],
[
6072,
6086
],
[
6928,
6942
]
],
[
[
2418,
2431
],
[
6088,
6101
]
],
[
[
2592,
2604
],
[
6112,
6124
]
],
[
[
2933,
2944
],
[
6164,
6175
],
[
6953,
6964
]
],
[
[
3179,
3190
],
[
6177,
6188
],
[
6966,
6977
]
],
[
[
3401,
3415
],
[
6195,
6209
]
],
[
[
3649,
3665
],
[
6252,
6268
]
],
[
[
3837,
3859
],
[
6275,
6297
],
[
7018,
7040
]
],
[
[
4023,
4045
],
[
6303,
6325
],
[
7046,
7068
]
],
[
[
4203,
4225
],
[
6327,
6349
],
[
7074,
7096
]
],
[
[
4383,
4408
],
[
6351,
6376
]
],
[
[
4738,
4749
],
[
6420,
6431
],
[
6988,
6999
]
],
[
[
4871,
4882
],
[
6433,
6444
],
[
7001,
7012
]
],
[
[
5004,
5019
],
[
6446,
6461
]
],
[
[
5161,
5175
],
[
6467,
6481
]
],
[
[
5528,
5539
]
],
[
[
5636,
5648
],
[
6544,
6556
]
],
[
[
5929,
5938
],
[
6591,
6600
]
],
[
[
6142,
6151
],
[
6602,
6611
]
],
[
[
6230,
6239
],
[
6613,
6622
]
],
[
[
6397,
6407
],
[
6624,
6634
]
],
[
[
6499,
6508
],
[
6636,
6645
]
],
[
[
6573,
6580
]
],
[
[
6838,
6845
]
]
] |
# Copyright 2022 The BladeDISC Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import unittest
import torch
from torch.nn import functional as F
from torch_blade import tensorrt
from torch_blade import utils
from torch_blade import tools
from torch_blade import Config
from torch_blade.logging import logger
from torch_blade.testing.common_utils import Feedforward, TestCase
from tests.tensorrt import skipIfNoTensorRT
from torch_blade.onnx_backends.backend_testbed import OnnxBackendChecker
@skipIfNoTensorRT()
class TestTensorRTSupportInfo(TestCase):
def test_support_info(self):
input = torch.ones([10, 10]).cuda()
net = Feedforward(10, 10)
net.eval().cuda()
module = torch.jit.trace(net, input)
module = tools.freeze_module(module._c, disableShapePeephole=False)
graph = module.forward.graph
unsupported = tensorrt.get_unsupported_nodes(graph)
self.assertEqual(len(unsupported), 0)
def test_empty_onnx_export(self):
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = torch.nn.Linear(3, 4)
self.dropout = torch.nn.Dropout(p=0.8)
def forward(self, x):
x = self.linear(x)
x = self.dropout(x)
return x.contiguous().detach()
model = Model().cuda().eval()
module = torch.jit.trace(model, torch.ones([2, 3]).cuda())
module = tools.freeze_module(module._c, disableShapePeephole=False)
graph = module.forward.graph
unsupported = tensorrt.get_unsupported_nodes(graph)
self.assertEqual(len(unsupported), 0)
def test_inplace_safety(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 10, kernel_size=3, padding=1)
self.conv2 = torch.nn.Conv2d(10, 3, kernel_size=3, padding=1)
self.conv3 = torch.nn.Conv2d(3, 3, kernel_size=3, padding=1)
self.bnorm = torch.nn.BatchNorm2d(3)
def forward_inplace(self, x):
out = self.conv1(x)
# this inplace bias is supported
out += 1
# this inplace relu_ is supported
out = F.relu_(out)
out = self.conv2(out)
# this inplace relu_ is supported
out = F.relu_(out)
shortcut = out
# this inplace add_ is supported
out += shortcut
shortcut = out
out = self.conv3(out)
out = self.bnorm(out)
# this inplace add_ is supported
out += shortcut
out1 = out[:, :1, :, :]
out2 = out[:, 1:, :, :]
out1 = F.relu_(out1)
out2 = F.relu_(out2)
out[:, :1, :, :] = out1
out[:, 1:, :, :] = out2
return out
def forward_no_inplace(self, x):
out = self.conv1(x)
out = out + 1
out = F.relu(out)
out = self.conv2(out)
out = F.relu(out)
shortcut = out
out = out + shortcut
shortcut = out
out = self.conv3(out)
out = self.bnorm(out)
out = out + shortcut
out = F.relu(out)
return out
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.block1 = BasicBlock()
self.block2 = BasicBlock()
def forward(self, x):
out1 = self.block1.forward_inplace(x)
out1 = self.block2.forward_inplace(out1)
out2 = self.block1.forward_no_inplace(x)
out2 = self.block2.forward_no_inplace(out2)
return out1, out2
model = Model()
model.eval()
model.cuda()
batch = torch.ones([1, 3, 224, 224])
batch = batch.cuda()
out1, out2 = model(batch)
self.assertEqual(out1, out2)
traced_model = torch.jit.trace(model, batch)
frozen_module = tools.freeze_module(traced_model._c, disableShapePeephole=False)
graph = frozen_module.forward.graph
ops_counter = utils.list_ops_count(graph)
unspt_counter = collections.Counter()
unsupported = tensorrt.get_unsupported_nodes(graph)
for node in unsupported:
unspt_counter[node.kind()] += 1
self.assertEqual(ops_counter["aten::slice"], unspt_counter["aten::slice"])
self.assertEqual(ops_counter["aten::view"], unspt_counter["aten::view"])
self.assertEqual(ops_counter["aten::copy_"], unspt_counter["aten::copy_"])
self.assertEqual(ops_counter["aten::expand"], unspt_counter["aten::expand"])
self.assertEqual(unspt_counter["aten::relu_"], 4)
logger.info(ops_counter)
logger.info(unspt_counter)
self.assertEqual(unspt_counter["aten::add_"], 0)
def test_inplace_safety_another(self):
def op(x):
return x + 1
def op_(x):
x -= 1
return x
def _count_unsupported(unspt):
unspt_counter = collections.Counter()
for node in unspt:
unspt_counter[node.kind()] += 1
return unspt_counter
def _count_graph(graph):
unsupported = tensorrt.get_unsupported_nodes(graph, ignore_device=True)
return _count_unsupported(unsupported)
def _count_model(model):
model.eval().cuda()
input = torch.zeros([4]).cuda()
output = model(input)
traced_module = torch.jit.trace(model, (input,))
graph = traced_module.graph
return _count_graph(graph)
class Model1(torch.nn.Module):
"""
Within this model, torch.jit.trace will produce graph like:
%2 : Float = aten::add(%1, some_constant)
%3 : Float = aten::sub_(%2, some_constant)
%4 : Float = aten::add(%3, some_constant)
The input of the third node is %3 instead of %2 which is not consistent with the definition of the
corresponding nn.Module. So the inplace node aten::sub_ is the last consumer of its inputs which make it
inplace-safe, and therefore all the nodes in this graph is inplace-safe.
The same phenomenon occurs in model2. So we manually add two graphs that have 'correct' topology structures
with corresponding nn.Module (i.e. Model1 and Model2) and use them as UTs.
"""
def forward(self, x):
x1 = op(x)
x2 = op_(x1)
x3 = op(x1)
return x3
class Model2(torch.nn.Module):
def forward(self, x):
x1 = op(x)
x2 = op_(x1) # support
x3 = op_(x2) # support
x4 = op(x3)
x5 = op_(x3) # not support
x6 = op_(x5) # not support
x7 = op(x3)
return x7
unspt_counter = _count_model(Model1())
self.assertEqual(unspt_counter["aten::sub_"], 0)
unspt_counter = _count_model(Model2())
self.assertEqual(unspt_counter["aten::sub_"], 0)
if utils.torch_version_number() >= utils.parse_version("1.8.1"):
graph1 = torch.parse_ir(
"""
graph( %x.1 : Float(4)):
%1 : int = prim::Constant[value=1]()
%2 : Float(4) = aten::add(%x.1, %1, %1)
%3 : int = prim::Constant[value=1]()
%4 : Float(4) = aten::sub_(%2, %3, %3)
%5 : int = prim::Constant[value=1]()
%6 : Float(4) = aten::add(%2, %5, %5)
return (%6)
"""
)
graph2 = torch.parse_ir(
"""
graph( %x.1 : Float(4)):
%1 : int = prim::Constant[value=1]()
%2 : Float(4) = aten::add(%x.1, %1, %1)
%3 : int = prim::Constant[value=1]()
%4 : Float(4) = aten::sub_(%2, %3, %3)
%5 : int = prim::Constant[value=1]()
%6 : Float(4) = aten::sub_(%4, %5, %5)
%7 : int = prim::Constant[value=1]()
%8 : Float(4) = aten::add(%6, %7, %7)
%9 : int = prim::Constant[value=1]()
%10 : Float(4) = aten::sub_(%6, %9, %9)
%11 : int = prim::Constant[value=1]()
%12 : Float(4) = aten::sub_(%10, %11, %11)
%13 : int = prim::Constant[value=1]()
%14 : Float(4) = aten::add(%6, %13, %13)
return (%14)
"""
)
else:
graph1 = torch.parse_ir(
"""
graph( %x.1 : Float(4:1)):
%1 : int = prim::Constant[value=1]()
%2 : Float(4:1) = aten::add(%x.1, %1, %1)
%3 : int = prim::Constant[value=1]()
%4 : Float(4:1) = aten::sub_(%2, %3, %3)
%5 : int = prim::Constant[value=1]()
%6 : Float(4:1) = aten::add(%2, %5, %5)
return (%6)
"""
)
graph2 = torch.parse_ir(
"""
graph( %x.1 : Float(4:1)):
%1 : int = prim::Constant[value=1]()
%2 : Float(4:1) = aten::add(%x.1, %1, %1)
%3 : int = prim::Constant[value=1]()
%4 : Float(4:1) = aten::sub_(%2, %3, %3)
%5 : int = prim::Constant[value=1]()
%6 : Float(4:1) = aten::sub_(%4, %5, %5)
%7 : int = prim::Constant[value=1]()
%8 : Float(4:1) = aten::add(%6, %7, %7)
%9 : int = prim::Constant[value=1]()
%10 : Float(4:1) = aten::sub_(%6, %9, %9)
%11 : int = prim::Constant[value=1]()
%12 : Float(4:1) = aten::sub_(%10, %11, %11)
%13 : int = prim::Constant[value=1]()
%14 : Float(4:1) = aten::add(%6, %13, %13)
return (%14)
"""
)
unspt_counter = _count_graph(graph1)
self.assertEqual(unspt_counter["aten::sub_"], 1)
unspt_counter = _count_graph(graph2)
self.assertEqual(unspt_counter["aten::sub_"], 2)
def test_graph_input_inplace_safe(self):
class Model(torch.nn.Module):
def forward(self, x):
return F.relu_(x)
batch = torch.Tensor([1, -1, 1, -1])
batch = batch.cuda()
model = Model().eval().cuda()
traced_model = torch.jit.trace(model, batch)
self.assertEqual(batch, torch.Tensor([1, 0, 1, 0]))
frozen_module = torch._C._freeze_module(traced_model._c)
graph = frozen_module.forward.graph
unspt_counter = collections.Counter()
unsupported = tensorrt.get_unsupported_nodes(graph)
for node in unsupported:
unspt_counter[node.kind()] += 1
self.assertEqual(unspt_counter["aten::relu_"], 1)
def test_view_kinds_0(self):
if utils.torch_version_number() >= utils.parse_version("1.8.1"):
graph = torch.parse_ir(
"""
graph( %x.1 : Float(1, 1, 1)):
%1 : int = prim::Constant[value=0]()
%2 : int = prim::Constant[value=1]()
%3 : Float(1, 1) = aten::select(%x.1, %1, %2)
%4 : int = prim::Constant[value=0]()
%5 : int = prim::Constant[value=1]()
%6 : Float(1) = aten::select(%3, %4, %5)
%7 : int = prim::Constant[value=1]()
%8 : int = prim::Constant[value=1]()
%9 : Float(1) = aten::add(%6, %7, %8)
return (%9)
"""
)
else:
graph = torch.parse_ir(
"""
graph( %x.1 : Float(1:1, 1:1, 1:1)):
%1 : int = prim::Constant[value=0]()
%2 : int = prim::Constant[value=1]()
%3 : Float(1:1, 1:1) = aten::select(%x.1, %1, %2)
%4 : int = prim::Constant[value=0]()
%5 : int = prim::Constant[value=1]()
%6 : Float(1:1) = aten::select(%3, %4, %5)
%7 : int = prim::Constant[value=1]()
%8 : int = prim::Constant[value=1]()
%9 : Float(1:1) = aten::add(%6, %7, %8)
return (%9)
"""
)
unsupported = tensorrt.get_unsupported_nodes(graph, True)
self.assertEqual(len(unsupported), 0)
def test_view_kinds_1(self):
if utils.torch_version_number() >= utils.parse_version("1.8.1"):
graph = torch.parse_ir(
"""
graph( %x.1 : Float(1, 1, 1)):
%1 : int = prim::Constant[value=0]()
%2 : int = prim::Constant[value=1]()
%3 : Float(1, 1) = aten::select(%x.1, %1, %2)
%4 : int = prim::Constant[value=0]()
%5 : int = prim::Constant[value=1]()
%6 : Float(1) = aten::select(%3, %4, %5)
%7 : int = prim::Constant[value=1]()
%8 : int = prim::Constant[value=1]()
%9 : Float(1) = aten::add_(%6, %7, %8)
return (%9)
"""
)
else:
graph = torch.parse_ir(
"""
graph( %x.1 : Float(1:1, 1:1, 1:1)):
%1 : int = prim::Constant[value=0]()
%2 : int = prim::Constant[value=1]()
%3 : Float(1:1, 1:1) = aten::select(%x.1, %1, %2)
%4 : int = prim::Constant[value=0]()
%5 : int = prim::Constant[value=1]()
%6 : Float(1:1) = aten::select(%3, %4, %5)
%7 : int = prim::Constant[value=1]()
%8 : int = prim::Constant[value=1]()
%9 : Float(1:1) = aten::add_(%6, %7, %8)
return (%9)
"""
)
unsupported = tensorrt.get_unsupported_nodes(graph, True)
self.assertEqual(len(unsupported), 3)
def test_view_kinds_2(self):
if utils.torch_version_number() >= utils.parse_version("1.8.1"):
graph = torch.parse_ir(
"""
graph( %x.1 : Float(1, 1, 1)):
%1 : int = prim::Constant[value=0]()
%2 : int = prim::Constant[value=1]()
%3 : Float(1, 1) = aten::select(%x.1, %1, %2)
%4 : int = prim::Constant[value=0]()
%5 : int = prim::Constant[value=1]()
%6 : Float(1, 1) = aten::add_(%3, %4, %5)
%7 : int = prim::Constant[value=1]()
%8 : int = prim::Constant[value=1]()
%9 : Float(1) = aten::select(%3, %7, %8)
return (%9)
"""
)
else:
graph = torch.parse_ir(
"""
graph( %x.1 : Float(1:1, 1:1, 1:1)):
%1 : int = prim::Constant[value=0]()
%2 : int = prim::Constant[value=1]()
%3 : Float(1:1, 1:1) = aten::select(%x.1, %1, %2)
%4 : int = prim::Constant[value=0]()
%5 : int = prim::Constant[value=1]()
%6 : Float(1:1, 1:1) = aten::add_(%3, %4, %5)
%7 : int = prim::Constant[value=1]()
%8 : int = prim::Constant[value=1]()
%9 : Float(1:1) = aten::select(%3, %7, %8)
return (%9)
"""
)
unsupported = tensorrt.get_unsupported_nodes(graph, True)
self.assertEqual(len(unsupported), 3)
# NOTE: this unsupported set length should be 3 (two aten::select and one aten::add_)
# However, due to a flaw of the inplace safety check algorithm, aten::add_ is excluded
# in the set.
# todo: fix this error.
# graph = torch.parse_ir(
# '''
# graph( %x.1 : Float(1:1, 1:1, 1:1)):
# %1 : int = prim::Constant[value=0]()
# %2 : int = prim::Constant[value=1]()
# %3 : Float(1:1, 1:1, 1:1) = aten::add(%x.1, %1, %2)
# %4 : int = prim::Constant[value=0]()
# %5 : int = prim::Constant[value=1]()
# %6 : Float(1:1, 1:1) = aten::select(%3, %4, %5)
# %7 : Float(1:1, 1:1) = aten::add_(%3, %4, %5)
# %8 : int = prim::Constant[value=1]()
# %9 : int = prim::Constant[value=1]()
# %10 : Float(1:1) = aten::select(%6, %8, %9)
# return (%9)
# '''
# )
# unsupported = tensorrt.get_unsupported_nodes(graph, True)
# self.assertEqual(len(unsupported), 2)
@skipIfNoTensorRT()
class TestManRules(TestCase):
def _make_check(self, graph, target):
checker = OnnxBackendChecker(graph, tensorrt.is_onnx2trt_supported, "TensorRT")
is_supported = checker()
self.assertEqual(is_supported, target)
def test_aten_mul(self):
graph = torch.parse_ir(
"""
graph(%0 : int[]):
%1 : int = prim::Constant[value=1]()
%3 : int = aten::mul(%0, %1)
return (%3)
"""
)
self._make_check(graph, False)
def test_aten_add(self):
graph = torch.parse_ir(
"""
graph(%0 : int[], %1 : int[]):
%2 : int[] = aten::add(%0, %1)
return (%2)
"""
)
self._make_check(graph, False)
def test_aten_eq(self):
graph = torch.parse_ir(
"""
graph(%0 : int[]):
%1 : int = prim::Constant[value=1]()
%2 : int[] = prim::ListConstruct(%1)
%3 : bool = aten::eq(%0, %2)
return (%3)
"""
)
self._make_check(graph, False)
def test_const_fold_before_export(self):
if utils.torch_version_number() >= utils.parse_version("1.8.1"):
graph = torch.parse_ir(
"""
graph(%input0.2 : Float(1, 512, 18, 18, requires_grad=0, device=cuda:0)):
%1 : None = prim::Constant() # :0:0
%2 : bool = prim::Constant[value=1]()
%3 : float[] = prim::Constant[value=[2., 2.]]()
%x1.3 : Float(1, 512, 36, 36, requires_grad=0, device=cuda:0) = aten::upsample_bilinear2d(%input0.2, %1, %2, %3)
return (%x1.3)
"""
)
else:
graph = torch.parse_ir(
"""
graph(%input0.2 : Float(1:165888, 512:324, 18:18, 18:1, requires_grad=0, device=cuda:0)):
%1 : None = prim::Constant() # :0:0
%2 : bool = prim::Constant[value=1]()
%3 : float[] = prim::Constant[value=[2., 2.]]()
%x1.3 : Float(1:663552, 512:1296, 36:36, 36:1, requires_grad=0, device=cuda:0) = aten::upsample_bilinear2d(%input0.2, %1, %2, %3)
return (%x1.3)
"""
)
cfg = Config.get_current_context_or_new().clone()
cfg.customize_onnx_opset_version = 11
with cfg:
self._make_check(graph, True)
def test_scalar_input_on_graph(self):
if utils.torch_version_number() >= utils.parse_version("1.8.1"):
graph = torch.parse_ir(
"""
graph(%x.3 : Float(1, 64, 1, 1, requires_grad=0, device=cuda:0),
%1 : int):
%2 : int = prim::Constant[value=-1]()
%3 : int[] = prim::ListConstruct(%1, %2)
%input.14 : Float(1, 64, requires_grad=0, device=cuda:0) = aten::view(%x.3, %3)
return (%input.14)
"""
)
else:
graph = torch.parse_ir(
"""
graph(%x.3 : Float(1:64, 64:1, 1:1, 1:1, requires_grad=0, device=cuda:0),
%1 : int):
%2 : int = prim::Constant[value=-1]()
%3 : int[] = prim::ListConstruct(%1, %2)
%input.14 : Float(1:64, 64:1, requires_grad=0, device=cuda:0) = aten::view(%x.3, %3)
return (%input.14)
"""
)
self._make_check(graph, True)
if __name__ == "__main__":
unittest.main()
| [
[
[
605,
616
],
[
5070,
5081
],
[
12024,
12035
],
[
5961,
5972
]
],
[
[
624,
632
],
[
22192,
22200
]
],
[
[
640,
645
],
[
1142,
1147
],
[
1247,
1252
],
[
1554,
1559
],
[
1966,
1971
],
[
1989,
1994
],
[
2297,
2302
],
[
4118,
4123
],
[
4681,
4686
],
[
4833,
4838
],
[
6570,
6575
],
[
7556,
7561
],
[
8189,
8194
],
[
8720,
8725
],
[
9754,
9759
],
[
10293,
10298
],
[
11578,
11583
],
[
11681,
11686
],
[
11800,
11805
],
[
11862,
11867
],
[
11915,
11920
],
[
12368,
12373
],
[
13102,
13107
],
[
14055,
14060
],
[
14790,
14795
],
[
15744,
15749
],
[
16482,
16487
],
[
18762,
18767
],
[
19072,
19077
],
[
19340,
19345
],
[
19798,
19803
],
[
20342,
20347
],
[
21185,
21190
],
[
21663,
21668
],
[
1680,
1685
],
[
1733,
1738
],
[
2427,
2432
],
[
2505,
2510
],
[
2583,
2588
],
[
2660,
2665
],
[
6350,
6355
],
[
6436,
6441
]
],
[
[
667,
682
],
[
2909,
2910
],
[
3032,
3033
],
[
3448,
3449
],
[
3485,
3486
],
[
3740,
3741
],
[
3812,
3813
],
[
4058,
4059
],
[
11653,
11654
]
],
[
[
707,
715
],
[
1411,
1419
],
[
2152,
2160
],
[
5114,
5122
],
[
12068,
12076
],
[
13838,
13846
],
[
15527,
15535
],
[
17224,
17232
],
[
18592,
18600
],
[
6155,
6163
]
],
[
[
740,
745
],
[
5018,
5023
],
[
8106,
8111
],
[
8138,
8143
],
[
12286,
12291
],
[
12318,
12323
],
[
13973,
13978
],
[
14005,
14010
],
[
15662,
15667
],
[
15694,
15699
],
[
19716,
19721
],
[
19748,
19753
],
[
21103,
21108
],
[
21135,
21140
]
],
[
[
770,
775
],
[
1292,
1297
],
[
2033,
2038
],
[
4887,
4892
]
],
[
[
800,
806
],
[
20899,
20905
]
],
[
[
839,
845
],
[
5627,
5633
],
[
5660,
5666
]
],
[
[
891,
902
],
[
1184,
1195
]
],
[
[
904,
912
],
[
1082,
1090
],
[
18495,
18503
]
],
[
[
940,
956
],
[
1033,
1049
],
[
18457,
18473
]
],
[
[
1011,
1029
],
[
18566,
18584
]
],
[
[
1058,
1081
]
],
[
[
18482,
18494
]
]
] |
"""Collectors to crawl free IP proxies from the internet
"""
| [] |
#dealing with unexpected results
#great for writing complex programs
try:
print (a) #throw an exception
except:
print("a is not defined")
#a is not defined, instead of crashing program,
#we can ask it to tell us what the problem is
try:
print(a)
except NameError: #if this is the error...
print("a still isn't defined")
except: #if not...
print("Something else is wrong")
print(a) #this will not work and will
#BREAK the program
| [] |
# Copyright 2020 The PEGASUS Authors..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Summarization params of baseline models for downstream datasets."""
import functools
from pegasus.data import parsers
from pegasus.eval import estimator_metrics
from pegasus.eval import text_eval
from pegasus.models import transformer
from pegasus.ops import public_parsing_ops
from pegasus.params import pegasus_params
from pegasus.params import registry
from tensorflow.contrib import training as contrib_training
def transformer_params(patterns, param_overrides):
"""Params for TransformerEncoderDecoderMLModel.
Args:
patterns: a dict include train_pattern, dev_pattern, test_pattern
param_overrides: a string, comma separated list of name=value
Returns:
A instance of HParams
"""
hparams = contrib_training.HParams(
train_pattern=patterns["train_pattern"],
dev_pattern=patterns["dev_pattern"],
test_pattern=patterns["test_pattern"],
vocab_filename="pegasus/ops/testdata/sp_test.model",
encoder_type="sentencepiece_newline",
length_bucket_size=0,
add_task_id=False,
batch_size=patterns["batch_size"],
max_input_len=patterns["max_input_len"],
max_target_len=patterns["max_output_len"],
max_decode_len=patterns["max_output_len"],
hidden_size=768,
filter_size=3072,
num_heads=12,
num_encoder_layers=12,
num_decoder_layers=12,
beam_size=1,
beam_start=5,
beam_alpha=0.6,
beam_min=0,
beam_max=-1,
temperature=0.0,
top_k=0,
top_p=0.0,
optimizer_name="adafactor",
train_steps=patterns["train_steps"],
learning_rate=patterns["learning_rate"],
label_smoothing=0.0,
dropout=0.1,
eval_max_predictions=patterns.get("eval_steps", 1000),
use_bfloat16=False,
model=None,
parser=None,
encoder=None,
estimator_prediction_fn=None,
eval=None,
estimator_eval_metrics_fn=estimator_metrics.gen_eval_metrics_fn,
)
if param_overrides:
hparams.parse(param_overrides)
hparams.parser = functools.partial(
parsers.supervised_strings_parser,
hparams.vocab_filename,
hparams.encoder_type,
hparams.max_input_len,
hparams.max_target_len,
length_bucket_size=hparams.length_bucket_size,
length_bucket_start_id=pegasus_params.LENGTH_BUCKET_START_ID,
length_bucket_max_id=pegasus_params.TASK_START_ID - 1,
add_task_id=hparams.add_task_id,
task_start_id=pegasus_params.TASK_START_ID)
hparams.encoder = public_parsing_ops.create_text_encoder(
hparams.encoder_type, hparams.vocab_filename)
hparams.model = functools.partial(
transformer.TransformerEncoderDecoderModel, hparams.encoder.vocab_size,
hparams.hidden_size, hparams.filter_size, hparams.num_heads,
hparams.num_encoder_layers, hparams.num_decoder_layers,
hparams.label_smoothing, hparams.dropout)
beam_keys = ("beam_start", "beam_alpha", "beam_min", "beam_max",
"temperature", "top_k", "top_p")
beam_kwargs = {k: hparams.get(k) for k in beam_keys if k in hparams.values()}
def decode_fn(features):
return hparams.model().predict(features, hparams.max_decode_len,
hparams.beam_size, **beam_kwargs)
hparams.estimator_prediction_fn = decode_fn
hparams.eval = functools.partial(
text_eval.text_eval,
hparams.encoder,
num_reserved=pegasus_params.NUM_RESERVED_TOKENS)
return hparams
@registry.register("MSR")
def billsum_transformer(param_overrides):
return transformer_params(
{
"train_pattern": "tfds:MSR-train",
"dev_pattern": "tfds:MSR-train",
"test_pattern": "tfds:MSR-train",
"max_input_len": 512,
"max_output_len": 256,
"train_steps": 1000000,
"learning_rate": 0.01,
"batch_size": 128,
}, param_overrides)
@registry.register("PN_Summary")
def billsum_transformer(param_overrides):
return transformer_params(
{
"train_pattern": "tfds:PN_Summary-train",
"dev_pattern": "tfds:PN_Summary-validation",
"test_pattern": "tfds:PN_Summary-test",
"max_input_len": 512,
"max_output_len": 256,
"train_steps": 50000,
"learning_rate": 5e-4,
"batch_size": 128,
"label_smoothing" : 0.1,
"beam_alpha" : 0.8
}, param_overrides)
@registry.register("tebyan")
def billsum_transformer(param_overrides):
return transformer_params(
{
"train_pattern": "tfds:Tebyan-train",
"dev_pattern": "tfds:Tebyan-validation",
"test_pattern": "tfds:Tebyan-test",
"max_input_len": 512,
"max_output_len": 256,
"train_steps": 50000,
"learning_rate": 5e-4,
"batch_size": 128,
"label_smoothing" : 0.1,
"beam_alpha" : 0.8
}, param_overrides)
@registry.register("perkey_summary")
def billsum_transformer(param_overrides):
return transformer_params(
{
"train_pattern": "tfds:perkey_summary-train",
"dev_pattern": "tfds:perkey_summary-validation",
"test_pattern": "tfds:perkey_summary-test",
"max_input_len": 512,
"max_output_len": 256,
"train_steps": 50000,
"learning_rate": 5e-4,
"batch_size": 128,
"label_smoothing" : 0.1,
"beam_alpha" : 0.8
}, param_overrides)
@registry.register("perkey_title")
def billsum_transformer(param_overrides):
return transformer_params(
{
"train_pattern": "tfds:perkey_title-train",
"dev_pattern": "tfds:perkey_title-validation",
"test_pattern": "tfds:perkey_title-test",
"max_input_len": 512,
"max_output_len": 256,
"train_steps": 50000,
"learning_rate": 5e-4,
"batch_size": 128,
"label_smoothing" : 0.1,
"beam_alpha" : 0.8
}, param_overrides)
@registry.register("parsi_nlu_entailment")
def billsum_transformer(param_overrides):
return transformer_params(
{
"train_pattern": "tfds:ParsiNLU_Entailment-train",
"dev_pattern": "tfds:ParsiNLU_Entailment-validation",
"test_pattern": "tfds:ParsiNLU_Entailment-test",
"max_input_len": 512,
"max_output_len": 256,
"train_steps": 20000,
"learning_rate": 5e-4,
"batch_size": 48,
"label_smoothing" : 0.1,
"beam_alpha" : 0.8
}, param_overrides)
@registry.register("parsi_nlu_mch")
def billsum_transformer(param_overrides):
return transformer_params(
{
"train_pattern": "tfds:ParsiNLU_MCH-train",
"dev_pattern": "tfds:ParsiNLU_MCH-validation",
"test_pattern": "tfds:ParsiNLU_MCH-test",
"max_input_len": 512,
"max_output_len": 256,
"train_steps": 20000,
"learning_rate": 5e-4,
"batch_size": 48,
"label_smoothing" : 0.1,
"beam_alpha" : 0.8
}, param_overrides)
@registry.register("parsi_nlu_qqp")
def billsum_transformer(param_overrides):
return transformer_params(
{
"train_pattern": "tfds:ParsiNLU_QQP-train",
"dev_pattern": "tfds:ParsiNLU_QQP-validation",
"test_pattern": "tfds:ParsiNLU_QQP-test",
"max_input_len": 512,
"max_output_len": 256,
"train_steps": 20000,
"learning_rate": 5e-4,
"batch_size": 48,
"label_smoothing" : 0.1,
"beam_alpha" : 0.8
}, param_overrides)
@registry.register("parsi_nlu_sentence_sentiment_movie")
def billsum_transformer(param_overrides):
return transformer_params(
{
"train_pattern": "tfds:ParsiNLU_sentence_sentiment_Movie-train",
"dev_pattern": "tfds:ParsiNLU_sentence_sentiment_Movie-validation",
"test_pattern": "tfds:ParsiNLU_sentence_sentiment_Movie-test",
"max_input_len": 512,
"max_output_len": 256,
"train_steps": 20000,
"learning_rate": 5e-4,
"batch_size": 48,
"label_smoothing" : 0.1,
"beam_alpha" : 0.8
}, param_overrides)
@registry.register("parsi_nlu_sentence_sentiment_food")
def billsum_transformer(param_overrides):
return transformer_params(
{
"train_pattern": "tfds:ParsiNLU_sentence_sentiment_Food-train",
"dev_pattern": "tfds:ParsiNLU_sentence_sentiment_Food-validation",
"test_pattern": "tfds:ParsiNLU_sentence_sentiment_Food-test",
"max_input_len": 512,
"max_output_len": 256,
"train_steps": 20000,
"learning_rate": 5e-4,
"batch_size": 48,
"label_smoothing" : 0.1,
"beam_alpha" : 0.8
}, param_overrides)
@registry.register("wiki_summary_v1")
def billsum_transformer(param_overrides):
return transformer_params(
{
"train_pattern": "tfds:wiki_summary_v1-train",
"dev_pattern": "tfds:wiki_summary_v1-validation",
"test_pattern": "tfds:wiki_summary_v1-test",
"max_input_len": 512,
"max_output_len": 256,
"train_steps": 50000,
"learning_rate": 5e-4,
"batch_size": 64,
"label_smoothing" : 0.1,
"beam_alpha" : 0.8
}, param_overrides)
@registry.register("voa_headlines")
def billsum_transformer(param_overrides):
return transformer_params(
{
"train_pattern": "tfds:VOA_headlines-train",
"dev_pattern": "tfds:VOA_headlines-validation",
"test_pattern": "tfds:VOA_headlines-test",
"max_input_len": 512,
"max_output_len": 256,
"train_steps": 20000,
"learning_rate": 5e-4,
"batch_size": 64,
"label_smoothing" : 0.1,
"beam_alpha" : 0.8
}, param_overrides) | [
[
[
664,
673
],
[
2612,
2621
],
[
3192,
3201
],
[
3892,
3901
]
],
[
[
700,
707
],
[
2637,
2644
]
],
[
[
733,
750
],
[
2491,
2508
]
],
[
[
776,
785
],
[
3917,
3926
]
],
[
[
813,
824
],
[
3217,
3228
]
],
[
[
849,
867
],
[
3081,
3099
]
],
[
[
895,
909
],
[
2871,
2885
],
[
2937,
2951
],
[
3030,
3044
],
[
3980,
3994
]
],
[
[
937,
945
],
[
4036,
4044
],
[
4461,
4469
],
[
4980,
4988
],
[
5483,
5491
],
[
6018,
6026
],
[
6546,
6554
],
[
7101,
7109
],
[
7628,
7636
],
[
8155,
8163
],
[
8766,
8774
],
[
9373,
9381
],
[
9911,
9919
]
],
[
[
977,
1005
],
[
1311,
1327
]
],
[
[
1012,
1030
],
[
4112,
4130
],
[
4544,
4562
],
[
5059,
5077
],
[
5570,
5588
],
[
6103,
6121
],
[
6639,
6657
],
[
7187,
7205
],
[
7714,
7732
],
[
8262,
8280
],
[
8872,
8890
],
[
9461,
9479
],
[
9997,
10015
]
],
[
[
4065,
4084
]
],
[
[
4497,
4516
]
],
[
[
5012,
5031
]
],
[
[
5523,
5542
]
],
[
[
6056,
6075
]
],
[
[
6592,
6611
]
],
[
[
7140,
7159
]
],
[
[
7667,
7686
]
],
[
[
8215,
8234
]
],
[
[
8825,
8844
]
],
[
[
9414,
9433
]
],
[
[
9950,
9969
]
]
] |
from matplotlib import pyplot as plt
import figlatex
import afterpulse_tile21
import textbox
import colormap
vov = 5.5
################
ap21 = afterpulse_tile21.AfterPulseTile21(vov)
fig = plt.figure(num='figlaserpos-0', clear=True, figsize=[4.5, 3])
ap21.sim.hist('mainpos-offset', 'mainnpe==1', fig=fig, selection=False)
ax, = fig.get_axes()
textbox.textbox(ax, f'{vov} VoV', fontsize='medium', loc='lower center')
ax.set_xlabel('Laser peak position [ns]')
figs = [fig]
fig = plt.figure(num='figlaserpos-1', clear=True, figsize=[4.5, 3])
ap21.sim.hist2d('mainpos-offset', 'mainampl', '(mainnpe==1)&(length==128)', fig=fig, cmap=colormap.uniform(), selection=False)
ax, _ = fig.get_axes()
textbox.textbox(ax, f'{vov} VoV', fontsize='medium', loc='lower center')
ax.set_xlabel('Laser peak position [ns]')
ax.set_ylabel('Peak height')
figs.append(fig)
for fig in figs:
fig.tight_layout()
fig.show()
figlatex.save([figs])
| [
[
[
23,
36
],
[
194,
197
],
[
486,
489
]
],
[
[
45,
53
],
[
918,
926
]
],
[
[
61,
78
],
[
147,
164
]
],
[
[
86,
93
],
[
350,
357
],
[
699,
706
]
],
[
[
101,
109
],
[
639,
647
]
],
[
[
111,
114
],
[
182,
185
],
[
373,
376
],
[
722,
725
]
],
[
[
140,
144
],
[
257,
261
],
[
549,
553
]
],
[
[
188,
191
],
[
307,
310
],
[
335,
338
],
[
474,
477
]
],
[
[
329,
331
],
[
366,
368
],
[
423,
425
]
],
[
[
466,
470
],
[
844,
848
],
[
873,
877
],
[
933,
937
]
],
[
[
480,
483
],
[
629,
632
],
[
684,
687
],
[
856,
859
]
],
[
[
676,
678
],
[
715,
717
],
[
772,
774
],
[
814,
816
]
],
[
[
680,
681
]
],
[
[
866,
869
],
[
883,
886
],
[
906,
909
]
]
] |
import random
number = random.randrange(1,10)
guess = input("Guess a number from 1 to 10: ")
guess = int(guess)
if guess == number:
print("Great job! You got it!")
else:
print("Sorry, better luck next time.")
print("The number was " + str(number)) | [
[
[
7,
13
],
[
23,
29
]
],
[
[
14,
20
],
[
124,
130
],
[
251,
257
]
],
[
[
46,
51
],
[
105,
110
]
],
[
[
93,
98
],
[
115,
120
]
]
] |
# Your Romeo API key, required for accessing the RoMEO API
# override this in your local config
ROMEO_API_KEY = ""
ROMEO_API_BASE_URL = "http://www.sherpa.ac.uk/romeo/api29.php"
ROMEO_DOWNLOAD_BASE_URL = "http://www.sherpa.ac.uk/downloads/" | [
[
[
96,
109
]
],
[
[
116,
134
]
],
[
[
180,
203
]
]
] |
from abc import ABC, abstractmethod
from pathlib import Path
from virtool_workflow.data_model import Index
from virtool_workflow.data_model.files import VirtoolFileFormat
class AbstractIndexProvider(ABC):
@abstractmethod
async def get(self) -> Index:
"""Get the current index."""
...
@abstractmethod
async def upload(self, path: Path, format: VirtoolFileFormat) -> Path:
"""Upload a file associated with the index."""
...
@abstractmethod
async def download(self, target_path: Path, *names) -> Path:
"""Download files associated with the index."""
...
@abstractmethod
async def finalize(self):
"""Mark that the index associated with the current job has a json representation of the reference available."""
...
def __await__(self):
return self.get().__await__()
| [
[
[
16,
19
],
[
202,
205
]
],
[
[
21,
35
],
[
214,
228
],
[
318,
332
],
[
481,
495
],
[
635,
649
]
],
[
[
56,
60
],
[
402,
406
],
[
366,
370
],
[
555,
559
],
[
538,
542
]
],
[
[
102,
107
],
[
256,
261
]
],
[
[
154,
171
],
[
380,
397
]
],
[
[
180,
201
]
]
] |
import os
import io
import struct
import bson
class Packet:
def __init__(self, packet_id=0, status_code=0, packet_name="", body_type=0, body=b""):
self.packet_id = packet_id
self.status_code = status_code
self.packet_name = packet_name
self.body_type = body_type
self.body_size = 0
self.body = body
def to_loco_packet(self):
f = io.BytesIO()
f.write(struct.pack("<I", self.packet_id))
f.write(struct.pack("<H", self.status_code))
if (11-len(self.packet_name)) < 0:
raise Exception("invalid packetName")
f.write(self.packet_name.encode("utf-8"))
f.write(b"\x00" * (11 - len(self.packet_name)))
f.write(struct.pack("<b", self.body_type))
f.write(struct.pack("<i", len(self.body)))
f.write(self.body)
return f.getvalue()
def read_loco_packet(self, packet):
self.packet_id = struct.unpack("<I", packet[:4])[0]
self.status_code = struct.unpack("<H", packet[4:6])[0]
self.packet_name = packet[6:17].decode().replace("\0", "")
self.body_type = struct.unpack("<b", packet[17:18])[0]
self.body_size = struct.unpack("<i", packet[18:22])[0]
self.body = packet[22:]
def to_encrypted_loco_packet(self, crypto):
iv = os.urandom(16)
encrypted_packet = crypto.aes_encrypt(self.to_loco_packet(), iv)
f = io.BytesIO()
f.write(struct.pack("<I", len(encrypted_packet)+len(iv)))
f.write(iv)
f.write(encrypted_packet)
return f.getvalue()
def read_encrypted_loco_packet(self, packet, crypto):
packetLen = struct.unpack(">I", packet[0:4])[0]
iv = packet[4:20]
data = packet[20:packetLen-16]
dec = crypto.aes_decrypt(data, iv)
try:
self.read_loco_packet(dec)
except Exception as e:
print(str(e))
def to_json_body(self):
return bson.decode(self.body)
| [
[
[
7,
9
],
[
1325,
1327
]
],
[
[
17,
19
],
[
397,
399
],
[
1426,
1428
]
],
[
[
27,
33
],
[
426,
432
],
[
477,
483
],
[
732,
738
],
[
783,
789
],
[
940,
946
],
[
1002,
1008
],
[
1130,
1136
],
[
1193,
1199
],
[
1455,
1461
],
[
1667,
1673
]
],
[
[
42,
46
],
[
1966,
1970
]
],
[
[
55,
61
]
]
] |
"""
This test module will only run on a POSIX system. Windows support *may* be added at some point in the future.
"""
# Global imports
import json, operator, os, signal, sys
from argparse import ArgumentParser
from datetime import datetime
from pathlib import Path
from time import sleep
from time import time
# local imports
from surfdebugnode import DebugNode
from surfapi.surfnoderpc import SurfNodeRPC
WAITING = True
def main( ):
global WAITING
"""
This example contains a simple parser to obtain the locations of both surfd and the data directory,
creates and runs a new debug node, replays all of the blocks in the data directory, and finally waits
for the user to interface with it outside of the script. Sending SIGINT succesfully and cleanly terminates
the program.
"""
import os, signal, sys
from argparse import ArgumentParser
if( os.name != "posix" ):
print( "This script only works on POSIX systems" )
return
parser = ArgumentParser( description='Run a Debug Node on an existing chain. This simply replays all blocks ' + \
'and then waits indefinitely to allow user interaction through RPC calls and ' + \
'the CLI wallet' )
parser.add_argument( '--surfd', '-s', type=str, required=True, help='The location of a surfd binary to run the debug node' )
parser.add_argument( '--data-dir', '-d', type=str, required=True, help='The location of an existing data directory. ' + \
'The debug node will pull blocks from this directory when replaying the chain. The directory ' + \
'will not be changed.' )
parser.add_argument( '--plugins', '-p', type=str, required=False, help='A list of plugins to load. witness and ' + \
'debug_node are always loaded.' )
parser.add_argument( '--apis', '-a', type=str, required=False, help='A list of apis to load. database_api, login_api, ' + \
'and debug_node_api are always loaded' )
args = parser.parse_args()
surfd = Path( args.surfd )
if( not surfd.exists() ):
print( 'Error: surfd does not exist.' )
return
surfd = surfd.resolve()
if( not surfd.is_file() ):
print( 'Error: surfd is not a file.' )
return
data_dir = Path( args.data_dir )
if( not data_dir.exists() ):
print( 'Error: data_dir does not exist or is not a properly constructed surfd data directory' )
data_dir = data_dir.resolve()
if( not data_dir.is_dir() ):
print( 'Error: data_dir is not a directory' )
plugins = list()
if( args.plugins ):
plugins = args.plugins.split()
apis = list()
if( args.apis ):
apis = args.apis.split()
signal.signal( signal.SIGINT, sigint_handler )
print( 'Creating and starting debug node' )
debug_node = DebugNode( str( surfd ), str( data_dir ), plugins=plugins, apis=apis, args='--replay', surfd_err=sys.stderr )
with debug_node:
debug_node.debug_generate_blocks_until( int( time() ), True )
debug_node.debug_set_hardfork( 14 )
print( 'Done!' )
print( 'Feel free to interact with this node via RPC calls for the cli wallet.' )
print( 'To shutdown the node, send SIGINT with Ctrl + C to this script. It will shut down safely.' )
while( WAITING ):
assert( debug_node.debug_generate_blocks( 1 ) == 1 )
sleep( 3 )
def sigint_handler( signum, frame ):
global WAITING
WAITING = False
sleep( 3 )
sys.exit( 0 )
main() | [
[
[
142,
146
]
],
[
[
148,
156
]
],
[
[
158,
160
]
],
[
[
162,
168
]
],
[
[
170,
173
],
[
3523,
3526
]
],
[
[
196,
210
]
],
[
[
232,
240
]
],
[
[
261,
265
],
[
2087,
2091
],
[
2325,
2329
]
],
[
[
283,
288
],
[
3420,
3425
],
[
3509,
3514
]
],
[
[
306,
310
],
[
3046,
3050
]
],
[
[
354,
363
],
[
2864,
2873
]
],
[
[
396,
407
]
],
[
[
409,
416
],
[
3337,
3344
]
],
[
[
429,
433
],
[
3538,
3542
]
],
[
[
3436,
3450
],
[
2783,
2797
]
],
[
[
3490,
3497
]
]
] |
"""
Bundesagentur für Arbeit: Jobsuche API
Die größte Stellendatenbank Deutschlands durchsuchen, Details zu Stellenanzeigen und Informationen über Arbeitgeber abrufen. <br><br> Die Authentifizierung funktioniert per OAuth 2 Client Credentials mit JWTs. Folgende Client-Credentials können dafür verwendet werden:<br><br> **ClientID:** c003a37f-024f-462a-b36d-b001be4cd24a <br> **ClientSecret:** 32a39620-32b3-4307-9aa1-511e3d7f48a8 # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from deutschland.jobsuche.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from deutschland.jobsuche.exceptions import ApiAttributeError
def lazy_import():
from deutschland.jobsuche.model.job_search_response_embedded_jobs import (
JobSearchResponseEmbeddedJobs,
)
globals()["JobSearchResponseEmbeddedJobs"] = JobSearchResponseEmbeddedJobs
class JobSearchResponseEmbedded(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
none_type,
) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
"jobs": ([JobSearchResponseEmbeddedJobs],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"jobs": "jobs", # noqa: E501
}
read_only_vars = {}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""JobSearchResponseEmbedded - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
jobs ([JobSearchResponseEmbeddedJobs]): [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""JobSearchResponseEmbedded - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
jobs ([JobSearchResponseEmbeddedJobs]): [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(
f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes."
)
| [
[
[
564,
566
]
],
[
[
588,
591
]
],
[
[
672,
684
],
[
6494,
6506
],
[
10648,
10660
]
],
[
[
690,
703
]
],
[
[
709,
720
],
[
1274,
1285
]
],
[
[
726,
737
]
],
[
[
743,
758
],
[
2476,
2491
],
[
2953,
2968
],
[
3406,
3421
]
],
[
[
764,
788
]
],
[
[
794,
824
],
[
3610,
3640
],
[
7827,
7857
]
],
[
[
830,
834
],
[
2760,
2764
]
],
[
[
840,
848
],
[
2778,
2786
]
],
[
[
854,
863
]
],
[
[
869,
878
],
[
2889,
2898
]
],
[
[
884,
910
]
],
[
[
940,
952
],
[
6426,
6438
]
],
[
[
997,
1014
],
[
11776,
11793
]
],
[
[
1021,
1032
],
[
2699,
2710
],
[
3289,
3300
]
],
[
[
1248,
1273
]
]
] |
TESTBED_TEMPLATE = \
r"""
{
"name" : "WCB Test",
"description" : "Run WCB for {{ duration_minutes }} minutes",
"start_time" : "{{ start_time }}",
"duration" : {{ duration_seconds }},
"binaries" : {
"hardware" : "firefly",
"bin_file": "{{ abs_bin_path }}",
"programAddress": "0x00200000",
"targets": {{ targets }}
},
"logs": 0,
"orchestrator" : {
"type" : "python",
"file" : "TO BE MODIFIED: (absolute) path to tcp_orchestrator.py",
"init" : "init_test",
"init_kargs" : {
"scenario" : "{{ version }}",
"seed" : {{ seed }}
},
"run" : "run_test"
},
"extra_files" : "TO BE MODIFIED: (absolute) path to the data.mat file were several control variables (e.g., K, Nt, Qt, ...) are defined"
}
""" | [
[
[
0,
16
]
]
] |
"""
Generates a cluster using a plummer model with a salpeter Initial Mass Function.
Compares the generated IMF against the expected line.
"""
import numpy
from matplotlib import pyplot
from amuse.units import units
from amuse.units import nbody_system
from amuse.ic.plummer import new_plummer_model
from amuse.ic.salpeter import new_salpeter_mass_distribution
def new_cluster(number_of_stars = 1000):
masses = new_salpeter_mass_distribution(
number_of_stars,
mass_min = 0.1 | units.MSun,
mass_max = 125.0 | units.MSun,
alpha = -2.35
)
nbody_converter = nbody_system.nbody_to_si(masses.sum(), 1 | units.parsec)
particles = new_plummer_model(number_of_stars, nbody_converter)
particles.mass = masses
particles.move_to_center()
return particles
def plot_particles_and_mass_distribution(particles):
figure = pyplot.figure(figsize= (12,6))
subplot = figure.add_subplot(1, 2, 1)
subplot.scatter(
particles.x.value_in(units.parsec),
particles.y.value_in(units.parsec),
s = particles.mass.value_in(units.MSun),# * len(particles),
edgecolors = 'red',
facecolors = 'red'
)
subplot.set_xlim(-4,4)
subplot.set_ylim(-4,4)
subplot.set_xlabel('x (parsec)')
subplot.set_ylabel('y (parsec)')
subplot = figure.add_subplot(1, 2, 2)
masses = particles.mass.value_in(units.MSun)
bins = 10**numpy.linspace(-1, 2, 100)
number_of_particles, bin_edges= numpy.histogram(masses, bins = bins)
bin_sizes = bin_edges[1:] - bin_edges[:-1]
y = number_of_particles / bin_sizes
x = (bin_edges[1:] + bin_edges[:-1]) / 2.0
y = y[number_of_particles > 10.0]
x = x[number_of_particles > 10.0]
subplot.scatter(x, y)
c = ((0.1**-1.35) - (125.0**-1.35)) / 1.35
subplot.plot(x, len(particles)/ c * (x**-2.35))
subplot.set_xscale('log')
subplot.set_yscale('log')
subplot.set_xlabel(u'M [M\u2299]')
subplot.set_ylabel('N')
pyplot.show()
if __name__ == "__main__":
particles = new_cluster(20000)
plot_particles_and_mass_distribution(particles)
| [
[
[
151,
156
],
[
1454,
1459
],
[
1517,
1522
]
],
[
[
181,
187
],
[
875,
881
],
[
2056,
2062
]
],
[
[
212,
217
],
[
501,
506
],
[
540,
545
],
[
646,
651
],
[
1012,
1017
],
[
1056,
1061
],
[
1107,
1112
],
[
1422,
1427
]
],
[
[
242,
254
],
[
603,
615
]
],
[
[
284,
301
],
[
676,
693
]
],
[
[
332,
362
],
[
418,
448
]
],
[
[
368,
379
],
[
2118,
2129
]
],
[
[
813,
849
],
[
2141,
2177
]
],
[
[
2106,
2115
],
[
2178,
2187
]
]
] |
import torch
import numpy
# codes of this function are borrowed from https://github.com/yanx27/Pointnet_Pointnet2_pytorch/blob/master/models/pointnet2_utils.py
def index_points(device, points, idx):
"""
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
new_points:, indexed points data, [B, S, C]
"""
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
# batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
batch_indices = torch.arange(B, dtype=torch.long).cuda().view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, idx, :]
return new_points
def knn_l2(device, net, k, u):
'''
Input:
k: int32, number of k in k-nn search
net: (batch_size, npoint, c) float32 array, points
u: int32, block size
Output:
idx: (batch_size, npoint, k) int32 array, indices to input points
'''
INF = 1e8
batch_size = net.size(0)
npoint = net.size(1)
n_channel = net.size(2)
square = torch.pow(torch.norm(net, dim=2,keepdim=True),2)
def u_block(batch_size, npoint, u):
block = numpy.zeros([batch_size, npoint, npoint])
n = npoint // u
for i in range(n):
block[:, (i*u):(i*u+u), (i*u):(i*u+u)] = numpy.ones([batch_size, u, u]) * (-INF)
return block
# minus_distance = 2 * torch.matmul(net, net.transpose(2,1)) - square - square.transpose(2,1) + torch.Tensor(u_block(batch_size, npoint, u)).to(device)
minus_distance = 2 * torch.matmul(net, net.transpose(2,1)) - square - square.transpose(2,1) + torch.Tensor(u_block(batch_size, npoint, u)).cuda()
_, indices = torch.topk(minus_distance, k, largest=True, sorted=False)
return indices
| [
[
[
7,
12
],
[
666,
671
],
[
688,
693
],
[
1202,
1207
],
[
1212,
1217
],
[
1697,
1702
],
[
1770,
1775
],
[
1839,
1844
]
],
[
[
20,
25
],
[
1308,
1313
],
[
1454,
1459
]
],
[
[
165,
177
]
],
[
[
819,
825
]
]
] |
import numpy as np
from torch import Tensor, FloatTensor
from kospeech.data.audio.core import load_audio
from kospeech.data.audio.augment import NoiseInjector, SpecAugment
from kospeech.data.audio.feature import MelSpectrogram, MFCC, Spectrogram, FilterBank
class AudioParser(object):
"""
Provides inteface of audio parser.
Note:
Do not use this class directly, use one of the sub classes.
Method:
- **parse_audio()**: abstract method. you have to override this method.
- **parse_transcript()**: abstract method. you have to override this method.
"""
def __init__(self, dataset_path, noiseset_size, sample_rate=16000, noise_level=0.7, noise_augment=False):
if noise_augment:
self.noise_injector = NoiseInjector(dataset_path, noiseset_size, sample_rate, noise_level)
def parse_audio(self, *args, **kwargs):
raise NotImplementedError
def parse_transcript(self, *args, **kwargs):
raise NotImplementedError
class SpectrogramParser(AudioParser):
"""
Parses audio file into (spectrogram / mel spectrogram / mfcc) with various options.
Args:
transform_method (str): which feature to use (default: mel)
sample_rate (int): Sample rate of audio signal. (Default: 16000)
n_mels (int): Number of mfc coefficients to retain. (Default: 40)
frame_length (int): frame length for spectrogram (ms) (Default : 20)
frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)
feature_extract_by (str): which library to use for feature extraction(default: librosa)
del_silence (bool): flag indication whether to delete silence or not (default: True)
input_reverse (bool): flag indication whether to reverse input or not (default: True)
normalize (bool): flag indication whether to normalize spectrum or not (default:True)
time_mask_para (int): Hyper Parameter for Time Masking to limit time masking length
freq_mask_para (int): Hyper Parameter for Freq Masking to limit freq masking length
time_mask_num (int): how many time-masked area to make
freq_mask_num (int): how many freq-masked area to make
sos_id (int): start of sentence token`s identification
eos_id (int): end of sentence token`s identification
target_dict (dict): dictionary of filename and labels
"""
VANILLA = 0 # Not apply augmentation
SPEC_AUGMENT = 1 # SpecAugment
NOISE_INJECTION = 2 # Noise Injection
HYBRID_AUGMENT = 3 # Noise Injection & SpecAugment
def __init__(self, feature_extract_by: str = 'librosa', sample_rate: int = 16000,
n_mels: int = 80, frame_length: int = 20, frame_shift: int = 10,
del_silence: bool = False, input_reverse: bool = True,
normalize: bool = False, transform_method: str = 'mel',
time_mask_para: int = 70, freq_mask_para: int = 12, time_mask_num: int = 2, freq_mask_num: int = 2,
sos_id: int = 1, eos_id: int = 2, target_dict: dict = None, noise_augment: bool = False,
dataset_path: str = None, noiseset_size: int = 0, noise_level: float = 0.7) -> None:
super(SpectrogramParser, self).__init__(dataset_path, noiseset_size, sample_rate, noise_level, noise_augment)
self.del_silence = del_silence
self.input_reverse = input_reverse
self.normalize = normalize
self.sos_id = sos_id
self.eos_id = eos_id
self.target_dict = target_dict
self.spec_augment = SpecAugment(time_mask_para, freq_mask_para, time_mask_num, freq_mask_num)
if transform_method.lower() == 'mel':
self.transforms = MelSpectrogram(sample_rate, n_mels, frame_length, frame_shift, feature_extract_by)
elif transform_method.lower() == 'mfcc':
self.transforms = MFCC(sample_rate, n_mels, frame_length, frame_shift, feature_extract_by)
elif transform_method.lower() == 'spect':
self.transforms = Spectrogram(sample_rate, frame_length, frame_shift, feature_extract_by)
elif transform_method.lower() == 'fbank':
self.transforms = FilterBank(sample_rate, n_mels, frame_length, frame_shift)
else:
raise ValueError("Unsupported feature : {0}".format(transform_method))
def parse_audio(self, audio_path: str, augment_method: int) -> Tensor:
"""
Parses audio.
Args:
audio_path (str): path of audio file
augment_method (int): flag indication which augmentation method to use.
Returns: feature_vector
- **feature_vector** (torch.FloatTensor): feature from audio file.
"""
signal = load_audio(audio_path, self.del_silence)
if signal is None:
return None
if augment_method == SpectrogramParser.NOISE_INJECTION or augment_method == SpectrogramParser.HYBRID_AUGMENT:
signal = self.noise_injector(signal)
feature_vector = self.transforms(signal)
if self.normalize:
feature_vector -= feature_vector.mean()
if self.input_reverse: # Refer to "Sequence to Sequence Learning with Neural Network" paper
feature_vector = feature_vector[:, ::-1]
feature_vector = FloatTensor(np.ascontiguousarray(np.swapaxes(feature_vector, 0, 1)))
else:
feature_vector = FloatTensor(feature_vector).transpose(0, 1)
if augment_method == SpectrogramParser.SPEC_AUGMENT or augment_method == SpectrogramParser.HYBRID_AUGMENT:
feature_vector = self.spec_augment(feature_vector)
return feature_vector
def parse_transcript(self, *args, **kwargs):
raise NotImplementedError
| [
[
[
7,
18
],
[
5363,
5365
],
[
5384,
5386
]
],
[
[
37,
43
],
[
4443,
4449
]
],
[
[
45,
56
],
[
5351,
5362
],
[
5463,
5474
]
],
[
[
94,
104
],
[
4776,
4786
]
],
[
[
145,
158
],
[
769,
782
]
],
[
[
160,
171
],
[
3597,
3608
]
],
[
[
212,
226
],
[
3748,
3762
]
],
[
[
228,
232
],
[
3911,
3915
]
],
[
[
234,
245
],
[
4065,
4076
]
],
[
[
247,
257
],
[
4218,
4228
]
],
[
[
266,
277
],
[
1027,
1038
]
],
[
[
1009,
1026
],
[
3251,
3268
],
[
4899,
4916
],
[
4954,
4971
],
[
5537,
5554
],
[
5589,
5606
]
]
] |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import vlag
import snmp
import switchport_basic
import switchport
import mac_learning
import ip
import ipv6
import track
import edge_loop_detection
import fcoeport
import mac
import hide_vrrp_holer
import ip_acl_interface
import service_policy
import port_profile_to_interface_associations
import qos
import vlan
import bpdu_drop
import tunnel
import spanning_tree
class port_channel(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/port-channel. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The list of port-channels in the managed device. Each
entry represents a port-channel.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__name','__cee','__vlag','__po_speed','__ifindex','__description','__shutdown','__minimum_links','__snmp','__mtu','__switchport_basic','__switchport','__mac_learning','__ip','__ipv6','__track','__edge_loop_detection','__load_balance','__fcoeport','__mac','__hide_vrrp_holer','__ip_acl_interface','__service_policy','__port_profile_port','__port_profile_to_interface_associations','__priority_tag_enable','__qos','__vlan','__bpdu_drop','__tunnel','__spanning_tree',)
_yang_name = 'port-channel'
_rest_name = 'Port-channel'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__spanning_tree = YANGDynClass(base=spanning_tree.spanning_tree, is_container='container', presence=False, yang_name="spanning-tree", rest_name="spanning-tree", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Spanning tree commands', u'sort-priority': u'98', u'callpoint': u'po-stp-config', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
self.__minimum_links = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="minimum-links", rest_name="minimum-links", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Least number of operationally UP links to declare \nport-channel UP', u'callpoint': u'interface_po', u'cli-completion-actionpoint': u'getinterfaceall-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint32', is_config=True)
self.__service_policy = YANGDynClass(base=service_policy.service_policy, is_container='container', presence=False, yang_name="service-policy", rest_name="service-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Attach Input/Output Policy Map', u'callpoint': u'interface_po', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='container', is_config=True)
self.__ip_acl_interface = YANGDynClass(base=ip_acl_interface.ip_acl_interface, is_container='container', presence=False, yang_name="ip-acl-interface", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'109'}}, namespace='urn:brocade.com:mgmt:brocade-ip-access-list', defining_module='brocade-ip-access-list', yang_type='container', is_config=True)
self.__ip = YANGDynClass(base=ip.ip, is_container='container', presence=False, yang_name="ip", rest_name="ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Internet Protocol (IP).', u'cli-incomplete-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__edge_loop_detection = YANGDynClass(base=edge_loop_detection.edge_loop_detection, is_container='container', presence=False, yang_name="edge-loop-detection", rest_name="edge-loop-detection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable edge-loop-detection on the selected interface', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_ELD', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__cee = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,31})'}), is_leaf=True, yang_name="cee", rest_name="cee", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u"Apply default CEE map 'default'"}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='cee:cee-map-name-type', is_config=True)
self.__shutdown = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="shutdown", rest_name="shutdown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Shutdown the selected interface', u'cli-show-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_SHUT_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='empty', is_config=True)
self.__qos = YANGDynClass(base=qos.qos, is_container='container', presence=False, yang_name="qos", rest_name="qos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Quality of Service (QoS)', u'cli-incomplete-no': None, u'callpoint': u'interface_po', u'sort-priority': u'93'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='container', is_config=True)
self.__switchport_basic = YANGDynClass(base=switchport_basic.switchport_basic, is_container='container', presence=False, yang_name="switchport-basic", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__hide_vrrp_holer = YANGDynClass(base=hide_vrrp_holer.hide_vrrp_holer, is_container='container', presence=False, yang_name="hide-vrrp-holer", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u'/vcsmode/vcs-mode = "false"'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)
self.__port_profile_port = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="port-profile-port", rest_name="port-profile-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the interface to AMPP profile mode', u'sort-priority': u'114'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='empty', is_config=True)
self.__snmp = YANGDynClass(base=snmp.snmp, is_container='container', presence=False, yang_name="snmp", rest_name="snmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Simple Network Management Protocol (SNMP).', u'cli-incomplete-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_BASIC_CONFIG', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__bpdu_drop = YANGDynClass(base=bpdu_drop.bpdu_drop, is_container='container', presence=False, yang_name="bpdu-drop", rest_name="bpdu-drop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Drop received BPDUs', u'callpoint': u'po-stp-config', u'sort-priority': u'98', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
self.__port_profile_to_interface_associations = YANGDynClass(base=port_profile_to_interface_associations.port_profile_to_interface_associations, is_container='container', presence=False, yang_name="port-profile-to-interface-associations", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='container', is_config=True)
self.__ipv6 = YANGDynClass(base=ipv6.ipv6, is_container='container', presence=False, yang_name="ipv6", rest_name="ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Internet Protocol version 6(IPv6).', u'cli-incomplete-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__vlag = YANGDynClass(base=vlag.vlag, is_container='container', presence=False, yang_name="vlag", rest_name="vlag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual LAG', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__description = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1 .. 63']}), is_leaf=True, yang_name="description", rest_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface specific description', u'cli-multi-value': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_BASIC_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='string', is_config=True)
self.__track = YANGDynClass(base=track.track, is_container='container', presence=False, yang_name="track", rest_name="track", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Track interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__vlan = YANGDynClass(base=vlan.vlan, is_container='container', presence=False, yang_name="vlan", rest_name="vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Vlan commands', u'cli-incomplete-no': None, u'callpoint': u'VlanClassifierActivateCallpointWorker_po', u'sort-priority': u'97'}}, namespace='urn:brocade.com:mgmt:brocade-vlan', defining_module='brocade-vlan', yang_type='container', is_config=True)
self.__mac_learning = YANGDynClass(base=mac_learning.mac_learning, is_container='container', presence=False, yang_name="mac-learning", rest_name="mac-learning", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC learning.', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MAC_LEARNING_DISABLE_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__mac = YANGDynClass(base=mac.mac, is_container='container', presence=False, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure MAC parameters', u'callpoint': u'MacaclAccessgroupIntPoCP', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MAC_ACL_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='container', is_config=True)
self.__load_balance = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'src-dst-ip-port': {'value': 6}, u'src-mac-vid': {'value': 2}, u'src-dst-ip': {'value': 4}, u'src-dst-ip-mac-vid': {'value': 5}, u'dst-mac-vid': {'value': 1}, u'src-dst-mac-vid': {'value': 3}, u'src-dst-ip-mac-vid-port': {'value': 7}},), default=unicode("src-dst-ip-mac-vid-port"), is_leaf=True, yang_name="load-balance", rest_name="load-balance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Load balancing Commands'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='enumeration', is_config=True)
self.__po_speed = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'40000': {'value': 3}, u'100': {'value': 5}, u'10000': {'value': 2}, u'100000': {'value': 4}, u'1000': {'value': 1}},), default=unicode("10000"), is_leaf=True, yang_name="po-speed", rest_name="speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set speed informational parameter', u'alt-name': u'speed'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='enumeration', is_config=True)
self.__fcoeport = YANGDynClass(base=fcoeport.fcoeport, is_container='container', presence=False, yang_name="fcoeport", rest_name="fcoeport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure the LAG to enable FCoE', u'display-when': u'(/vcsmode/vcs-mode = "true") or (/fcoe-fsb/fcoe-fsb-enable)', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_FEATURE_FCOE', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'fcoeport_attr_lag_cp'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)
self.__name = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..6144']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-range': None, u'cli-custom-range': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='portchannel-type', is_config=True)
self.__switchport = YANGDynClass(base=switchport.switchport, is_container='container', presence=False, yang_name="switchport", rest_name="switchport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the switching characteristics of the Layer2 \ninterface', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
self.__tunnel = YANGDynClass(base=tunnel.tunnel, is_container='container', presence=False, yang_name="tunnel", rest_name="tunnel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'po-stp-config', u'sort-priority': u'98', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'info': u'Tunneling parameters'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
self.__mtu = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1522..9216']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(2500), is_leaf=True, yang_name="mtu", rest_name="mtu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Set mtu value to interface'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='mtu-type', is_config=True)
self.__ifindex = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="ifindex", rest_name="ifindex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint64', is_config=False)
self.__priority_tag_enable = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="priority-tag-enable", rest_name="priority-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure 802.1p priority tagging', u'cli-full-command': None, u'callpoint': u'interface_po', u'alt-name': u'priority-tag'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='empty', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'port-channel']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Port-channel']
def _get_name(self):
"""
Getter method for name, mapped from YANG variable /interface/port_channel/name (portchannel-type)
YANG Description: The port-channel identifier.
"""
return self.__name
def _set_name(self, v, load=False):
"""
Setter method for name, mapped from YANG variable /interface/port_channel/name (portchannel-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name() directly.
YANG Description: The port-channel identifier.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..6144']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-range': None, u'cli-custom-range': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='portchannel-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """name must be of a type compatible with portchannel-type""",
'defined-type': "brocade-interface:portchannel-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..6144']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-range': None, u'cli-custom-range': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='portchannel-type', is_config=True)""",
})
self.__name = t
if hasattr(self, '_set'):
self._set()
def _unset_name(self):
self.__name = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..6144']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-range': None, u'cli-custom-range': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='portchannel-type', is_config=True)
def _get_cee(self):
"""
Getter method for cee, mapped from YANG variable /interface/port_channel/cee (cee:cee-map-name-type)
YANG Description: The CEE map associated with this port-channel
interface.
"""
return self.__cee
def _set_cee(self, v, load=False):
"""
Setter method for cee, mapped from YANG variable /interface/port_channel/cee (cee:cee-map-name-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_cee is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cee() directly.
YANG Description: The CEE map associated with this port-channel
interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,31})'}), is_leaf=True, yang_name="cee", rest_name="cee", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u"Apply default CEE map 'default'"}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='cee:cee-map-name-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cee must be of a type compatible with cee:cee-map-name-type""",
'defined-type': "cee:cee-map-name-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,31})'}), is_leaf=True, yang_name="cee", rest_name="cee", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u"Apply default CEE map 'default'"}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='cee:cee-map-name-type', is_config=True)""",
})
self.__cee = t
if hasattr(self, '_set'):
self._set()
def _unset_cee(self):
self.__cee = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,31})'}), is_leaf=True, yang_name="cee", rest_name="cee", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u"Apply default CEE map 'default'"}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='cee:cee-map-name-type', is_config=True)
def _get_vlag(self):
"""
Getter method for vlag, mapped from YANG variable /interface/port_channel/vlag (container)
YANG Description: The vLAG properties for this port-channel.
"""
return self.__vlag
def _set_vlag(self, v, load=False):
"""
Setter method for vlag, mapped from YANG variable /interface/port_channel/vlag (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlag is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlag() directly.
YANG Description: The vLAG properties for this port-channel.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=vlag.vlag, is_container='container', presence=False, yang_name="vlag", rest_name="vlag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual LAG', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vlag must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=vlag.vlag, is_container='container', presence=False, yang_name="vlag", rest_name="vlag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual LAG', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__vlag = t
if hasattr(self, '_set'):
self._set()
def _unset_vlag(self):
self.__vlag = YANGDynClass(base=vlag.vlag, is_container='container', presence=False, yang_name="vlag", rest_name="vlag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual LAG', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_po_speed(self):
"""
Getter method for po_speed, mapped from YANG variable /interface/port_channel/po_speed (enumeration)
YANG Description: This specifies the administratively configured
bandwidth for this physical interface.
"""
return self.__po_speed
def _set_po_speed(self, v, load=False):
"""
Setter method for po_speed, mapped from YANG variable /interface/port_channel/po_speed (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_po_speed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_po_speed() directly.
YANG Description: This specifies the administratively configured
bandwidth for this physical interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'40000': {'value': 3}, u'100': {'value': 5}, u'10000': {'value': 2}, u'100000': {'value': 4}, u'1000': {'value': 1}},), default=unicode("10000"), is_leaf=True, yang_name="po-speed", rest_name="speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set speed informational parameter', u'alt-name': u'speed'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """po_speed must be of a type compatible with enumeration""",
'defined-type': "brocade-interface:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'40000': {'value': 3}, u'100': {'value': 5}, u'10000': {'value': 2}, u'100000': {'value': 4}, u'1000': {'value': 1}},), default=unicode("10000"), is_leaf=True, yang_name="po-speed", rest_name="speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set speed informational parameter', u'alt-name': u'speed'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='enumeration', is_config=True)""",
})
self.__po_speed = t
if hasattr(self, '_set'):
self._set()
def _unset_po_speed(self):
self.__po_speed = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'40000': {'value': 3}, u'100': {'value': 5}, u'10000': {'value': 2}, u'100000': {'value': 4}, u'1000': {'value': 1}},), default=unicode("10000"), is_leaf=True, yang_name="po-speed", rest_name="speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set speed informational parameter', u'alt-name': u'speed'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='enumeration', is_config=True)
def _get_ifindex(self):
"""
Getter method for ifindex, mapped from YANG variable /interface/port_channel/ifindex (uint64)
"""
return self.__ifindex
def _set_ifindex(self, v, load=False):
"""
Setter method for ifindex, mapped from YANG variable /interface/port_channel/ifindex (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_ifindex is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ifindex() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="ifindex", rest_name="ifindex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ifindex must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="ifindex", rest_name="ifindex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint64', is_config=False)""",
})
self.__ifindex = t
if hasattr(self, '_set'):
self._set()
def _unset_ifindex(self):
self.__ifindex = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="ifindex", rest_name="ifindex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint64', is_config=False)
def _get_description(self):
"""
Getter method for description, mapped from YANG variable /interface/port_channel/description (string)
"""
return self.__description
def _set_description(self, v, load=False):
"""
Setter method for description, mapped from YANG variable /interface/port_channel/description (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_description is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_description() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1 .. 63']}), is_leaf=True, yang_name="description", rest_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface specific description', u'cli-multi-value': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_BASIC_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """description must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1 .. 63']}), is_leaf=True, yang_name="description", rest_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface specific description', u'cli-multi-value': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_BASIC_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='string', is_config=True)""",
})
self.__description = t
if hasattr(self, '_set'):
self._set()
def _unset_description(self):
self.__description = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1 .. 63']}), is_leaf=True, yang_name="description", rest_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface specific description', u'cli-multi-value': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_BASIC_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='string', is_config=True)
def _get_shutdown(self):
"""
Getter method for shutdown, mapped from YANG variable /interface/port_channel/shutdown (empty)
"""
return self.__shutdown
def _set_shutdown(self, v, load=False):
"""
Setter method for shutdown, mapped from YANG variable /interface/port_channel/shutdown (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_shutdown is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_shutdown() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="shutdown", rest_name="shutdown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Shutdown the selected interface', u'cli-show-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_SHUT_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """shutdown must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="shutdown", rest_name="shutdown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Shutdown the selected interface', u'cli-show-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_SHUT_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='empty', is_config=True)""",
})
self.__shutdown = t
if hasattr(self, '_set'):
self._set()
def _unset_shutdown(self):
self.__shutdown = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="shutdown", rest_name="shutdown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Shutdown the selected interface', u'cli-show-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_SHUT_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='empty', is_config=True)
def _get_minimum_links(self):
"""
Getter method for minimum_links, mapped from YANG variable /interface/port_channel/minimum_links (uint32)
YANG Description: The least number of operationally 'UP' links to
indicate port-channel being UP.
"""
return self.__minimum_links
def _set_minimum_links(self, v, load=False):
"""
Setter method for minimum_links, mapped from YANG variable /interface/port_channel/minimum_links (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_minimum_links is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_minimum_links() directly.
YANG Description: The least number of operationally 'UP' links to
indicate port-channel being UP.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="minimum-links", rest_name="minimum-links", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Least number of operationally UP links to declare \nport-channel UP', u'callpoint': u'interface_po', u'cli-completion-actionpoint': u'getinterfaceall-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """minimum_links must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="minimum-links", rest_name="minimum-links", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Least number of operationally UP links to declare \nport-channel UP', u'callpoint': u'interface_po', u'cli-completion-actionpoint': u'getinterfaceall-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint32', is_config=True)""",
})
self.__minimum_links = t
if hasattr(self, '_set'):
self._set()
def _unset_minimum_links(self):
self.__minimum_links = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="minimum-links", rest_name="minimum-links", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Least number of operationally UP links to declare \nport-channel UP', u'callpoint': u'interface_po', u'cli-completion-actionpoint': u'getinterfaceall-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint32', is_config=True)
def _get_snmp(self):
"""
Getter method for snmp, mapped from YANG variable /interface/port_channel/snmp (container)
YANG Description: The SNMP configurations for an interface.
"""
return self.__snmp
def _set_snmp(self, v, load=False):
"""
Setter method for snmp, mapped from YANG variable /interface/port_channel/snmp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_snmp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_snmp() directly.
YANG Description: The SNMP configurations for an interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=snmp.snmp, is_container='container', presence=False, yang_name="snmp", rest_name="snmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Simple Network Management Protocol (SNMP).', u'cli-incomplete-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_BASIC_CONFIG', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """snmp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=snmp.snmp, is_container='container', presence=False, yang_name="snmp", rest_name="snmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Simple Network Management Protocol (SNMP).', u'cli-incomplete-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_BASIC_CONFIG', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__snmp = t
if hasattr(self, '_set'):
self._set()
def _unset_snmp(self):
self.__snmp = YANGDynClass(base=snmp.snmp, is_container='container', presence=False, yang_name="snmp", rest_name="snmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Simple Network Management Protocol (SNMP).', u'cli-incomplete-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_BASIC_CONFIG', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_mtu(self):
"""
Getter method for mtu, mapped from YANG variable /interface/port_channel/mtu (mtu-type)
YANG Description: The size of the largest packet which can be sent/
received on the interface, specified in bytes.
For interfaces that are used for transmitting network
datagrams, this is the size of the largest network
datagram that can be sent on the interface.
"""
return self.__mtu
def _set_mtu(self, v, load=False):
"""
Setter method for mtu, mapped from YANG variable /interface/port_channel/mtu (mtu-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_mtu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mtu() directly.
YANG Description: The size of the largest packet which can be sent/
received on the interface, specified in bytes.
For interfaces that are used for transmitting network
datagrams, this is the size of the largest network
datagram that can be sent on the interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1522..9216']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(2500), is_leaf=True, yang_name="mtu", rest_name="mtu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Set mtu value to interface'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='mtu-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mtu must be of a type compatible with mtu-type""",
'defined-type': "brocade-interface:mtu-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1522..9216']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(2500), is_leaf=True, yang_name="mtu", rest_name="mtu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Set mtu value to interface'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='mtu-type', is_config=True)""",
})
self.__mtu = t
if hasattr(self, '_set'):
self._set()
def _unset_mtu(self):
self.__mtu = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1522..9216']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(2500), is_leaf=True, yang_name="mtu", rest_name="mtu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Set mtu value to interface'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='mtu-type', is_config=True)
def _get_switchport_basic(self):
"""
Getter method for switchport_basic, mapped from YANG variable /interface/port_channel/switchport_basic (container)
"""
return self.__switchport_basic
def _set_switchport_basic(self, v, load=False):
"""
Setter method for switchport_basic, mapped from YANG variable /interface/port_channel/switchport_basic (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_switchport_basic is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_switchport_basic() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=switchport_basic.switchport_basic, is_container='container', presence=False, yang_name="switchport-basic", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """switchport_basic must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=switchport_basic.switchport_basic, is_container='container', presence=False, yang_name="switchport-basic", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__switchport_basic = t
if hasattr(self, '_set'):
self._set()
def _unset_switchport_basic(self):
self.__switchport_basic = YANGDynClass(base=switchport_basic.switchport_basic, is_container='container', presence=False, yang_name="switchport-basic", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_switchport(self):
"""
Getter method for switchport, mapped from YANG variable /interface/port_channel/switchport (container)
YANG Description: The L2 switching characteristics of an interface.
"""
return self.__switchport
def _set_switchport(self, v, load=False):
"""
Setter method for switchport, mapped from YANG variable /interface/port_channel/switchport (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_switchport is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_switchport() directly.
YANG Description: The L2 switching characteristics of an interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=switchport.switchport, is_container='container', presence=False, yang_name="switchport", rest_name="switchport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the switching characteristics of the Layer2 \ninterface', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """switchport must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=switchport.switchport, is_container='container', presence=False, yang_name="switchport", rest_name="switchport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the switching characteristics of the Layer2 \ninterface', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__switchport = t
if hasattr(self, '_set'):
self._set()
def _unset_switchport(self):
self.__switchport = YANGDynClass(base=switchport.switchport, is_container='container', presence=False, yang_name="switchport", rest_name="switchport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the switching characteristics of the Layer2 \ninterface', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_mac_learning(self):
"""
Getter method for mac_learning, mapped from YANG variable /interface/port_channel/mac_learning (container)
"""
return self.__mac_learning
def _set_mac_learning(self, v, load=False):
"""
Setter method for mac_learning, mapped from YANG variable /interface/port_channel/mac_learning (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mac_learning is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mac_learning() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mac_learning.mac_learning, is_container='container', presence=False, yang_name="mac-learning", rest_name="mac-learning", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC learning.', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MAC_LEARNING_DISABLE_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mac_learning must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=mac_learning.mac_learning, is_container='container', presence=False, yang_name="mac-learning", rest_name="mac-learning", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC learning.', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MAC_LEARNING_DISABLE_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__mac_learning = t
if hasattr(self, '_set'):
self._set()
def _unset_mac_learning(self):
self.__mac_learning = YANGDynClass(base=mac_learning.mac_learning, is_container='container', presence=False, yang_name="mac-learning", rest_name="mac-learning", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC learning.', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MAC_LEARNING_DISABLE_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_ip(self):
"""
Getter method for ip, mapped from YANG variable /interface/port_channel/ip (container)
YANG Description: The IP configurations for an interface.
"""
return self.__ip
def _set_ip(self, v, load=False):
"""
Setter method for ip, mapped from YANG variable /interface/port_channel/ip (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip() directly.
YANG Description: The IP configurations for an interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ip.ip, is_container='container', presence=False, yang_name="ip", rest_name="ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Internet Protocol (IP).', u'cli-incomplete-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ip must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ip.ip, is_container='container', presence=False, yang_name="ip", rest_name="ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Internet Protocol (IP).', u'cli-incomplete-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__ip = t
if hasattr(self, '_set'):
self._set()
def _unset_ip(self):
self.__ip = YANGDynClass(base=ip.ip, is_container='container', presence=False, yang_name="ip", rest_name="ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Internet Protocol (IP).', u'cli-incomplete-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_ipv6(self):
"""
Getter method for ipv6, mapped from YANG variable /interface/port_channel/ipv6 (container)
YANG Description: The IPv6 configurations for an interface.
"""
return self.__ipv6
def _set_ipv6(self, v, load=False):
"""
Setter method for ipv6, mapped from YANG variable /interface/port_channel/ipv6 (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6() directly.
YANG Description: The IPv6 configurations for an interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ipv6.ipv6, is_container='container', presence=False, yang_name="ipv6", rest_name="ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Internet Protocol version 6(IPv6).', u'cli-incomplete-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipv6 must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ipv6.ipv6, is_container='container', presence=False, yang_name="ipv6", rest_name="ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Internet Protocol version 6(IPv6).', u'cli-incomplete-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__ipv6 = t
if hasattr(self, '_set'):
self._set()
def _unset_ipv6(self):
self.__ipv6 = YANGDynClass(base=ipv6.ipv6, is_container='container', presence=False, yang_name="ipv6", rest_name="ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Internet Protocol version 6(IPv6).', u'cli-incomplete-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_IP_CONFIG', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_track(self):
"""
Getter method for track, mapped from YANG variable /interface/port_channel/track (container)
YANG Description: Track interface
"""
return self.__track
def _set_track(self, v, load=False):
"""
Setter method for track, mapped from YANG variable /interface/port_channel/track (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_track is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_track() directly.
YANG Description: Track interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=track.track, is_container='container', presence=False, yang_name="track", rest_name="track", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Track interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """track must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=track.track, is_container='container', presence=False, yang_name="track", rest_name="track", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Track interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__track = t
if hasattr(self, '_set'):
self._set()
def _unset_track(self):
self.__track = YANGDynClass(base=track.track, is_container='container', presence=False, yang_name="track", rest_name="track", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Track interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_edge_loop_detection(self):
"""
Getter method for edge_loop_detection, mapped from YANG variable /interface/port_channel/edge_loop_detection (container)
YANG Description: Enable edge-loop-detection on the selected interface
"""
return self.__edge_loop_detection
def _set_edge_loop_detection(self, v, load=False):
"""
Setter method for edge_loop_detection, mapped from YANG variable /interface/port_channel/edge_loop_detection (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_edge_loop_detection is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_edge_loop_detection() directly.
YANG Description: Enable edge-loop-detection on the selected interface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=edge_loop_detection.edge_loop_detection, is_container='container', presence=False, yang_name="edge-loop-detection", rest_name="edge-loop-detection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable edge-loop-detection on the selected interface', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_ELD', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """edge_loop_detection must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=edge_loop_detection.edge_loop_detection, is_container='container', presence=False, yang_name="edge-loop-detection", rest_name="edge-loop-detection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable edge-loop-detection on the selected interface', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_ELD', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__edge_loop_detection = t
if hasattr(self, '_set'):
self._set()
def _unset_edge_loop_detection(self):
self.__edge_loop_detection = YANGDynClass(base=edge_loop_detection.edge_loop_detection, is_container='container', presence=False, yang_name="edge-loop-detection", rest_name="edge-loop-detection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable edge-loop-detection on the selected interface', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_ELD', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_load_balance(self):
"""
Getter method for load_balance, mapped from YANG variable /interface/port_channel/load_balance (enumeration)
"""
return self.__load_balance
def _set_load_balance(self, v, load=False):
"""
Setter method for load_balance, mapped from YANG variable /interface/port_channel/load_balance (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_load_balance is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_load_balance() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'src-dst-ip-port': {'value': 6}, u'src-mac-vid': {'value': 2}, u'src-dst-ip': {'value': 4}, u'src-dst-ip-mac-vid': {'value': 5}, u'dst-mac-vid': {'value': 1}, u'src-dst-mac-vid': {'value': 3}, u'src-dst-ip-mac-vid-port': {'value': 7}},), default=unicode("src-dst-ip-mac-vid-port"), is_leaf=True, yang_name="load-balance", rest_name="load-balance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Load balancing Commands'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """load_balance must be of a type compatible with enumeration""",
'defined-type': "brocade-interface:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'src-dst-ip-port': {'value': 6}, u'src-mac-vid': {'value': 2}, u'src-dst-ip': {'value': 4}, u'src-dst-ip-mac-vid': {'value': 5}, u'dst-mac-vid': {'value': 1}, u'src-dst-mac-vid': {'value': 3}, u'src-dst-ip-mac-vid-port': {'value': 7}},), default=unicode("src-dst-ip-mac-vid-port"), is_leaf=True, yang_name="load-balance", rest_name="load-balance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Load balancing Commands'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='enumeration', is_config=True)""",
})
self.__load_balance = t
if hasattr(self, '_set'):
self._set()
def _unset_load_balance(self):
self.__load_balance = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'src-dst-ip-port': {'value': 6}, u'src-mac-vid': {'value': 2}, u'src-dst-ip': {'value': 4}, u'src-dst-ip-mac-vid': {'value': 5}, u'dst-mac-vid': {'value': 1}, u'src-dst-mac-vid': {'value': 3}, u'src-dst-ip-mac-vid-port': {'value': 7}},), default=unicode("src-dst-ip-mac-vid-port"), is_leaf=True, yang_name="load-balance", rest_name="load-balance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Load balancing Commands'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='enumeration', is_config=True)
def _get_fcoeport(self):
"""
Getter method for fcoeport, mapped from YANG variable /interface/port_channel/fcoeport (container)
"""
return self.__fcoeport
def _set_fcoeport(self, v, load=False):
"""
Setter method for fcoeport, mapped from YANG variable /interface/port_channel/fcoeport (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoeport is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoeport() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=fcoeport.fcoeport, is_container='container', presence=False, yang_name="fcoeport", rest_name="fcoeport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure the LAG to enable FCoE', u'display-when': u'(/vcsmode/vcs-mode = "true") or (/fcoe-fsb/fcoe-fsb-enable)', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_FEATURE_FCOE', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'fcoeport_attr_lag_cp'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fcoeport must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=fcoeport.fcoeport, is_container='container', presence=False, yang_name="fcoeport", rest_name="fcoeport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure the LAG to enable FCoE', u'display-when': u'(/vcsmode/vcs-mode = "true") or (/fcoe-fsb/fcoe-fsb-enable)', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_FEATURE_FCOE', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'fcoeport_attr_lag_cp'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)""",
})
self.__fcoeport = t
if hasattr(self, '_set'):
self._set()
def _unset_fcoeport(self):
self.__fcoeport = YANGDynClass(base=fcoeport.fcoeport, is_container='container', presence=False, yang_name="fcoeport", rest_name="fcoeport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure the LAG to enable FCoE', u'display-when': u'(/vcsmode/vcs-mode = "true") or (/fcoe-fsb/fcoe-fsb-enable)', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_FEATURE_FCOE', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'fcoeport_attr_lag_cp'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)
def _get_mac(self):
"""
Getter method for mac, mapped from YANG variable /interface/port_channel/mac (container)
"""
return self.__mac
def _set_mac(self, v, load=False):
"""
Setter method for mac, mapped from YANG variable /interface/port_channel/mac (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mac is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mac() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mac.mac, is_container='container', presence=False, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure MAC parameters', u'callpoint': u'MacaclAccessgroupIntPoCP', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MAC_ACL_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mac must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=mac.mac, is_container='container', presence=False, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure MAC parameters', u'callpoint': u'MacaclAccessgroupIntPoCP', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MAC_ACL_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='container', is_config=True)""",
})
self.__mac = t
if hasattr(self, '_set'):
self._set()
def _unset_mac(self):
self.__mac = YANGDynClass(base=mac.mac, is_container='container', presence=False, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure MAC parameters', u'callpoint': u'MacaclAccessgroupIntPoCP', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MAC_ACL_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='container', is_config=True)
def _get_hide_vrrp_holer(self):
"""
Getter method for hide_vrrp_holer, mapped from YANG variable /interface/port_channel/hide_vrrp_holer (container)
"""
return self.__hide_vrrp_holer
def _set_hide_vrrp_holer(self, v, load=False):
"""
Setter method for hide_vrrp_holer, mapped from YANG variable /interface/port_channel/hide_vrrp_holer (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_hide_vrrp_holer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_hide_vrrp_holer() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=hide_vrrp_holer.hide_vrrp_holer, is_container='container', presence=False, yang_name="hide-vrrp-holer", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u'/vcsmode/vcs-mode = "false"'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """hide_vrrp_holer must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=hide_vrrp_holer.hide_vrrp_holer, is_container='container', presence=False, yang_name="hide-vrrp-holer", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u'/vcsmode/vcs-mode = "false"'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)""",
})
self.__hide_vrrp_holer = t
if hasattr(self, '_set'):
self._set()
def _unset_hide_vrrp_holer(self):
self.__hide_vrrp_holer = YANGDynClass(base=hide_vrrp_holer.hide_vrrp_holer, is_container='container', presence=False, yang_name="hide-vrrp-holer", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'display-when': u'/vcsmode/vcs-mode = "false"'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)
def _get_ip_acl_interface(self):
"""
Getter method for ip_acl_interface, mapped from YANG variable /interface/port_channel/ip_acl_interface (container)
"""
return self.__ip_acl_interface
def _set_ip_acl_interface(self, v, load=False):
"""
Setter method for ip_acl_interface, mapped from YANG variable /interface/port_channel/ip_acl_interface (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip_acl_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip_acl_interface() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ip_acl_interface.ip_acl_interface, is_container='container', presence=False, yang_name="ip-acl-interface", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'109'}}, namespace='urn:brocade.com:mgmt:brocade-ip-access-list', defining_module='brocade-ip-access-list', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ip_acl_interface must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=ip_acl_interface.ip_acl_interface, is_container='container', presence=False, yang_name="ip-acl-interface", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'109'}}, namespace='urn:brocade.com:mgmt:brocade-ip-access-list', defining_module='brocade-ip-access-list', yang_type='container', is_config=True)""",
})
self.__ip_acl_interface = t
if hasattr(self, '_set'):
self._set()
def _unset_ip_acl_interface(self):
self.__ip_acl_interface = YANGDynClass(base=ip_acl_interface.ip_acl_interface, is_container='container', presence=False, yang_name="ip-acl-interface", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'109'}}, namespace='urn:brocade.com:mgmt:brocade-ip-access-list', defining_module='brocade-ip-access-list', yang_type='container', is_config=True)
def _get_service_policy(self):
"""
Getter method for service_policy, mapped from YANG variable /interface/port_channel/service_policy (container)
"""
return self.__service_policy
def _set_service_policy(self, v, load=False):
"""
Setter method for service_policy, mapped from YANG variable /interface/port_channel/service_policy (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_service_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_service_policy() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=service_policy.service_policy, is_container='container', presence=False, yang_name="service-policy", rest_name="service-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Attach Input/Output Policy Map', u'callpoint': u'interface_po', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """service_policy must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=service_policy.service_policy, is_container='container', presence=False, yang_name="service-policy", rest_name="service-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Attach Input/Output Policy Map', u'callpoint': u'interface_po', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='container', is_config=True)""",
})
self.__service_policy = t
if hasattr(self, '_set'):
self._set()
def _unset_service_policy(self):
self.__service_policy = YANGDynClass(base=service_policy.service_policy, is_container='container', presence=False, yang_name="service-policy", rest_name="service-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Attach Input/Output Policy Map', u'callpoint': u'interface_po', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='container', is_config=True)
def _get_port_profile_port(self):
"""
Getter method for port_profile_port, mapped from YANG variable /interface/port_channel/port_profile_port (empty)
YANG Description: This specifies if a physical/logical port can be
enabled for port-profiling. The presence of this
leaf indicates that the port is enabled for
port-profiling. Else, it is not enabled.
Enabling a port for port-profiling results in to
application of network policies (as per PP-MAC mapping)
following MAC learning process.
"""
return self.__port_profile_port
def _set_port_profile_port(self, v, load=False):
"""
Setter method for port_profile_port, mapped from YANG variable /interface/port_channel/port_profile_port (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_profile_port is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_profile_port() directly.
YANG Description: This specifies if a physical/logical port can be
enabled for port-profiling. The presence of this
leaf indicates that the port is enabled for
port-profiling. Else, it is not enabled.
Enabling a port for port-profiling results in to
application of network policies (as per PP-MAC mapping)
following MAC learning process.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="port-profile-port", rest_name="port-profile-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the interface to AMPP profile mode', u'sort-priority': u'114'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_profile_port must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="port-profile-port", rest_name="port-profile-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the interface to AMPP profile mode', u'sort-priority': u'114'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='empty', is_config=True)""",
})
self.__port_profile_port = t
if hasattr(self, '_set'):
self._set()
def _unset_port_profile_port(self):
self.__port_profile_port = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="port-profile-port", rest_name="port-profile-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the interface to AMPP profile mode', u'sort-priority': u'114'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='empty', is_config=True)
def _get_port_profile_to_interface_associations(self):
"""
Getter method for port_profile_to_interface_associations, mapped from YANG variable /interface/port_channel/port_profile_to_interface_associations (container)
"""
return self.__port_profile_to_interface_associations
def _set_port_profile_to_interface_associations(self, v, load=False):
"""
Setter method for port_profile_to_interface_associations, mapped from YANG variable /interface/port_channel/port_profile_to_interface_associations (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_profile_to_interface_associations is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_profile_to_interface_associations() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=port_profile_to_interface_associations.port_profile_to_interface_associations, is_container='container', presence=False, yang_name="port-profile-to-interface-associations", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_profile_to_interface_associations must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=port_profile_to_interface_associations.port_profile_to_interface_associations, is_container='container', presence=False, yang_name="port-profile-to-interface-associations", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='container', is_config=True)""",
})
self.__port_profile_to_interface_associations = t
if hasattr(self, '_set'):
self._set()
def _unset_port_profile_to_interface_associations(self):
self.__port_profile_to_interface_associations = YANGDynClass(base=port_profile_to_interface_associations.port_profile_to_interface_associations, is_container='container', presence=False, yang_name="port-profile-to-interface-associations", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='container', is_config=True)
def _get_priority_tag_enable(self):
"""
Getter method for priority_tag_enable, mapped from YANG variable /interface/port_channel/priority_tag_enable (empty)
"""
return self.__priority_tag_enable
def _set_priority_tag_enable(self, v, load=False):
"""
Setter method for priority_tag_enable, mapped from YANG variable /interface/port_channel/priority_tag_enable (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority_tag_enable is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority_tag_enable() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="priority-tag-enable", rest_name="priority-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure 802.1p priority tagging', u'cli-full-command': None, u'callpoint': u'interface_po', u'alt-name': u'priority-tag'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """priority_tag_enable must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="priority-tag-enable", rest_name="priority-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure 802.1p priority tagging', u'cli-full-command': None, u'callpoint': u'interface_po', u'alt-name': u'priority-tag'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='empty', is_config=True)""",
})
self.__priority_tag_enable = t
if hasattr(self, '_set'):
self._set()
def _unset_priority_tag_enable(self):
self.__priority_tag_enable = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="priority-tag-enable", rest_name="priority-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure 802.1p priority tagging', u'cli-full-command': None, u'callpoint': u'interface_po', u'alt-name': u'priority-tag'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='empty', is_config=True)
def _get_qos(self):
"""
Getter method for qos, mapped from YANG variable /interface/port_channel/qos (container)
"""
return self.__qos
def _set_qos(self, v, load=False):
"""
Setter method for qos, mapped from YANG variable /interface/port_channel/qos (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_qos is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_qos() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=qos.qos, is_container='container', presence=False, yang_name="qos", rest_name="qos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Quality of Service (QoS)', u'cli-incomplete-no': None, u'callpoint': u'interface_po', u'sort-priority': u'93'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """qos must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=qos.qos, is_container='container', presence=False, yang_name="qos", rest_name="qos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Quality of Service (QoS)', u'cli-incomplete-no': None, u'callpoint': u'interface_po', u'sort-priority': u'93'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='container', is_config=True)""",
})
self.__qos = t
if hasattr(self, '_set'):
self._set()
def _unset_qos(self):
self.__qos = YANGDynClass(base=qos.qos, is_container='container', presence=False, yang_name="qos", rest_name="qos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Quality of Service (QoS)', u'cli-incomplete-no': None, u'callpoint': u'interface_po', u'sort-priority': u'93'}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='container', is_config=True)
def _get_vlan(self):
"""
Getter method for vlan, mapped from YANG variable /interface/port_channel/vlan (container)
"""
return self.__vlan
def _set_vlan(self, v, load=False):
"""
Setter method for vlan, mapped from YANG variable /interface/port_channel/vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=vlan.vlan, is_container='container', presence=False, yang_name="vlan", rest_name="vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Vlan commands', u'cli-incomplete-no': None, u'callpoint': u'VlanClassifierActivateCallpointWorker_po', u'sort-priority': u'97'}}, namespace='urn:brocade.com:mgmt:brocade-vlan', defining_module='brocade-vlan', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vlan must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=vlan.vlan, is_container='container', presence=False, yang_name="vlan", rest_name="vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Vlan commands', u'cli-incomplete-no': None, u'callpoint': u'VlanClassifierActivateCallpointWorker_po', u'sort-priority': u'97'}}, namespace='urn:brocade.com:mgmt:brocade-vlan', defining_module='brocade-vlan', yang_type='container', is_config=True)""",
})
self.__vlan = t
if hasattr(self, '_set'):
self._set()
def _unset_vlan(self):
self.__vlan = YANGDynClass(base=vlan.vlan, is_container='container', presence=False, yang_name="vlan", rest_name="vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Vlan commands', u'cli-incomplete-no': None, u'callpoint': u'VlanClassifierActivateCallpointWorker_po', u'sort-priority': u'97'}}, namespace='urn:brocade.com:mgmt:brocade-vlan', defining_module='brocade-vlan', yang_type='container', is_config=True)
def _get_bpdu_drop(self):
"""
Getter method for bpdu_drop, mapped from YANG variable /interface/port_channel/bpdu_drop (container)
"""
return self.__bpdu_drop
def _set_bpdu_drop(self, v, load=False):
"""
Setter method for bpdu_drop, mapped from YANG variable /interface/port_channel/bpdu_drop (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bpdu_drop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bpdu_drop() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=bpdu_drop.bpdu_drop, is_container='container', presence=False, yang_name="bpdu-drop", rest_name="bpdu-drop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Drop received BPDUs', u'callpoint': u'po-stp-config', u'sort-priority': u'98', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bpdu_drop must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=bpdu_drop.bpdu_drop, is_container='container', presence=False, yang_name="bpdu-drop", rest_name="bpdu-drop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Drop received BPDUs', u'callpoint': u'po-stp-config', u'sort-priority': u'98', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
})
self.__bpdu_drop = t
if hasattr(self, '_set'):
self._set()
def _unset_bpdu_drop(self):
self.__bpdu_drop = YANGDynClass(base=bpdu_drop.bpdu_drop, is_container='container', presence=False, yang_name="bpdu-drop", rest_name="bpdu-drop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Drop received BPDUs', u'callpoint': u'po-stp-config', u'sort-priority': u'98', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
def _get_tunnel(self):
"""
Getter method for tunnel, mapped from YANG variable /interface/port_channel/tunnel (container)
"""
return self.__tunnel
def _set_tunnel(self, v, load=False):
"""
Setter method for tunnel, mapped from YANG variable /interface/port_channel/tunnel (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tunnel is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tunnel() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=tunnel.tunnel, is_container='container', presence=False, yang_name="tunnel", rest_name="tunnel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'po-stp-config', u'sort-priority': u'98', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'info': u'Tunneling parameters'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tunnel must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=tunnel.tunnel, is_container='container', presence=False, yang_name="tunnel", rest_name="tunnel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'po-stp-config', u'sort-priority': u'98', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'info': u'Tunneling parameters'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
})
self.__tunnel = t
if hasattr(self, '_set'):
self._set()
def _unset_tunnel(self):
self.__tunnel = YANGDynClass(base=tunnel.tunnel, is_container='container', presence=False, yang_name="tunnel", rest_name="tunnel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'po-stp-config', u'sort-priority': u'98', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None, u'info': u'Tunneling parameters'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
def _get_spanning_tree(self):
"""
Getter method for spanning_tree, mapped from YANG variable /interface/port_channel/spanning_tree (container)
"""
return self.__spanning_tree
def _set_spanning_tree(self, v, load=False):
"""
Setter method for spanning_tree, mapped from YANG variable /interface/port_channel/spanning_tree (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_spanning_tree is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_spanning_tree() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=spanning_tree.spanning_tree, is_container='container', presence=False, yang_name="spanning-tree", rest_name="spanning-tree", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Spanning tree commands', u'sort-priority': u'98', u'callpoint': u'po-stp-config', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """spanning_tree must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=spanning_tree.spanning_tree, is_container='container', presence=False, yang_name="spanning-tree", rest_name="spanning-tree", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Spanning tree commands', u'sort-priority': u'98', u'callpoint': u'po-stp-config', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
})
self.__spanning_tree = t
if hasattr(self, '_set'):
self._set()
def _unset_spanning_tree(self):
self.__spanning_tree = YANGDynClass(base=spanning_tree.spanning_tree, is_container='container', presence=False, yang_name="spanning-tree", rest_name="spanning-tree", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Spanning tree commands', u'sort-priority': u'98', u'callpoint': u'po-stp-config', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
name = __builtin__.property(_get_name, _set_name)
cee = __builtin__.property(_get_cee, _set_cee)
vlag = __builtin__.property(_get_vlag, _set_vlag)
po_speed = __builtin__.property(_get_po_speed, _set_po_speed)
ifindex = __builtin__.property(_get_ifindex)
description = __builtin__.property(_get_description, _set_description)
shutdown = __builtin__.property(_get_shutdown, _set_shutdown)
minimum_links = __builtin__.property(_get_minimum_links, _set_minimum_links)
snmp = __builtin__.property(_get_snmp, _set_snmp)
mtu = __builtin__.property(_get_mtu, _set_mtu)
switchport_basic = __builtin__.property(_get_switchport_basic, _set_switchport_basic)
switchport = __builtin__.property(_get_switchport, _set_switchport)
mac_learning = __builtin__.property(_get_mac_learning, _set_mac_learning)
ip = __builtin__.property(_get_ip, _set_ip)
ipv6 = __builtin__.property(_get_ipv6, _set_ipv6)
track = __builtin__.property(_get_track, _set_track)
edge_loop_detection = __builtin__.property(_get_edge_loop_detection, _set_edge_loop_detection)
load_balance = __builtin__.property(_get_load_balance, _set_load_balance)
fcoeport = __builtin__.property(_get_fcoeport, _set_fcoeport)
mac = __builtin__.property(_get_mac, _set_mac)
hide_vrrp_holer = __builtin__.property(_get_hide_vrrp_holer, _set_hide_vrrp_holer)
ip_acl_interface = __builtin__.property(_get_ip_acl_interface, _set_ip_acl_interface)
service_policy = __builtin__.property(_get_service_policy, _set_service_policy)
port_profile_port = __builtin__.property(_get_port_profile_port, _set_port_profile_port)
port_profile_to_interface_associations = __builtin__.property(_get_port_profile_to_interface_associations, _set_port_profile_to_interface_associations)
priority_tag_enable = __builtin__.property(_get_priority_tag_enable, _set_priority_tag_enable)
qos = __builtin__.property(_get_qos, _set_qos)
vlan = __builtin__.property(_get_vlan, _set_vlan)
bpdu_drop = __builtin__.property(_get_bpdu_drop, _set_bpdu_drop)
tunnel = __builtin__.property(_get_tunnel, _set_tunnel)
spanning_tree = __builtin__.property(_get_spanning_tree, _set_spanning_tree)
_pyangbind_elements = {'name': name, 'cee': cee, 'vlag': vlag, 'po_speed': po_speed, 'ifindex': ifindex, 'description': description, 'shutdown': shutdown, 'minimum_links': minimum_links, 'snmp': snmp, 'mtu': mtu, 'switchport_basic': switchport_basic, 'switchport': switchport, 'mac_learning': mac_learning, 'ip': ip, 'ipv6': ipv6, 'track': track, 'edge_loop_detection': edge_loop_detection, 'load_balance': load_balance, 'fcoeport': fcoeport, 'mac': mac, 'hide_vrrp_holer': hide_vrrp_holer, 'ip_acl_interface': ip_acl_interface, 'service_policy': service_policy, 'port_profile_port': port_profile_port, 'port_profile_to_interface_associations': port_profile_to_interface_associations, 'priority_tag_enable': priority_tag_enable, 'qos': qos, 'vlan': vlan, 'bpdu_drop': bpdu_drop, 'tunnel': tunnel, 'spanning_tree': spanning_tree, }
| [
[
[
22,
32
]
],
[
[
40,
80
],
[
2044,
2055
]
],
[
[
117,
147
]
],
[
[
149,
168
],
[
3258,
3277
],
[
3363,
3382
],
[
6182,
6201
],
[
11962,
11981
],
[
14530,
14549
],
[
15369,
15388
],
[
16765,
16784
],
[
16795,
16814
],
[
18478,
18497
],
[
18508,
18527
],
[
18659,
18678
],
[
19169,
19188
],
[
22147,
22166
],
[
22177,
22196
],
[
23662,
23681
],
[
23692,
23711
],
[
25032,
25051
],
[
26509,
26528
],
[
30344,
30363
],
[
32129,
32148
],
[
33475,
33494
],
[
34626,
34645
],
[
35707,
35726
],
[
37160,
37179
],
[
41073,
41092
],
[
41178,
41197
],
[
42880,
42899
],
[
42985,
43004
],
[
47469,
47488
],
[
47499,
47518
],
[
47650,
47669
],
[
49162,
49181
],
[
49192,
49211
],
[
49343,
49362
],
[
69271,
69290
],
[
71300,
71319
]
],
[
[
170,
183
]
],
[
[
220,
228
],
[
6762,
6770
],
[
8773,
8781
],
[
19604,
19612
],
[
38355,
38363
],
[
39680,
39688
],
[
86597,
86605
],
[
87858,
87866
],
[
91932,
91940
],
[
93273,
93281
]
],
[
[
230,
242
]
],
[
[
244,
256
],
[
2705,
2717
],
[
3240,
3252
],
[
3986,
3998
],
[
4516,
4528
],
[
4990,
5002
],
[
5534,
5546
],
[
6164,
6176
],
[
6744,
6756
],
[
7249,
7261
],
[
7746,
7758
],
[
8266,
8278
],
[
8755,
8767
],
[
9211,
9223
],
[
9775,
9787
],
[
10425,
10437
],
[
10937,
10949
],
[
11485,
11497
],
[
11944,
11956
],
[
12508,
12520
],
[
12948,
12960
],
[
13464,
13476
],
[
13975,
13987
],
[
14512,
14524
],
[
15351,
15363
],
[
16076,
16088
],
[
16747,
16759
],
[
17342,
17354
],
[
17917,
17929
],
[
18460,
18472
],
[
19151,
19163
],
[
19586,
19598
],
[
22127,
22139
],
[
23644,
23656
],
[
25012,
25024
],
[
26491,
26503
],
[
27801,
27813
],
[
29012,
29024
],
[
30324,
30336
],
[
32111,
32123
],
[
33455,
33467
],
[
34608,
34620
],
[
35687,
35699
],
[
37142,
37154
],
[
38335,
38347
],
[
39662,
39674
],
[
41053,
41065
],
[
42862,
42874
],
[
44330,
44342
],
[
45755,
45767
],
[
47449,
47461
],
[
49144,
49156
],
[
50542,
50554
],
[
51915,
51927
],
[
53226,
53238
],
[
54703,
54715
],
[
55950,
55962
],
[
57313,
57325
],
[
58535,
58547
],
[
59892,
59904
],
[
61153,
61165
],
[
62556,
62568
],
[
63793,
63805
],
[
64984,
64996
],
[
66313,
66325
],
[
67942,
67954
],
[
69251,
69263
],
[
71282,
71294
],
[
72755,
72767
],
[
74420,
74432
],
[
75684,
75696
],
[
77045,
77057
],
[
78275,
78287
],
[
79578,
79590
],
[
80764,
80776
],
[
82071,
82083
],
[
83239,
83251
],
[
84622,
84634
],
[
86577,
86589
],
[
87840,
87852
],
[
89204,
89216
],
[
90671,
90683
],
[
91912,
91924
],
[
93255,
93267
],
[
94340,
94352
],
[
95613,
95625
],
[
96700,
96712
],
[
98023,
98035
],
[
99178,
99190
],
[
100737,
100749
],
[
101973,
101985
],
[
103376,
103388
],
[
104603,
104615
],
[
105998,
106010
]
],
[
[
258,
271
]
],
[
[
303,
313
],
[
775,
785
]
],
[
[
334,
341
]
],
[
[
363,
371
]
],
[
[
379,
390
],
[
106516,
106527
],
[
106567,
106578
],
[
106617,
106628
],
[
106673,
106684
],
[
106736,
106747
],
[
106787,
106798
],
[
106857,
106868
],
[
106926,
106937
],
[
106996,
107007
],
[
107047,
107058
],
[
107109,
107120
],
[
107191,
107202
],
[
107263,
107274
],
[
107329,
107340
],
[
107377,
107388
],
[
107430,
107441
],
[
107499,
107510
],
[
107589,
107600
],
[
107661,
107672
],
[
107720,
107731
],
[
107781,
107792
],
[
107867,
107878
],
[
107953,
107964
],
[
108038,
108049
],
[
108150,
108161
],
[
108285,
108296
],
[
108366,
108377
],
[
108416,
108427
],
[
108473,
108484
],
[
108537,
108548
],
[
108602,
108613
]
],
[
[
398,
402
],
[
11503,
11507
],
[
27821,
27825
],
[
29030,
29034
]
],
[
[
410,
414
],
[
9229,
9233
],
[
44350,
44354
],
[
45773,
45777
]
],
[
[
422,
438
],
[
7764,
7780
],
[
50562,
50578
],
[
51933,
51949
]
],
[
[
446,
456
],
[
17360,
17370
],
[
53246,
53256
],
[
54721,
54731
]
],
[
[
464,
476
],
[
13482,
13494
],
[
55970,
55982
],
[
57331,
57343
]
],
[
[
484,
486
],
[
5008,
5010
],
[
58555,
58557
],
[
59910,
59912
]
],
[
[
494,
498
],
[
10955,
10959
],
[
61173,
61177
],
[
62574,
62578
]
],
[
[
506,
511
],
[
12526,
12531
],
[
63813,
63818
],
[
65002,
65007
]
],
[
[
519,
538
],
[
5552,
5571
],
[
66333,
66352
],
[
67960,
67979
]
],
[
[
546,
554
],
[
16094,
16102
],
[
72775,
72783
],
[
74438,
74446
]
],
[
[
562,
565
],
[
13993,
13996
],
[
75704,
75707
],
[
77063,
77066
]
],
[
[
573,
588
],
[
8284,
8299
],
[
78295,
78310
],
[
79596,
79611
]
],
[
[
596,
612
],
[
4534,
4550
],
[
80784,
80800
],
[
82089,
82105
]
],
[
[
620,
634
],
[
4004,
4018
],
[
83259,
83273
],
[
84640,
84654
]
],
[
[
642,
680
],
[
10443,
10481
],
[
89224,
89262
],
[
90689,
90727
]
],
[
[
688,
691
],
[
7267,
7270
],
[
94360,
94363
],
[
95631,
95634
]
],
[
[
699,
703
],
[
12966,
12970
],
[
96720,
96724
],
[
98041,
98045
]
],
[
[
711,
720
],
[
9793,
9802
],
[
99198,
99207
],
[
100755,
100764
]
],
[
[
728,
734
],
[
17935,
17941
],
[
101993,
101999
],
[
103394,
103400
]
],
[
[
742,
755
],
[
2723,
2736
],
[
104623,
104636
],
[
106016,
106029
]
],
[
[
762,
774
]
]
] |
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 Intel Corp.
#
# Authors: Yunhong Jiang <yunhong.jiang@intel.com>
# Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from stevedore import extension
from ceilometer.openstack.common.fixture import mockpatch
from ceilometer.openstack.common import test
from ceilometer.openstack.common import timeutils
from ceilometer import pipeline
from ceilometer import publisher
from ceilometer.publisher import test as test_publisher
from ceilometer import sample
from ceilometer import transformer
from ceilometer.transformer import accumulator
from ceilometer.transformer import conversions
class TestTransformerAccumulator(test.BaseTestCase):
def test_handle_sample(self):
test_sample = sample.Sample(
name='a',
type=sample.TYPE_GAUGE,
volume=1,
unit='B',
user_id="test_user",
project_id="test_proj",
resource_id="test_resource",
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={}
)
# Test when size is set to less than 1.
tf = accumulator.TransformerAccumulator(size=0)
self.assertEqual(tf.handle_sample(None, test_sample), test_sample)
self.assertFalse(hasattr(tf, 'samples'))
# Test when size is set to greater or equal than 1.
tf = accumulator.TransformerAccumulator(size=2)
tf.handle_sample(None, test_sample)
self.assertEqual(len(tf.samples), 1)
class TestPipeline(test.BaseTestCase):
def fake_tem_init(self):
"""Fake a transformerManager for pipeline
The faked entry point setting is below:
update: TransformerClass
except: TransformerClassException
drop: TransformerClassDrop
"""
pass
def fake_tem_get_ext(self, name):
class_name_ext = {
'update': self.TransformerClass,
'except': self.TransformerClassException,
'drop': self.TransformerClassDrop,
'cache': accumulator.TransformerAccumulator,
'unit_conversion': conversions.ScalingTransformer,
'rate_of_change': conversions.RateOfChangeTransformer,
}
if name in class_name_ext:
return extension.Extension(name, None,
class_name_ext[name],
None,
)
raise KeyError(name)
def get_publisher(self, url, namespace=''):
fake_drivers = {'test://': test_publisher.TestPublisher,
'new://': test_publisher.TestPublisher,
'except://': self.PublisherClassException}
return fake_drivers[url](url)
class PublisherClassException(publisher.PublisherBase):
def publish_samples(self, ctxt, counters):
raise Exception()
class TransformerClass(transformer.TransformerBase):
samples = []
def __init__(self, append_name='_update'):
self.__class__.samples = []
self.append_name = append_name
def flush(self, ctxt):
return []
def handle_sample(self, ctxt, counter):
self.__class__.samples.append(counter)
newname = getattr(counter, 'name') + self.append_name
return sample.Sample(
name=newname,
type=counter.type,
volume=counter.volume,
unit=counter.unit,
user_id=counter.user_id,
project_id=counter.project_id,
resource_id=counter.resource_id,
timestamp=counter.timestamp,
resource_metadata=counter.resource_metadata,
)
class TransformerClassDrop(transformer.TransformerBase):
samples = []
def __init__(self):
self.__class__.samples = []
def handle_sample(self, ctxt, counter):
self.__class__.samples.append(counter)
class TransformerClassException(object):
def handle_sample(self, ctxt, counter):
raise Exception()
def setUp(self):
super(TestPipeline, self).setUp()
self.test_counter = sample.Sample(
name='a',
type=sample.TYPE_GAUGE,
volume=1,
unit='B',
user_id="test_user",
project_id="test_proj",
resource_id="test_resource",
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={}
)
self.useFixture(mockpatch.PatchObject(
transformer.TransformerExtensionManager, "__init__",
side_effect=self.fake_tem_init))
self.useFixture(mockpatch.PatchObject(
transformer.TransformerExtensionManager, "get_ext",
side_effect=self.fake_tem_get_ext))
self.useFixture(mockpatch.PatchObject(
publisher, 'get_publisher', side_effect=self.get_publisher))
self.transformer_manager = transformer.TransformerExtensionManager()
self.pipeline_cfg = [{
'name': "test_pipeline",
'interval': 5,
'counters': ['a'],
'transformers': [
{'name': "update",
'parameters': {}}
],
'publishers': ["test://"],
}, ]
def _exception_create_pipelinemanager(self):
self.assertRaises(pipeline.PipelineException,
pipeline.PipelineManager,
self.pipeline_cfg,
self.transformer_manager)
def test_no_counters(self):
del self.pipeline_cfg[0]['counters']
self._exception_create_pipelinemanager()
def test_no_transformers(self):
del self.pipeline_cfg[0]['transformers']
self._exception_create_pipelinemanager()
def test_no_name(self):
del self.pipeline_cfg[0]['name']
self._exception_create_pipelinemanager()
def test_no_interval(self):
del self.pipeline_cfg[0]['interval']
self._exception_create_pipelinemanager()
def test_no_publishers(self):
del self.pipeline_cfg[0]['publishers']
self._exception_create_pipelinemanager()
def test_invalid_resources(self):
invalid_resource = {'invalid': 1}
self.pipeline_cfg[0]['resources'] = invalid_resource
self._exception_create_pipelinemanager()
def test_check_counters_include_exclude_same(self):
counter_cfg = ['a', '!a']
self.pipeline_cfg[0]['counters'] = counter_cfg
self._exception_create_pipelinemanager()
def test_check_counters_include_exclude(self):
counter_cfg = ['a', '!b']
self.pipeline_cfg[0]['counters'] = counter_cfg
self._exception_create_pipelinemanager()
def test_check_counters_wildcard_included(self):
counter_cfg = ['a', '*']
self.pipeline_cfg[0]['counters'] = counter_cfg
self._exception_create_pipelinemanager()
def test_check_publishers_invalid_publisher(self):
publisher_cfg = ['test_invalid']
self.pipeline_cfg[0]['publishers'] = publisher_cfg
def test_invalid_string_interval(self):
self.pipeline_cfg[0]['interval'] = 'string'
self._exception_create_pipelinemanager()
def test_check_transformer_invalid_transformer(self):
transformer_cfg = [
{'name': "test_invalid",
'parameters': {}}
]
self.pipeline_cfg[0]['transformers'] = transformer_cfg
self._exception_create_pipelinemanager()
def test_get_interval(self):
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
self.assertTrue(pipe.get_interval() == 5)
def test_publisher_transformer_invoked(self):
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 1)
self.assertTrue(len(self.TransformerClass.samples) == 1)
self.assertEqual(getattr(publisher.samples[0], "name"), 'a_update')
self.assertTrue(getattr(self.TransformerClass.samples[0], "name")
== 'a')
def test_multiple_included_counters(self):
counter_cfg = ['a', 'b']
self.pipeline_cfg[0]['counters'] = counter_cfg
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 1)
self.test_counter = sample.Sample(
name='b',
type=self.test_counter.type,
volume=self.test_counter.volume,
unit=self.test_counter.unit,
user_id=self.test_counter.user_id,
project_id=self.test_counter.project_id,
resource_id=self.test_counter.resource_id,
timestamp=self.test_counter.timestamp,
resource_metadata=self.test_counter.resource_metadata,
)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
self.assertEqual(len(publisher.samples), 2)
self.assertTrue(len(self.TransformerClass.samples) == 2)
self.assertEqual(getattr(publisher.samples[0], "name"), 'a_update')
self.assertEqual(getattr(publisher.samples[1], "name"), 'b_update')
def test_counter_dont_match(self):
counter_cfg = ['nomatch']
self.pipeline_cfg[0]['counters'] = counter_cfg
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 0)
self.assertEqual(publisher.calls, 0)
def test_wildcard_counter(self):
counter_cfg = ['*']
self.pipeline_cfg[0]['counters'] = counter_cfg
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 1)
self.assertTrue(len(self.TransformerClass.samples) == 1)
self.assertEqual(getattr(publisher.samples[0], "name"), 'a_update')
def test_wildcard_excluded_counters(self):
counter_cfg = ['*', '!a']
self.pipeline_cfg[0]['counters'] = counter_cfg
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertFalse(pipeline_manager.pipelines[0].support_meter('a'))
def test_wildcard_excluded_counters_not_excluded(self):
counter_cfg = ['*', '!b']
self.pipeline_cfg[0]['counters'] = counter_cfg
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 1)
self.assertEqual(len(self.TransformerClass.samples), 1)
self.assertEqual(getattr(publisher.samples[0], "name"),
'a_update')
def test_all_excluded_counters_not_excluded(self):
counter_cfg = ['!b', '!c']
self.pipeline_cfg[0]['counters'] = counter_cfg
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 1)
self.assertTrue(len(self.TransformerClass.samples) == 1)
self.assertEqual(getattr(publisher.samples[0], "name"), 'a_update')
self.assertTrue(getattr(self.TransformerClass.samples[0], "name")
== 'a')
def test_all_excluded_counters_is_excluded(self):
counter_cfg = ['!a', '!c']
self.pipeline_cfg[0]['counters'] = counter_cfg
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertFalse(pipeline_manager.pipelines[0].support_meter('a'))
self.assertTrue(pipeline_manager.pipelines[0].support_meter('b'))
self.assertFalse(pipeline_manager.pipelines[0].support_meter('c'))
def test_wildcard_and_excluded_wildcard_counters(self):
counter_cfg = ['*', '!disk.*']
self.pipeline_cfg[0]['counters'] = counter_cfg
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertFalse(pipeline_manager.pipelines[0].
support_meter('disk.read.bytes'))
self.assertTrue(pipeline_manager.pipelines[0].support_meter('cpu'))
def test_included_counter_and_wildcard_counters(self):
counter_cfg = ['cpu', 'disk.*']
self.pipeline_cfg[0]['counters'] = counter_cfg
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertTrue(pipeline_manager.pipelines[0].
support_meter('disk.read.bytes'))
self.assertTrue(pipeline_manager.pipelines[0].support_meter('cpu'))
self.assertFalse(pipeline_manager.pipelines[0].
support_meter('instance'))
def test_excluded_counter_and_excluded_wildcard_counters(self):
counter_cfg = ['!cpu', '!disk.*']
self.pipeline_cfg[0]['counters'] = counter_cfg
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertFalse(pipeline_manager.pipelines[0].
support_meter('disk.read.bytes'))
self.assertFalse(pipeline_manager.pipelines[0].support_meter('cpu'))
self.assertTrue(pipeline_manager.pipelines[0].
support_meter('instance'))
def test_multiple_pipeline(self):
self.pipeline_cfg.append({
'name': 'second_pipeline',
'interval': 5,
'counters': ['b'],
'transformers': [{
'name': 'update',
'parameters':
{
"append_name": "_new",
}
}],
'publishers': ['new'],
})
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
self.test_counter = sample.Sample(
name='b',
type=self.test_counter.type,
volume=self.test_counter.volume,
unit=self.test_counter.unit,
user_id=self.test_counter.user_id,
project_id=self.test_counter.project_id,
resource_id=self.test_counter.resource_id,
timestamp=self.test_counter.timestamp,
resource_metadata=self.test_counter.resource_metadata,
)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 1)
self.assertEqual(publisher.calls, 1)
self.assertEqual(getattr(publisher.samples[0], "name"), 'a_update')
new_publisher = pipeline_manager.pipelines[1].publishers[0]
self.assertEqual(len(new_publisher.samples), 1)
self.assertEqual(new_publisher.calls, 1)
self.assertEqual(getattr(new_publisher.samples[0], "name"), 'b_new')
self.assertTrue(getattr(self.TransformerClass.samples[0], "name")
== 'a')
self.assertTrue(len(self.TransformerClass.samples) == 2)
self.assertTrue(getattr(self.TransformerClass.samples[0], "name")
== 'a')
self.assertTrue(getattr(self.TransformerClass.samples[1], "name")
== 'b')
def test_multiple_pipeline_exception(self):
self.pipeline_cfg.append({
'name': "second_pipeline",
"interval": 5,
'counters': ['b'],
'transformers': [{
'name': 'update',
'parameters':
{
"append_name": "_new",
}
}],
'publishers': ['except'],
})
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
self.test_counter = sample.Sample(
name='b',
type=self.test_counter.type,
volume=self.test_counter.volume,
unit=self.test_counter.unit,
user_id=self.test_counter.user_id,
project_id=self.test_counter.project_id,
resource_id=self.test_counter.resource_id,
timestamp=self.test_counter.timestamp,
resource_metadata=self.test_counter.resource_metadata,
)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(publisher.calls, 1)
self.assertEqual(len(publisher.samples), 1)
self.assertEqual(getattr(publisher.samples[0], "name"), 'a_update')
self.assertTrue(len(self.TransformerClass.samples) == 2)
self.assertTrue(getattr(self.TransformerClass.samples[0], "name")
== 'a')
self.assertTrue(getattr(self.TransformerClass.samples[1], "name")
== 'b')
def test_none_transformer_pipeline(self):
self.pipeline_cfg[0]['transformers'] = None
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 1)
self.assertEqual(publisher.calls, 1)
self.assertEqual(getattr(publisher.samples[0], 'name'), 'a')
def test_empty_transformer_pipeline(self):
self.pipeline_cfg[0]['transformers'] = []
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 1)
self.assertEqual(publisher.calls, 1)
self.assertEqual(getattr(publisher.samples[0], 'name'), 'a')
def test_multiple_transformer_same_class(self):
self.pipeline_cfg[0]['transformers'] = [
{
'name': 'update',
'parameters': {}
},
{
'name': 'update',
'parameters': {}
},
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(publisher.calls, 1)
self.assertEqual(len(publisher.samples), 1)
self.assertEqual(getattr(publisher.samples[0], 'name'),
'a_update_update')
self.assertTrue(len(self.TransformerClass.samples) == 2)
self.assertTrue(getattr(self.TransformerClass.samples[0], 'name')
== 'a')
self.assertTrue(getattr(self.TransformerClass.samples[1], 'name')
== 'a_update')
def test_multiple_transformer_same_class_different_parameter(self):
self.pipeline_cfg[0]['transformers'] = [
{
'name': 'update',
'parameters':
{
"append_name": "_update",
}
},
{
'name': 'update',
'parameters':
{
"append_name": "_new",
}
},
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
self.assertTrue(len(self.TransformerClass.samples) == 2)
self.assertTrue(getattr(self.TransformerClass.samples[0], 'name')
== 'a')
self.assertTrue(getattr(self.TransformerClass.samples[1], 'name')
== 'a_update')
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 1)
self.assertEqual(getattr(publisher.samples[0], 'name'),
'a_update_new')
def test_multiple_transformer_drop_transformer(self):
self.pipeline_cfg[0]['transformers'] = [
{
'name': 'update',
'parameters':
{
"append_name": "_update",
}
},
{
'name': 'drop',
'parameters': {}
},
{
'name': 'update',
'parameters':
{
"append_name": "_new",
}
},
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 0)
self.assertTrue(len(self.TransformerClass.samples) == 1)
self.assertTrue(getattr(self.TransformerClass.samples[0], 'name')
== 'a')
self.assertTrue(len(self.TransformerClassDrop.samples) == 1)
self.assertTrue(getattr(self.TransformerClassDrop.samples[0], 'name')
== 'a_update')
def test_multiple_publisher(self):
self.pipeline_cfg[0]['publishers'] = ['test://', 'new://']
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
new_publisher = pipeline_manager.pipelines[0].publishers[1]
self.assertEqual(len(publisher.samples), 1)
self.assertEqual(len(new_publisher.samples), 1)
self.assertEqual(getattr(new_publisher.samples[0], 'name'),
'a_update')
self.assertEqual(getattr(publisher.samples[0], 'name'),
'a_update')
def test_multiple_publisher_isolation(self):
self.pipeline_cfg[0]['publishers'] = ['except://', 'new://']
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
new_publisher = pipeline_manager.pipelines[0].publishers[1]
self.assertEqual(len(new_publisher.samples), 1)
self.assertEqual(getattr(new_publisher.samples[0], 'name'),
'a_update')
def test_multiple_counter_pipeline(self):
self.pipeline_cfg[0]['counters'] = ['a', 'b']
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter,
sample.Sample(
name='b',
type=self.test_counter.type,
volume=self.test_counter.volume,
unit=self.test_counter.unit,
user_id=self.test_counter.user_id,
project_id=self.test_counter.project_id,
resource_id=self.test_counter.resource_id,
timestamp=self.test_counter.timestamp,
resource_metadata=self.test_counter.resource_metadata,
)])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 2)
self.assertEqual(getattr(publisher.samples[0], 'name'), 'a_update')
self.assertEqual(getattr(publisher.samples[1], 'name'), 'b_update')
def test_flush_pipeline_cache(self):
CACHE_SIZE = 10
self.pipeline_cfg[0]['transformers'].extend([
{
'name': 'cache',
'parameters': {
'size': CACHE_SIZE,
}
},
{
'name': 'update',
'parameters':
{
'append_name': '_new'
}
}, ]
)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_sample(None, self.test_counter)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 0)
pipe.flush(None)
self.assertEqual(len(publisher.samples), 0)
pipe.publish_sample(None, self.test_counter)
pipe.flush(None)
self.assertEqual(len(publisher.samples), 0)
for i in range(CACHE_SIZE - 2):
pipe.publish_sample(None, self.test_counter)
pipe.flush(None)
self.assertEqual(len(publisher.samples), CACHE_SIZE)
self.assertTrue(getattr(publisher.samples[0], 'name')
== 'a_update_new')
def test_flush_pipeline_cache_multiple_counter(self):
CACHE_SIZE = 3
self.pipeline_cfg[0]['transformers'].extend([
{
'name': 'cache',
'parameters': {
'size': CACHE_SIZE
}
},
{
'name': 'update',
'parameters':
{
'append_name': '_new'
}
}, ]
)
self.pipeline_cfg[0]['counters'] = ['a', 'b']
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter,
sample.Sample(
name='b',
type=self.test_counter.type,
volume=self.test_counter.volume,
unit=self.test_counter.unit,
user_id=self.test_counter.user_id,
project_id=self.test_counter.project_id,
resource_id=self.test_counter.resource_id,
timestamp=self.test_counter.timestamp,
resource_metadata=self.test_counter.resource_metadata,
)])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 0)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
self.assertEqual(len(publisher.samples), CACHE_SIZE)
self.assertEqual(getattr(publisher.samples[0], 'name'),
'a_update_new')
self.assertEqual(getattr(publisher.samples[1], 'name'),
'b_update_new')
def test_flush_pipeline_cache_before_publisher(self):
self.pipeline_cfg[0]['transformers'].append({
'name': 'cache',
'parameters': {}
})
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
publisher = pipe.publishers[0]
pipe.publish_sample(None, self.test_counter)
self.assertEqual(len(publisher.samples), 0)
pipe.flush(None)
self.assertEqual(len(publisher.samples), 1)
self.assertEqual(getattr(publisher.samples[0], 'name'),
'a_update')
def test_variable_counter(self):
self.pipeline_cfg = [{
'name': "test_pipeline",
'interval': 5,
'counters': ['a:*'],
'transformers': [
{'name': "update",
'parameters': {}}
],
'publishers': ["test://"],
}, ]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.test_counter = sample.Sample(
name='a:b',
type=self.test_counter.type,
volume=self.test_counter.volume,
unit=self.test_counter.unit,
user_id=self.test_counter.user_id,
project_id=self.test_counter.project_id,
resource_id=self.test_counter.resource_id,
timestamp=self.test_counter.timestamp,
resource_metadata=self.test_counter.resource_metadata,
)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 1)
self.assertTrue(len(self.TransformerClass.samples) == 1)
self.assertEqual(getattr(publisher.samples[0], "name"),
'a:b_update')
self.assertTrue(getattr(self.TransformerClass.samples[0], "name")
== 'a:b')
def test_global_unit_conversion(self):
scale = 'volume / ((10**6) * 60)'
self.pipeline_cfg[0]['transformers'] = [
{
'name': 'unit_conversion',
'parameters': {
'source': {},
'target': {'name': 'cpu_mins',
'unit': 'min',
'scale': scale},
}
},
]
self.pipeline_cfg[0]['counters'] = ['cpu']
counters = [
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=1200000000,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={}
),
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_samples(None, counters)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 1)
pipe.flush(None)
self.assertEqual(len(publisher.samples), 1)
cpu_mins = publisher.samples[-1]
self.assertEqual(getattr(cpu_mins, 'name'), 'cpu_mins')
self.assertEqual(getattr(cpu_mins, 'unit'), 'min')
self.assertEqual(getattr(cpu_mins, 'type'), sample.TYPE_CUMULATIVE)
self.assertEqual(getattr(cpu_mins, 'volume'), 20)
def test_unit_identified_source_unit_conversion(self):
self.pipeline_cfg[0]['transformers'] = [
{
'name': 'unit_conversion',
'parameters': {
'source': {'unit': '°C'},
'target': {'unit': '°F',
'scale': '(volume * 1.8) + 32'},
}
},
]
self.pipeline_cfg[0]['counters'] = ['core_temperature',
'ambient_temperature']
counters = [
sample.Sample(
name='core_temperature',
type=sample.TYPE_GAUGE,
volume=36.0,
unit='°C',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={}
),
sample.Sample(
name='ambient_temperature',
type=sample.TYPE_GAUGE,
volume=88.8,
unit='°F',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={}
),
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_samples(None, counters)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 2)
core_temp = publisher.samples[1]
self.assertEqual(getattr(core_temp, 'name'), 'core_temperature')
self.assertEqual(getattr(core_temp, 'unit'), '°F')
self.assertEqual(getattr(core_temp, 'volume'), 96.8)
amb_temp = publisher.samples[0]
self.assertEqual(getattr(amb_temp, 'name'), 'ambient_temperature')
self.assertEqual(getattr(amb_temp, 'unit'), '°F')
self.assertEqual(getattr(amb_temp, 'volume'), 88.8)
self.assertEqual(getattr(core_temp, 'volume'), 96.8)
def _do_test_rate_of_change_conversion(self, prev, curr, type, expected,
offset=1, weight=None):
s = "(resource_metadata.user_metadata.autoscaling_weight or 1.0)" \
"* (resource_metadata.non.existent or 1.0)" \
"* (100.0 / (10**9 * (resource_metadata.cpu_number or 1)))"
self.pipeline_cfg[0]['transformers'] = [
{
'name': 'rate_of_change',
'parameters': {
'source': {},
'target': {'name': 'cpu_util',
'unit': '%',
'type': sample.TYPE_GAUGE,
'scale': s},
}
},
]
self.pipeline_cfg[0]['counters'] = ['cpu']
now = timeutils.utcnow()
later = now + datetime.timedelta(minutes=offset)
um = {'autoscaling_weight': weight} if weight else {}
counters = [
sample.Sample(
name='cpu',
type=type,
volume=prev,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=now.isoformat(),
resource_metadata={'cpu_number': 4,
'user_metadata': um},
),
sample.Sample(
name='cpu',
type=type,
volume=prev,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource2',
timestamp=now.isoformat(),
resource_metadata={'cpu_number': 2,
'user_metadata': um},
),
sample.Sample(
name='cpu',
type=type,
volume=curr,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=later.isoformat(),
resource_metadata={'cpu_number': 4,
'user_metadata': um},
),
sample.Sample(
name='cpu',
type=type,
volume=curr,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource2',
timestamp=later.isoformat(),
resource_metadata={'cpu_number': 2,
'user_metadata': um},
),
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_samples(None, counters)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 2)
pipe.flush(None)
self.assertEqual(len(publisher.samples), 2)
cpu_util = publisher.samples[0]
self.assertEqual(getattr(cpu_util, 'name'), 'cpu_util')
self.assertEqual(getattr(cpu_util, 'resource_id'), 'test_resource')
self.assertEqual(getattr(cpu_util, 'unit'), '%')
self.assertEqual(getattr(cpu_util, 'type'), sample.TYPE_GAUGE)
self.assertEqual(getattr(cpu_util, 'volume'), expected)
cpu_util = publisher.samples[1]
self.assertEqual(getattr(cpu_util, 'name'), 'cpu_util')
self.assertEqual(getattr(cpu_util, 'resource_id'), 'test_resource2')
self.assertEqual(getattr(cpu_util, 'unit'), '%')
self.assertEqual(getattr(cpu_util, 'type'), sample.TYPE_GAUGE)
self.assertEqual(getattr(cpu_util, 'volume'), expected * 2)
def test_rate_of_change_conversion(self):
self._do_test_rate_of_change_conversion(120000000000,
180000000000,
sample.TYPE_CUMULATIVE,
25.0)
def test_rate_of_change_conversion_weight(self):
self._do_test_rate_of_change_conversion(120000000000,
180000000000,
sample.TYPE_CUMULATIVE,
27.5,
weight=1.1)
def test_rate_of_change_conversion_negative_cumulative_delta(self):
self._do_test_rate_of_change_conversion(180000000000,
120000000000,
sample.TYPE_CUMULATIVE,
50.0)
def test_rate_of_change_conversion_negative_gauge_delta(self):
self._do_test_rate_of_change_conversion(180000000000,
120000000000,
sample.TYPE_GAUGE,
-25.0)
def test_rate_of_change_conversion_zero_delay(self):
self._do_test_rate_of_change_conversion(120000000000,
120000000000,
sample.TYPE_CUMULATIVE,
0.0,
offset=0)
def test_rate_of_change_no_predecessor(self):
s = "100.0 / (10**9 * resource_metadata.get('cpu_number', 1))"
self.pipeline_cfg[0]['transformers'] = [
{
'name': 'rate_of_change',
'parameters': {
'source': {},
'target': {'name': 'cpu_util',
'unit': '%',
'type': sample.TYPE_GAUGE,
'scale': s}
}
},
]
self.pipeline_cfg[0]['counters'] = ['cpu']
now = timeutils.utcnow()
counters = [
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=120000000000,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=now.isoformat(),
resource_metadata={'cpu_number': 4}
),
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_samples(None, counters)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(len(publisher.samples), 0)
pipe.flush(None)
self.assertEqual(len(publisher.samples), 0)
def test_resources(self):
resources = ['test1://', 'test2://']
self.pipeline_cfg[0]['resources'] = resources
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertEqual(pipeline_manager.pipelines[0].resources,
resources)
def test_no_resources(self):
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertEqual(len(pipeline_manager.pipelines[0].resources),
0)
| [
[
[
713,
721
],
[
36607,
36615
]
],
[
[
745,
754
],
[
2840,
2849
]
],
[
[
804,
813
],
[
5156,
5165
],
[
5314,
5323
],
[
5474,
5483
]
],
[
[
854,
858
],
[
1224,
1228
],
[
2080,
2084
]
],
[
[
899,
908
],
[
1550,
1559
],
[
5056,
5065
],
[
32675,
32684
],
[
34361,
34370
],
[
34760,
34769
],
[
36566,
36575
],
[
41892,
41901
]
],
[
[
932,
940
],
[
6018,
6026
],
[
6072,
6080
],
[
8241,
8249
],
[
8537,
8545
],
[
9274,
9282
],
[
10591,
10599
],
[
11110,
11118
],
[
11741,
11749
],
[
12115,
12123
],
[
12778,
12786
],
[
13523,
13531
],
[
14051,
14059
],
[
14546,
14554
],
[
15158,
15166
],
[
16013,
16021
],
[
18105,
18113
],
[
19520,
19528
],
[
20084,
20092
],
[
20854,
20862
],
[
22124,
22132
],
[
23425,
23433
],
[
24242,
24250
],
[
25045,
25053
],
[
25612,
25620
],
[
27105,
27113
],
[
28488,
28496
],
[
29916,
29924
],
[
30766,
30774
],
[
32797,
32805
],
[
34882,
34890
],
[
38477,
38485
],
[
42366,
42374
],
[
42929,
42937
],
[
43214,
43222
]
],
[
[
964,
973
],
[
3367,
3376
],
[
5509,
5518
]
],
[
[
1007,
1029
],
[
3133,
3147
],
[
3197,
3211
]
],
[
[
1053,
1059
],
[
1301,
1307
],
[
1355,
1361
],
[
3927,
3933
],
[
4807,
4813
],
[
4861,
4867
],
[
9630,
9636
],
[
16251,
16257
],
[
18344,
18350
],
[
25835,
25841
],
[
28711,
28717
],
[
30917,
30923
],
[
32377,
32383
],
[
32441,
32447
],
[
33419,
33425
],
[
34061,
34067
],
[
34138,
34144
],
[
34457,
34463
],
[
34537,
34543
],
[
36395,
36401
],
[
36737,
36743
],
[
37164,
37170
],
[
37592,
37598
],
[
38021,
38027
],
[
39172,
39178
],
[
39545,
39551
],
[
39851,
39857
],
[
40155,
40161
],
[
40538,
40544
],
[
40856,
40862
],
[
41160,
41166
],
[
41722,
41728
],
[
41944,
41950
],
[
42008,
42014
]
],
[
[
1083,
1094
],
[
3502,
3513
],
[
4370,
4381
],
[
5191,
5202
],
[
5349,
5360
],
[
5606,
5617
]
],
[
[
1130,
1141
],
[
1687,
1698
],
[
1927,
1938
],
[
2609,
2620
]
],
[
[
1177,
1188
],
[
2676,
2687
],
[
2738,
2749
]
],
[
[
1197,
1223
]
],
[
[
2067,
2079
],
[
4750,
4762
]
]
] |
# -*- coding: utf-8 -*-
import unittest
from tennis import TennisGame1
test_cases = [
(0, 0, "Love-All", '0-0', 'player1', 'player2'),
(1, 1, "Fifteen-All", '0-0', 'player1', 'player2'),
(2, 2, "Thirty-All", '0-0', 'player1', 'player2'),
(3, 3, "Deuce", '0-0', 'player1', 'player2'),
(4, 4, "Deuce", '0-0', 'player1', 'player2'),
(1, 0, "Fifteen-Love", '0-0', 'player1', 'player2'),
(0, 1, "Love-Fifteen", '0-0', 'player1', 'player2'),
(2, 0, "Thirty-Love", '0-0', 'player1', 'player2'),
(0, 2, "Love-Thirty", '0-0', 'player1', 'player2'),
(3, 0, "Forty-Love", '0-0', 'player1', 'player2'),
(0, 3, "Love-Forty", '0-0', 'player1', 'player2'),
(4, 0, "Love-All", '1-0', 'player1', 'player2'),
(0, 4, "Love-All", '0-1', 'player1', 'player2'),
(2, 1, "Thirty-Fifteen", '0-0', 'player1', 'player2'),
(1, 2, "Fifteen-Thirty", '0-0', 'player1', 'player2'),
(3, 1, "Forty-Fifteen", '0-0', 'player1', 'player2'),
(1, 3, "Fifteen-Forty", '0-0', 'player1', 'player2'),
(4, 1, "Love-All", '1-0', 'player1', 'player2'),
(1, 4, "Love-All", '0-1', 'player1', 'player2'),
(3, 2, "Forty-Thirty", '0-0', 'player1', 'player2'),
(2, 3, "Thirty-Forty", '0-0', 'player1', 'player2'),
(4, 2, "Love-All", '1-0', 'player1', 'player2'),
(2, 4, "Love-All", '0-1', 'player1', 'player2'),
(4, 3, "Advantage player1", '0-0', 'player1', 'player2'),
(3, 4, "Advantage player2", '0-0', 'player1', 'player2'),
(5, 4, "Advantage player1", '0-0', 'player1', 'player2'),
(4, 5, "Advantage player2", '0-0', 'player1', 'player2'),
(15, 14, "Advantage player1", '0-0', 'player1', 'player2'),
(14, 15, "Advantage player2", '0-0', 'player1', 'player2'),
(6, 4, 'Love-All', '1-0', 'player1', 'player2'),
(4, 6, 'Love-All', '0-1', 'player1', 'player2'),
(16, 14, 'Love-All', '1-0', 'player1', 'player2'),
(14, 16, 'Love-All', '0-1', 'player1', 'player2'),
(6, 4, 'Love-All', '1-0', 'One', 'player2'),
(4, 6, 'Love-All', '0-1', 'player1', 'Two'),
(6, 5, 'Advantage One', '0-0', 'One', 'player2'),
(5, 6, 'Advantage Two', '0-0', 'player1', 'Two'),
(15, 0, 'Forty-Love', '3-0', 'player1', 'Two')
]
def play_game(TennisGame, p1Points, p2Points, p1Name, p2Name):
game = TennisGame(p1Name, p2Name)
for i in range(max(p1Points, p2Points)):
if i < p1Points:
game.won_point(p1Name)
if i < p2Points:
game.won_point(p2Name)
return game
class TestTennis(unittest.TestCase):
def test_current_game_scores(self):
for testcase in test_cases:
(p1Points, p2Points, score, game_score, p1Name, p2Name) = testcase
game = play_game(TennisGame1, p1Points, p2Points, p1Name, p2Name)
self.assertEqual(score, game.score())
def test_games_scores(self):
for testcase in test_cases:
(p1Points, p2Points, score, game_score, p1Name, p2Name) = testcase
game = play_game(TennisGame1, p1Points, p2Points, p1Name, p2Name)
self.assertEqual(game_score, game.games_score())
if __name__ == "__main__":
unittest.main()
| [
[
[
32,
40
],
[
2520,
2528
],
[
3150,
3158
]
],
[
[
61,
72
],
[
2730,
2741
],
[
3007,
3018
]
],
[
[
74,
84
],
[
2610,
2620
],
[
2887,
2897
]
],
[
[
2223,
2232
],
[
2720,
2729
],
[
2997,
3006
]
],
[
[
2509,
2519
]
]
] |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for cros_test_lib (tests for tests? Who'd a thunk it)."""
from __future__ import print_function
import os
import subprocess
import sys
import time
import unittest
import mock
from chromite.lib import cros_test_lib
from chromite.lib import cros_build_lib
from chromite.lib import osutils
from chromite.lib import partial_mock
from chromite.lib import timeout_util
# Convenience alias
Dir = cros_test_lib.Directory
class CrosTestCaseTest(cros_test_lib.TestCase):
"""Test the cros_test_lib.TestCase."""
def testAssertStartsWith(self):
s = 'abcdef'
prefix = 'abc'
self.assertStartsWith(s, prefix)
prefix = 'def'
self.assertRaises(AssertionError, self.assertStartsWith, s, prefix)
def testAssertEndsWith(self):
s = 'abcdef'
suffix = 'abc'
self.assertRaises(AssertionError, self.assertEndsWith, s, suffix)
suffix = 'def'
self.assertEndsWith(s, suffix)
class TruthTableTest(cros_test_lib.TestCase):
"""Test TruthTable functionality."""
def _TestTableSanity(self, tt, lines):
"""Run the given truth table through basic sanity checks.
Args:
tt: A TruthTable object.
lines: The expect input lines, in order (list of tuples).
"""
# Check that more than one iterable can be used at once.
iter1 = iter(tt)
iter2 = iter(tt)
self.assertEqual(lines[0], next(iter1))
self.assertEqual(lines[0], next(iter2))
self.assertEqual(lines[1], next(iter2))
# Check that iteration again works again.
for ix, line in enumerate(tt):
self.assertEqual(lines[ix], line)
# Check direct access of input lines.
for i in range(len(tt)):
self.assertEqual(lines[i], tt.GetInputs(i))
# Check assertions on bad input to GetInputs.
self.assertRaises(ValueError, tt.GetInputs, -1)
self.assertRaises(ValueError, tt.GetInputs, len(tt))
def testTwoDimensions(self):
"""Test TruthTable behavior for two boolean inputs."""
tt = cros_test_lib.TruthTable(inputs=[(True, True), (True, False)])
self.assertEqual(len(tt), pow(2, 2))
# Check truth table output.
self.assertFalse(tt.GetOutput((False, False)))
self.assertFalse(tt.GetOutput((False, True)))
self.assertTrue(tt.GetOutput((True, False)))
self.assertTrue(tt.GetOutput((True, True)))
# Check assertions on bad input to GetOutput.
self.assertRaises(TypeError, tt.GetOutput, True)
self.assertRaises(ValueError, tt.GetOutput, (True, True, True))
# Check iteration over input lines.
lines = list(tt)
self.assertEqual((False, False), lines[0])
self.assertEqual((False, True), lines[1])
self.assertEqual((True, False), lines[2])
self.assertEqual((True, True), lines[3])
self._TestTableSanity(tt, lines)
def testFourDimensions(self):
"""Test TruthTable behavior for four boolean inputs."""
false1 = (True, True, True, False)
false2 = (True, False, True, False)
true1 = (False, True, False, True)
true2 = (True, True, False, False)
tt = cros_test_lib.TruthTable(inputs=(false1, false2), input_result=False)
self.assertEqual(len(tt), pow(2, 4))
# Check truth table output.
self.assertFalse(tt.GetOutput(false1))
self.assertFalse(tt.GetOutput(false2))
self.assertTrue(tt.GetOutput(true1))
self.assertTrue(tt.GetOutput(true2))
# Check assertions on bad input to GetOutput.
self.assertRaises(TypeError, tt.GetOutput, True)
self.assertRaises(ValueError, tt.GetOutput, (True, True, True))
# Check iteration over input lines.
lines = list(tt)
self.assertEqual((False, False, False, False), lines[0])
self.assertEqual((False, False, False, True), lines[1])
self.assertEqual((False, True, True, True), lines[7])
self.assertEqual((True, True, True, True), lines[15])
self._TestTableSanity(tt, lines)
class VerifyTarballTest(cros_test_lib.MockTempDirTestCase):
"""Test tarball verification functionality."""
TARBALL = 'fake_tarball'
def setUp(self):
self.rc_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
def _MockTarList(self, files):
"""Mock out tarball content list call.
Args:
files: A list of contents to return.
"""
self.rc_mock.AddCmdResult(
partial_mock.ListRegex('tar -tf'), output='\n'.join(files))
def testNormPath(self):
"""Test path normalization."""
tar_contents = ['./', './foo/', './foo/./a', './foo/./b']
dir_struct = [Dir('.', []), Dir('foo', ['a', 'b'])]
self._MockTarList(tar_contents)
cros_test_lib.VerifyTarball(self.TARBALL, dir_struct)
def testDuplicate(self):
"""Test duplicate detection."""
tar_contents = ['a', 'b', 'a']
dir_struct = ['a', 'b']
self._MockTarList(tar_contents)
self.assertRaises(AssertionError, cros_test_lib.VerifyTarball, self.TARBALL,
dir_struct)
class MockTestCaseTest(cros_test_lib.TestCase):
"""Tests MockTestCase functionality."""
class MyMockTestCase(cros_test_lib.MockTestCase):
"""Helper class for testing MockTestCase."""
def testIt(self):
pass
class Mockable(object):
"""Helper test class intended for having values mocked out."""
TO_BE_MOCKED = 0
TO_BE_MOCKED2 = 10
TO_BE_MOCKED3 = 20
def GetPatcher(self, attr, val):
return mock.patch('%s.MockTestCaseTest.Mockable.%s' % (__name__, attr),
new=val)
def testPatchRemovalError(self):
"""Verify that patch removal during tearDown is robust to Exceptions."""
tc = self.MyMockTestCase('testIt')
patcher = self.GetPatcher('TO_BE_MOCKED', -100)
patcher2 = self.GetPatcher('TO_BE_MOCKED2', -200)
patcher3 = self.GetPatcher('TO_BE_MOCKED3', -300)
patcher3.start()
tc.setUp()
tc.StartPatcher(patcher)
tc.StartPatcher(patcher2)
patcher.stop()
self.assertEqual(self.Mockable.TO_BE_MOCKED2, -200)
self.assertEqual(self.Mockable.TO_BE_MOCKED3, -300)
self.assertRaises(RuntimeError, tc.tearDown)
# Make sure that even though exception is raised for stopping 'patcher', we
# continue to stop 'patcher2', and run patcher.stopall().
self.assertEqual(self.Mockable.TO_BE_MOCKED2, 10)
self.assertEqual(self.Mockable.TO_BE_MOCKED3, 20)
class TestCaseTest(unittest.TestCase):
"""Tests TestCase functionality."""
def testTimeout(self):
"""Test that test cases are interrupted when they are hanging."""
class TimeoutTestCase(cros_test_lib.TestCase):
"""Test case that raises a TimeoutError because it takes too long."""
TEST_CASE_TIMEOUT = 1
def testSleeping(self):
"""Sleep for 2 minutes. This should raise a TimeoutError."""
time.sleep(2 * 60)
raise AssertionError('Test case should have timed out.')
# Run the test case, verifying it raises a TimeoutError.
test = TimeoutTestCase(methodName='testSleeping')
self.assertRaises(timeout_util.TimeoutError, test.testSleeping)
class OutputTestCaseTest(cros_test_lib.OutputTestCase,
cros_test_lib.TempDirTestCase):
"""Tests OutputTestCase functionality."""
def testStdoutAndStderr(self):
"""Check capturing stdout and stderr."""
with self.OutputCapturer():
print('foo')
print('bar', file=sys.stderr)
self.AssertOutputContainsLine('foo')
self.AssertOutputContainsLine('bar', check_stdout=False, check_stderr=True)
def testStdoutReadDuringCapture(self):
"""Check reading stdout mid-capture."""
with self.OutputCapturer():
print('foo')
self.AssertOutputContainsLine('foo')
print('bar')
self.AssertOutputContainsLine('bar')
self.AssertOutputContainsLine('foo')
self.AssertOutputContainsLine('bar')
def testClearCaptured(self):
"""Check writing data, clearing it, then writing more data."""
with self.OutputCapturer() as cap:
print('foo')
self.AssertOutputContainsLine('foo')
cap.ClearCaptured()
self.AssertOutputContainsLine('foo', invert=True)
print('bar')
self.AssertOutputContainsLine('bar')
@cros_test_lib.pytestmark_skip
def testRunCommandCapture(self):
"""Check capturing run() subprocess output."""
with self.OutputCapturer():
cros_build_lib.run(['sh', '-c', 'echo foo; echo bar >&2'])
self.AssertOutputContainsLine('foo')
self.AssertOutputContainsLine('bar', check_stdout=False, check_stderr=True)
def testCapturingStdoutAndStderrToFile(self):
"""Check that OutputCapturer captures to a named file."""
stdout_path = os.path.join(self.tempdir, 'stdout')
stderr_path = os.path.join(self.tempdir, 'stderr')
with self.OutputCapturer(stdout_path=stdout_path, stderr_path=stderr_path):
print('foo')
print('bar', file=sys.stderr)
# Check that output can be read by OutputCapturer.
self.AssertOutputContainsLine('foo')
self.AssertOutputContainsLine('bar', check_stdout=False, check_stderr=True)
# Verify that output is actually written to the correct files.
self.assertEqual('foo\n', osutils.ReadFile(stdout_path))
self.assertEqual('bar\n', osutils.ReadFile(stderr_path))
class RunCommandTestCase(cros_test_lib.RunCommandTestCase):
"""Verify the test case behavior."""
def testPopenMockEncodingEmptyStrings(self):
"""Verify our automatic encoding in PopenMock works with default output."""
self.rc.AddCmdResult(['/x'])
result = cros_build_lib.run(['/x'], capture_output=True)
self.assertEqual(b'', result.stdout)
self.assertEqual(b'', result.stderr)
result = cros_build_lib.run(['/x'], capture_output=True, encoding='utf-8')
self.assertEqual('', result.stdout)
self.assertEqual('', result.stderr)
def testPopenMockBinaryData(self):
"""Verify our automatic encoding in PopenMock works with bytes."""
self.rc.AddCmdResult(['/x'], error=b'\xff')
result = cros_build_lib.run(['/x'], capture_output=True)
self.assertEqual(b'', result.stdout)
self.assertEqual(b'\xff', result.stderr)
with self.assertRaises(UnicodeDecodeError):
cros_build_lib.run(['/x'], capture_output=True, encoding='utf-8')
def testPopenMockMixedData(self):
"""Verify our automatic encoding in PopenMock works with mixed data."""
self.rc.AddCmdResult(['/x'], error=b'abc\x00', output=u'Yes\u20a0')
result = cros_build_lib.run(['/x'], capture_output=True)
self.assertEqual(b'Yes\xe2\x82\xa0', result.stdout)
self.assertEqual(b'abc\x00', result.stderr)
result = cros_build_lib.run(['/x'], capture_output=True, encoding='utf-8')
self.assertEqual(u'Yes\u20a0', result.stdout)
self.assertEqual(u'abc\x00', result.stderr)
def testPopenMockCombiningStderr(self):
"""Verify combining stderr into stdout works."""
self.rc.AddCmdResult(['/x'], stderr='err', stdout='out')
result = cros_build_lib.run(['/x'], stdout=True, stderr=True)
self.assertEqual(b'err', result.stderr)
self.assertEqual(b'out', result.stdout)
result = cros_build_lib.run(['/x'], stdout=True, stderr=subprocess.STDOUT)
self.assertEqual(None, result.stderr)
self.assertEqual(b'outerr', result.stdout)
| [
[
[
288,
302
]
],
[
[
311,
313
],
[
8695,
8697
],
[
8750,
8752
]
],
[
[
321,
331
],
[
11174,
11184
]
],
[
[
339,
342
],
[
7429,
7432
],
[
8910,
8913
]
],
[
[
350,
354
],
[
6849,
6853
]
],
[
[
362,
370
],
[
6430,
6438
]
],
[
[
379,
383
],
[
5476,
5480
]
],
[
[
410,
423
],
[
601,
614
],
[
650,
663
],
[
1131,
1144
],
[
4049,
4062
],
[
5066,
5079
],
[
5157,
5170
],
[
7144,
7157
],
[
7199,
7212
],
[
8232,
8245
],
[
9315,
9328
],
[
2152,
2165
],
[
3202,
3215
],
[
4219,
4232
],
[
4709,
4722
],
[
4964,
4977
],
[
6611,
6624
]
],
[
[
449,
463
],
[
8386,
8400
],
[
9563,
9577
],
[
9706,
9720
],
[
10022,
10036
],
[
10210,
10224
],
[
10474,
10488
],
[
10639,
10653
],
[
10973,
10987
],
[
11127,
11141
]
],
[
[
489,
496
],
[
9196,
9203
],
[
9257,
9264
]
],
[
[
522,
534
],
[
4429,
4441
]
],
[
[
560,
572
],
[
7071,
7083
]
],
[
[
595,
598
],
[
4631,
4634
],
[
4645,
4648
]
],
[
[
633,
649
]
],
[
[
1116,
1130
]
],
[
[
4031,
4048
]
],
[
[
5049,
5065
]
],
[
[
6417,
6429
]
],
[
[
7125,
7143
]
],
[
[
9296,
9314
]
]
] |
import os
import time
import socket
from mmdet.apis import init_detector, inference_detector, show_result_pyplot, show_result_ins
import mmcv
# map
# config_file = '../configs/solo/decoupled_solo_r50_fpn_8gpu_3x.py'
# # download the checkpoint from model zoo and put it in `checkpoints/`
# checkpoint_file = '../checkpoints/DECOUPLED_SOLO_R50_3x.pth'
# config_file = '../configs/solo/solo_r50_fpn_8gpu_1x.py'
# checkpoint_file = '../checkpoints/SOLO_R50_1x.pth'
#
# config_file = '../configs/solo/solo_r50_fpn_8gpu_3x.py'
# checkpoint_file = '../checkpoints/SOLO_R50_3x.pth'
## AP
#
# config_file = './configs/solo/solo_r101_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLO_R101_3x.pth'
# config_file = '../configs/solo/decoupled_solo_r101_fpn_8gpu_3x.py'
# checkpoint_file = '../checkpoints/DECOUPLED_SOLO_R101_3x.pth'
# config_file = './configs/solov2/solov2_r101_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLOv2_R101_3x.pth'
# config_file = './configs/solov2/solov2_r101_dcn_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLOv2_R101_DCN_3x.pth'
# config_file = './configs/solov2/solov2_x101_dcn_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLOv2_X101_DCN_3x.pth'
## speed
# config_file = '../configs/solo/decoupled_solo_light_dcn_r50_fpn_8gpu_3x.py'
# checkpoint_file = '../checkpoints/DECOUPLED_SOLO_LIGHT_DCN_R50_3x.pth'
# config_file = './configs/solov2/solov2_light_512_dcn_r50_fpn_8gpu_3x.py'
# checkpoint_file = './checkpoints/SOLOv2_LIGHT_512_DCN_R50_3x.pth'
config_file = 'configs/solov2/solov2_light_448_r18_fpn_8gpu_3x.py'
checkpoint_file = './work_dir/0602/ps-X10DRG/solov2_light_448_r18_fpn_8gpu_3x/epoch_36.pth'
print(config_file)
# build the model from a config file and a checkpoint file
cuda_n = 0
print('gpu:', cuda_n)
os.environ['CUDA_VISIBLE_DEVICES'] = f'{cuda_n}'
model = init_detector(config_file, checkpoint_file, device=f'cuda')
#
# # test a single image
#
#
# for video_name in ['1', '2', '3']:
score_thr = 0.25
# for video_name in ['coco_72']:
# for video_name in ['Yotube-vos-3rd']:
# for video_name in ['transformed']:
save_dir = f'result/{socket.gethostname()}0530/'
# for video_name in ['cityscape_100', 'GTA5_99']:
for video_name in ['coco_72']:
# for video_name in ['Yotube-vos-3rd_rotate180']:
data_dir = f'data/{video_name}/'
out_img_dir = f"{save_dir}{config_file.split('/')[-1].split('.')[0]}/{video_name}_score_thr_{score_thr}/"
if not os.path.exists(out_img_dir):
os.makedirs(out_img_dir)
print('save', save_dir, os.path.abspath(save_dir), out_img_dir)
n = len(os.listdir(data_dir))
start = time.time()
# for i in range(1, 141):
for img in os.listdir(data_dir):
# img = f'{i}.jpg'
result = inference_detector(model, f'{data_dir}{img}')
show_result_ins(f'{data_dir}{img}', result, model.CLASSES, score_thr=score_thr, out_file=f"./{out_img_dir}{img}")
# print('save', os.path.abspath(f"../{out_img_dir}{img}"))
end = time.time()
# print()
# for img in os.listdir(directory):
# # print(f'{directory}{img}')
# # result = inference_detector(model, f'{directory}{img}')
# # show_result_ins(f'{directory}{img}', result, model.CLASSES, score_thr=0.25, out_file=f"../data/out/{img}")
# break
print('fps:', n/(end - start), 'n:', n)
| [
[
[
7,
9
],
[
1786,
1788
],
[
2439,
2441
],
[
2476,
2478
],
[
2530,
2532
],
[
2582,
2584
],
[
2673,
2675
]
],
[
[
17,
21
],
[
2616,
2620
],
[
2985,
2989
]
],
[
[
29,
35
],
[
2119,
2125
]
],
[
[
60,
73
],
[
1843,
1856
]
],
[
[
75,
93
],
[
2740,
2758
]
],
[
[
95,
113
]
],
[
[
115,
130
],
[
2794,
2809
]
],
[
[
138,
142
]
],
[
[
1511,
1522
],
[
1678,
1689
],
[
1857,
1868
],
[
2349,
2360
]
],
[
[
1578,
1593
],
[
1870,
1885
]
],
[
[
1753,
1759
],
[
1778,
1784
],
[
1826,
1832
]
],
[
[
1835,
1840
],
[
2759,
2764
],
[
2838,
2843
]
],
[
[
1970,
1979
],
[
2415,
2424
],
[
2863,
2872
]
],
[
[
2098,
2106
],
[
2339,
2347
],
[
2520,
2528
],
[
2546,
2554
]
],
[
[
2203,
2213
],
[
2304,
2314
],
[
2392,
2402
]
],
[
[
2285,
2293
],
[
2593,
2601
],
[
2684,
2692
],
[
2769,
2777
],
[
2813,
2821
]
],
[
[
2322,
2333
],
[
2454,
2465
],
[
2488,
2499
],
[
2557,
2568
],
[
2888,
2899
]
],
[
[
2574,
2575
],
[
3314,
3315
],
[
3337,
3338
]
],
[
[
2608,
2613
],
[
3323,
3328
]
],
[
[
2666,
2669
],
[
2779,
2782
],
[
2823,
2826
],
[
2901,
2904
]
],
[
[
2731,
2737
],
[
2830,
2836
]
],
[
[
2979,
2982
],
[
3317,
3320
]
]
] |
from argparse import ArgumentParser
from api import State, util, engine
import random, csv, os
from rich import print
def run_tournament(options):
'''
NOTES FOR THE CSV FILENAME
the first bot is the tracked one, the other is the opponent
for example in T_Dataset_ml-rdeep.csv
ml is the tracked player
and rdeep is the opponent
'''
botnames = options.players.split(",")
bots = [util.load_player(botname) for botname in botnames]
n = len(bots)
wins = [0] * n
matches = [(p1, p2) for p1 in range(n) for p2 in range(n) if p1 < p2]
totalgames = (n*n - n)/2 * options.repeats
playedgames, scoredgames, games_count, seeds = 0, 0, 0, []
filename = "T_Dataset_{}-{}.csv".format(botnames[options.indexed - 1], botnames[options.indexed - 2])
with os.scandir() as entries:
for entry in entries:
if filename == entry.name:
with open(filename, "r", newline="") as t_data:
hist_data = list(csv.reader(t_data))
if not hist_data == []:
seeds = [int(item[1]) for item in hist_data]
games_count = [int(item[0]) for item in hist_data][-1]
else:
games_count, seeds = 0, []
# load existing seeds
if options.existing:
seeds_file = "T_Dataset_{}.csv".format(options.existing)
with open(seeds_file, "r", newline="") as seeds_data:
seeds = [int(seed.split(",")[1]) for seed in list(seeds_data.readlines())]
else:
seeds = []
print('Playing {} scored games:'.format(int(totalgames)))
with open(filename, "a", newline="") as t_data:
t_writer = csv.writer(t_data)
if seeds:
for a, b in matches:
for seed in seeds:
p = [a, b] if random.choice([True, False]) else [b, a]
state = State.generate(id=seed, phase=int(options.phase))
winner, score = engine.play(bots[p[0]], bots[p[1]], state, options.max_time*1000, verbose=options.verbose, fast=options.fast)
if winner is not None:
winner = p[winner - 1]
wins[winner] += score
#if winner == options.indexed - 1 and score > 1:
# t_writer.writerow([games_count + scoredgames, seed])
if score > 0:
scoredgames += 1
playedgames += 1
print('Played {} games, {:.0f} scored out of {:.0f} ([yellow]{:.0f}%[/yellow]): [italic green]{}[/italic green] won, seed [red]{}[/red], [black]{}[/black] \r'
.format(playedgames, scoredgames, len(seeds), scoredgames/float(len(seeds)) * 100, botnames[winner], seed, wins))
else:
for a, b in matches:
while not scoredgames == options.repeats:
p = [a, b] if random.choice([True, False]) else [b, a]
# Generate a state with a random seed
seed = random.randint(1000000, 9999999)
while seed in seeds:
seed = random.randint(1000000, 9999999)
seeds.append(seed)
state = State.generate(id=seed, phase=int(options.phase))
winner, score = engine.play(bots[p[0]], bots[p[1]], state, options.max_time*1000, verbose=options.verbose, fast=options.fast)
if winner is not None:
winner = p[winner - 1]
wins[winner] += score
if winner == options.indexed - 1 and score > 1:
t_writer.writerow([int(totalgames), seed])
if score > 0:
scoredgames += 1
playedgames += 1
print('Played {} games, {:.0f} scored out of {:.0f} ([yellow]{:.0f}%[/yellow]): [italic green]{}[/italic green] won, seed [red]{}[/red], [black]{}[/black] \r'
.format(playedgames, scoredgames, totalgames, scoredgames/float(totalgames) * 100, botnames[winner], seed, wins))
print('Results:')
for i, bot in enumerate(bots):
games_2 = int(wins[i] / 100000)
games_3 = int(wins[i] % 100000)
print(' '*4 + 'bot {}: {} points, won {} [purple]2[/purple] point games, {} [purple]3[/purple] point games, {} total'.format(bot, wins[i], games_2, games_3, games_2 + games_3))
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-s", "--starting-phase",
dest="phase",
help="Which phase the game should start at.",
default=1)
parser.add_argument("-p", "--players",
dest="players",
help="Comma-separated list of player names (enclose with quotes).",
default="rand,bully,rdeep")
parser.add_argument("-r", "--repeats",
dest="repeats",
help="How many matches to play for each pair of bots",
type=int, default=10)
parser.add_argument("-t", "--max-time",
dest="max_time",
help="maximum amount of time allowed per turn in seconds (default: 5)",
type=int, default=5)
parser.add_argument("-f", "--fast",
dest="fast",
action="store_true",
help="This option forgoes the engine's check of whether a bot is able to make a decision in the allotted time, so only use this option if you are sure that your bot is stable.")
parser.add_argument("-v", "--verbose",
dest="verbose",
action="store_true",
help="Print verbose information")
parser.add_argument("-i", "--indexed",
dest="indexed",
help="Chose the wins of which player should be tracked (player 1 / 2)",
type=int, default=1)
parser.add_argument("-e", "--existing",
dest="existing",
help="Choose which dataset to load seeds from",
type=str, default=None)
run_tournament(parser.parse_args())
| [
[
[
21,
35
],
[
4647,
4661
]
],
[
[
52,
57
],
[
1929,
1934
],
[
3338,
3343
]
],
[
[
59,
63
],
[
416,
420
]
],
[
[
65,
71
],
[
2016,
2022
],
[
3425,
3431
]
],
[
[
79,
85
],
[
1859,
1865
],
[
3005,
3011
],
[
3132,
3138
],
[
3237,
3243
]
],
[
[
87,
90
],
[
1001,
1004
],
[
1719,
1722
]
],
[
[
92,
94
],
[
806,
808
]
],
[
[
112,
117
],
[
1590,
1595
],
[
2568,
2573
],
[
3965,
3970
],
[
4271,
4276
],
[
4428,
4433
]
],
[
[
123,
137
],
[
6470,
6484
]
],
[
[
4638,
4644
],
[
4669,
4675
],
[
4863,
4869
],
[
5091,
5097
],
[
5300,
5306
],
[
5527,
5533
],
[
5852,
5858
],
[
6039,
6045
],
[
6264,
6270
],
[
6485,
6491
]
]
] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/multitracker.py
"""
import numpy as np
from collections import defaultdict
from ..matching import jde_matching as matching
from ..motion import KalmanFilter
from .base_jde_tracker import TrackState, STrack
from .base_jde_tracker import joint_stracks, sub_stracks, remove_duplicate_stracks
__all__ = ['JDETracker']
class JDETracker(object):
__shared__ = ['num_classes']
"""
JDE tracker, support single class and multi classes
Args:
num_classes (int): the number of classes
det_thresh (float): threshold of detection score
track_buffer (int): buffer for tracker
min_box_area (int): min box area to filter out low quality boxes
vertical_ratio (float): w/h, the vertical ratio of the bbox to filter
bad results. If set <0 means no need to filter bboxes,usually set
1.6 for pedestrian tracking.
tracked_thresh (float): linear assignment threshold of tracked
stracks and detections
r_tracked_thresh (float): linear assignment threshold of
tracked stracks and unmatched detections
unconfirmed_thresh (float): linear assignment threshold of
unconfirmed stracks and unmatched detections
motion (str): motion model, KalmanFilter as default
conf_thres (float): confidence threshold for tracking
metric_type (str): either "euclidean" or "cosine", the distance metric
used for measurement to track association.
"""
def __init__(self,
use_byte=False,
num_classes=1,
det_thresh=0.3,
track_buffer=30,
min_box_area=200,
vertical_ratio=1.6,
tracked_thresh=0.7,
r_tracked_thresh=0.5,
unconfirmed_thresh=0.7,
conf_thres=0,
match_thres=0.8,
low_conf_thres=0.2,
motion='KalmanFilter',
metric_type='euclidean'):
self.use_byte = use_byte
self.num_classes = num_classes
self.det_thresh = det_thresh if not use_byte else conf_thres + 0.1
self.track_buffer = track_buffer
self.min_box_area = min_box_area
self.vertical_ratio = vertical_ratio
self.tracked_thresh = tracked_thresh
self.r_tracked_thresh = r_tracked_thresh
self.unconfirmed_thresh = unconfirmed_thresh
self.conf_thres = conf_thres
self.match_thres = match_thres
self.low_conf_thres = low_conf_thres
if motion == 'KalmanFilter':
self.motion = KalmanFilter()
self.metric_type = metric_type
self.frame_id = 0
self.tracked_tracks_dict = defaultdict(list) # dict(list[STrack])
self.lost_tracks_dict = defaultdict(list) # dict(list[STrack])
self.removed_tracks_dict = defaultdict(list) # dict(list[STrack])
self.max_time_lost = 0
# max_time_lost will be calculated: int(frame_rate / 30.0 * track_buffer)
def update(self, pred_dets, pred_embs=None):
"""
Processes the image frame and finds bounding box(detections).
Associates the detection with corresponding tracklets and also handles
lost, removed, refound and active tracklets.
Args:
pred_dets (np.array): Detection results of the image, the shape is
[N, 6], means 'cls_id, score, x0, y0, x1, y1'.
pred_embs (np.array): Embedding results of the image, the shape is
[N, 128] or [N, 512].
Return:
output_stracks_dict (dict(list)): The list contains information
regarding the online_tracklets for the recieved image tensor.
"""
self.frame_id += 1
if self.frame_id == 1:
STrack.init_count(self.num_classes)
activated_tracks_dict = defaultdict(list)
refined_tracks_dict = defaultdict(list)
lost_tracks_dict = defaultdict(list)
removed_tracks_dict = defaultdict(list)
output_tracks_dict = defaultdict(list)
pred_dets_dict = defaultdict(list)
pred_embs_dict = defaultdict(list)
# unify single and multi classes detection and embedding results
for cls_id in range(self.num_classes):
cls_idx = (pred_dets[:, 0:1] == cls_id).squeeze(-1)
pred_dets_dict[cls_id] = pred_dets[cls_idx]
if pred_embs is not None:
pred_embs_dict[cls_id] = pred_embs[cls_idx]
else:
pred_embs_dict[cls_id] = None
for cls_id in range(self.num_classes):
""" Step 1: Get detections by class"""
pred_dets_cls = pred_dets_dict[cls_id]
pred_embs_cls = pred_embs_dict[cls_id]
remain_inds = (pred_dets_cls[:, 1:2] > self.conf_thres).squeeze(-1)
if remain_inds.sum() > 0:
pred_dets_cls = pred_dets_cls[remain_inds]
if self.use_byte:
detections = [
STrack(
STrack.tlbr_to_tlwh(tlbrs[2:6]),
tlbrs[1],
cls_id,
30,
temp_feat=None) for tlbrs in pred_dets_cls
]
else:
pred_embs_cls = pred_embs_cls[remain_inds]
detections = [
STrack(
STrack.tlbr_to_tlwh(tlbrs[2:6]), tlbrs[1], cls_id,
30, temp_feat)
for (tlbrs, temp_feat
) in zip(pred_dets_cls, pred_embs_cls)
]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed_dict = defaultdict(list)
tracked_tracks_dict = defaultdict(list)
for track in self.tracked_tracks_dict[cls_id]:
if not track.is_activated:
# previous tracks which are not active in the current frame are added in unconfirmed list
unconfirmed_dict[cls_id].append(track)
else:
# Active tracks are added to the local list 'tracked_stracks'
tracked_tracks_dict[cls_id].append(track)
""" Step 2: First association, with embedding"""
# building tracking pool for the current frame
track_pool_dict = defaultdict(list)
track_pool_dict[cls_id] = joint_stracks(
tracked_tracks_dict[cls_id], self.lost_tracks_dict[cls_id])
# Predict the current location with KalmanFilter
STrack.multi_predict(track_pool_dict[cls_id], self.motion)
if self.use_byte:
dists = matching.iou_distance(track_pool_dict[cls_id],
detections)
matches, u_track, u_detection = matching.linear_assignment(
dists, thresh=self.match_thres) # not self.tracked_thresh
else:
dists = matching.embedding_distance(
track_pool_dict[cls_id],
detections,
metric=self.metric_type)
dists = matching.fuse_motion(
self.motion, dists, track_pool_dict[cls_id], detections)
matches, u_track, u_detection = matching.linear_assignment(
dists, thresh=self.tracked_thresh)
for i_tracked, idet in matches:
# i_tracked is the id of the track and idet is the detection
track = track_pool_dict[cls_id][i_tracked]
det = detections[idet]
if track.state == TrackState.Tracked:
# If the track is active, add the detection to the track
track.update(detections[idet], self.frame_id)
activated_tracks_dict[cls_id].append(track)
else:
# We have obtained a detection from a track which is not active,
# hence put the track in refind_stracks list
track.re_activate(det, self.frame_id, new_id=False)
refined_tracks_dict[cls_id].append(track)
# None of the steps below happen if there are no undetected tracks.
""" Step 3: Second association, with IOU"""
if self.use_byte:
inds_low = pred_dets_dict[cls_id][:, 1:2] > self.low_conf_thres
inds_high = pred_dets_dict[cls_id][:, 1:2] < self.conf_thres
inds_second = np.logical_and(inds_low, inds_high).squeeze(-1)
pred_dets_cls_second = pred_dets_dict[cls_id][inds_second]
# association the untrack to the low score detections
if len(pred_dets_cls_second) > 0:
detections_second = [
STrack(
STrack.tlbr_to_tlwh(tlbrs[:4]),
tlbrs[4],
cls_id,
30,
temp_feat=None)
for tlbrs in pred_dets_cls_second[:, :5]
]
else:
detections_second = []
r_tracked_stracks = [
track_pool_dict[cls_id][i] for i in u_track
if track_pool_dict[cls_id][i].state == TrackState.Tracked
]
dists = matching.iou_distance(r_tracked_stracks,
detections_second)
matches, u_track, u_detection_second = matching.linear_assignment(
dists, thresh=0.4) # not r_tracked_thresh
else:
detections = [detections[i] for i in u_detection]
r_tracked_stracks = []
for i in u_track:
if track_pool_dict[cls_id][i].state == TrackState.Tracked:
r_tracked_stracks.append(track_pool_dict[cls_id][i])
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(
dists, thresh=self.r_tracked_thresh)
for i_tracked, idet in matches:
track = r_tracked_stracks[i_tracked]
det = detections[
idet] if not self.use_byte else detections_second[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id)
activated_tracks_dict[cls_id].append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refined_tracks_dict[cls_id].append(track)
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_tracks_dict[cls_id].append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed_dict[cls_id], detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(
dists, thresh=self.unconfirmed_thresh)
for i_tracked, idet in matches:
unconfirmed_dict[cls_id][i_tracked].update(detections[idet],
self.frame_id)
activated_tracks_dict[cls_id].append(unconfirmed_dict[cls_id][
i_tracked])
for it in u_unconfirmed:
track = unconfirmed_dict[cls_id][it]
track.mark_removed()
removed_tracks_dict[cls_id].append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate(self.motion, self.frame_id)
activated_tracks_dict[cls_id].append(track)
""" Step 5: Update state"""
for track in self.lost_tracks_dict[cls_id]:
if self.frame_id - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_tracks_dict[cls_id].append(track)
self.tracked_tracks_dict[cls_id] = [
t for t in self.tracked_tracks_dict[cls_id]
if t.state == TrackState.Tracked
]
self.tracked_tracks_dict[cls_id] = joint_stracks(
self.tracked_tracks_dict[cls_id], activated_tracks_dict[cls_id])
self.tracked_tracks_dict[cls_id] = joint_stracks(
self.tracked_tracks_dict[cls_id], refined_tracks_dict[cls_id])
self.lost_tracks_dict[cls_id] = sub_stracks(
self.lost_tracks_dict[cls_id], self.tracked_tracks_dict[cls_id])
self.lost_tracks_dict[cls_id].extend(lost_tracks_dict[cls_id])
self.lost_tracks_dict[cls_id] = sub_stracks(
self.lost_tracks_dict[cls_id], self.removed_tracks_dict[cls_id])
self.removed_tracks_dict[cls_id].extend(removed_tracks_dict[cls_id])
self.tracked_tracks_dict[cls_id], self.lost_tracks_dict[
cls_id] = remove_duplicate_stracks(
self.tracked_tracks_dict[cls_id],
self.lost_tracks_dict[cls_id])
# get scores of lost tracks
output_tracks_dict[cls_id] = [
track for track in self.tracked_tracks_dict[cls_id]
if track.is_activated
]
return output_tracks_dict
| [
[
[
733,
744
],
[
9447,
9449
]
],
[
[
769,
780
],
[
3450,
3461
],
[
3522,
3533
],
[
3597,
3608
],
[
4614,
4625
],
[
4662,
4673
],
[
4707,
4718
],
[
4755,
4766
],
[
4802,
4813
],
[
4846,
4857
],
[
4889,
4900
],
[
6607,
6618
],
[
6659,
6670
],
[
7264,
7275
]
],
[
[
805,
829
],
[
7599,
7607
],
[
7752,
7760
],
[
7901,
7909
],
[
8076,
8084
],
[
8223,
8231
],
[
10349,
10357
],
[
10510,
10518
],
[
10938,
10946
],
[
11040,
11048
],
[
12065,
12073
],
[
12175,
12183
]
],
[
[
851,
863
],
[
3334,
3346
]
],
[
[
894,
904
],
[
8560,
8570
],
[
10288,
10298
],
[
10817,
10827
],
[
11367,
11377
],
[
11776,
11786
],
[
13481,
13491
]
],
[
[
906,
912
],
[
4546,
4552
],
[
5781,
5787
],
[
5817,
5823
],
[
6193,
6199
],
[
6229,
6235
],
[
7485,
7491
],
[
9757,
9763
],
[
9793,
9799
]
],
[
[
943,
956
],
[
7320,
7333
],
[
13561,
13574
],
[
13704,
13717
]
],
[
[
958,
969
],
[
13842,
13853
],
[
14055,
14066
]
],
[
[
971,
995
],
[
14325,
14349
]
],
[
[
997,
1004
]
],
[
[
1030,
1040
]
]
] |
import unittest
from cmpcodesize.compare import listFunctionSizes
class ListFunctionSizesTestCase(unittest.TestCase):
def test_when_size_array_is_none_raises(self):
with self.assertRaises(TypeError):
listFunctionSizes(None)
def test_when_size_array_is_empty_returns_none(self):
self.assertIsNone(listFunctionSizes([]))
if __name__ == '__main__':
unittest.main()
| [
[
[
7,
15
],
[
101,
109
],
[
392,
400
]
],
[
[
49,
66
],
[
227,
244
],
[
336,
353
]
],
[
[
75,
100
]
]
] |
'''
数字类型:
int 整形
float 浮点型
Complex 啥东西
''' | [] |
import twint
c = twint.Config()
c.Since = "2021-02-01"
c.Until = "2021-03-14"
c.Search = "(mulher OR mulheres OR garotinha OR garotas OR menina OR garotas) AND \
((engenheira OR cientista OR arquiteta OR programação OR biologa) OR \
(engenharia OR ciência OR stem)) OR \
(matemática) OR \
(#WomenInSTEM OR #WomenInTech OR #MulheresemTIOR #MulheresEmSTEM OR #GirlsInTech OR #MulheresnaCiencia)"
c.Lang = "pt"
c.Store_csv = True
c.Output = "./Query1_2021_pt.csv"
twint.run.Search(c)
| [
[
[
7,
12
],
[
18,
23
],
[
510,
515
]
],
[
[
14,
15
],
[
33,
34
],
[
56,
57
],
[
79,
80
],
[
443,
444
],
[
457,
458
],
[
476,
477
],
[
527,
528
]
]
] |
import argparse
import pprint
import sys
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from mtcnn.config import cfg
from mtcnn.datasets.iteration_based_batch_sampler import build_batch_sampler
from mtcnn.datasets.roidb import get_roidb
from mtcnn.engine.trainer import do_train
from mtcnn.modeling.model_builder import build_model
from mtcnn.utils.logger import setup_logging
from mtcnn.utils.lr_scheduler import make_optimizer
from mtcnn.utils.lr_scheduler import make_scheduler
logger = setup_logging(__name__)
def train():
model = build_model(cfg.MODEL.TYPE)
device = torch.device(cfg.MODEL.DEVICE)
model.to(device)
optimizer = make_optimizer(cfg, model)
scheduler = make_scheduler(cfg, optimizer)
transform = transforms.ToTensor()
roidb = get_roidb(transform=transform)
batch_sampler = build_batch_sampler(
roidb,
cfg.TRAIN.BATCH_SIZE,
shuffle=True
)
data_loader = DataLoader(roidb, batch_sampler=batch_sampler)
do_train(model, data_loader, optimizer, scheduler, device)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--cfg',
dest='cfg_file',
default=None,
type=str
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def main():
args = parse_args()
logger.info('Called with args:')
logger.info(pprint.pformat(args))
if args.cfg_file:
cfg.merge_from_file(args.cfg_file)
logger.info('Using configs:')
logger.info(pprint.pformat(cfg))
train()
if __name__ == '__main__':
main()
| [
[
[
7,
15
],
[
1135,
1143
]
],
[
[
23,
29
],
[
1471,
1477
],
[
1608,
1614
]
],
[
[
37,
40
],
[
1284,
1287
],
[
1336,
1339
]
],
[
[
49,
54
],
[
635,
640
]
],
[
[
62,
98
],
[
794,
804
]
],
[
[
128,
138
],
[
991,
1001
]
],
[
[
165,
168
],
[
606,
609
],
[
648,
651
],
[
719,
722
],
[
762,
765
],
[
924,
927
],
[
1523,
1526
],
[
1623,
1626
]
],
[
[
226,
245
],
[
880,
899
]
],
[
[
279,
288
],
[
829,
838
]
],
[
[
322,
330
],
[
1043,
1051
]
],
[
[
372,
383
],
[
594,
605
]
],
[
[
415,
428
],
[
543,
556
]
],
[
[
466,
480
],
[
704,
718
]
],
[
[
518,
532
],
[
747,
761
]
],
[
[
534,
540
],
[
1422,
1428
],
[
1459,
1465
],
[
1562,
1568
],
[
1596,
1602
]
],
[
[
573,
578
],
[
1634,
1639
]
],
[
[
1108,
1118
],
[
1405,
1415
]
],
[
[
1386,
1390
],
[
1675,
1679
]
]
] |
import abc
import decimal
import io
from typing import (
Any,
)
from eth_utils import (
big_endian_to_int,
to_normalized_address,
to_tuple,
)
from eth_abi.base import (
BaseCoder,
parse_tuple_type_str,
parse_type_str,
)
from eth_abi.exceptions import (
DecodingError,
InsufficientDataBytes,
NonEmptyPaddingBytes,
)
from eth_abi.utils.numeric import (
TEN,
abi_decimal_context,
ceil32,
)
class ContextFramesBytesIO(io.BytesIO):
"""
A byte stream which can track a series of contextual frames in a stack. This
data structure is necessary to perform nested decodings using the
:py:class:``HeadTailDecoder`` since offsets present in head sections are
relative only to a particular encoded object. These offsets can only be
used to locate a position in a decoding stream if they are paired with a
contextual offset that establishes the position of the object in which they
are found.
For example, consider the encoding of a value for the following type::
type: (int,(int,int[]))
value: (1,(2,[3,3]))
There are two tuples in this type: one inner and one outer. The inner tuple
type contains a dynamic type ``int[]`` and, therefore, is itself dynamic.
This means that its value encoding will be placed in the tail section of the
outer tuple's encoding. Furthermore, the inner tuple's encoding will,
itself, contain a tail section with the encoding for ``[3,3]``. All
together, the encoded value of ``(1,(2,[3,3]))`` would look like this (the
data values are normally 32 bytes wide but have been truncated to remove the
redundant zeros at the beginnings of their encodings)::
offset data
--------------------------
^ 0 0x01
| 32 0x40 <-- Offset of object A in global frame (64)
-----|--------------------
Global frame ^ 64 0x02 <-- Beginning of object A (64 w/offset 0 = 64)
| | 96 0x40 <-- Offset of object B in frame of object A (64)
-----|-Object A's frame---
| | 128 0x02 <-- Beginning of object B (64 w/offset 64 = 128)
| | 160 0x03
v v 192 0x03
--------------------------
Note that the offset of object B is encoded as 64 which only specifies the
beginning of its encoded value relative to the beginning of object A's
encoding. Globally, object B is located at offset 128. In order to make
sense out of object B's offset, it needs to be positioned in the context of
its enclosing object's frame (object A).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._frames = []
self._total_offset = 0
def seek_in_frame(self, pos, *args, **kwargs):
"""
Seeks relative to the total offset of the current contextual frames.
"""
self.seek(self._total_offset + pos, *args, **kwargs)
def push_frame(self, offset):
"""
Pushes a new contextual frame onto the stack with the given offset and a
return position at the current cursor position then seeks to the new
total offset.
"""
self._frames.append((offset, self.tell()))
self._total_offset += offset
self.seek_in_frame(0)
def pop_frame(self):
"""
Pops the current contextual frame off of the stack and returns the
cursor to the frame's return position.
"""
try:
offset, return_pos = self._frames.pop()
except IndexError:
raise IndexError('no frames to pop')
self._total_offset -= offset
self.seek(return_pos)
class BaseDecoder(BaseCoder, metaclass=abc.ABCMeta):
"""
Base class for all decoder classes. Subclass this if you want to define a
custom decoder class. Subclasses must also implement
:any:`BaseCoder.from_type_str`.
"""
@abc.abstractmethod
def decode(self, stream: ContextFramesBytesIO) -> Any: # pragma: no cover
"""
Decodes the given stream of bytes into a python value. Should raise
:any:`exceptions.DecodingError` if a python value cannot be decoded
from the given byte stream.
"""
pass
def __call__(self, stream: ContextFramesBytesIO) -> Any:
return self.decode(stream)
class HeadTailDecoder(BaseDecoder):
is_dynamic = True
tail_decoder = None
def validate(self):
super().validate()
if self.tail_decoder is None:
raise ValueError("No `tail_decoder` set")
def decode(self, stream):
start_pos = decode_uint_256(stream)
stream.push_frame(start_pos)
value = self.tail_decoder(stream)
stream.pop_frame()
return value
class TupleDecoder(BaseDecoder):
decoders = None
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.decoders = tuple(
HeadTailDecoder(tail_decoder=d) if getattr(d, 'is_dynamic', False) else d
for d in self.decoders
)
self.is_dynamic = any(getattr(d, 'is_dynamic', False) for d in self.decoders)
def validate(self):
super().validate()
if self.decoders is None:
raise ValueError("No `decoders` set")
@to_tuple
def decode(self, stream):
for decoder in self.decoders:
yield decoder(stream)
@parse_tuple_type_str
def from_type_str(cls, abi_type, registry):
decoders = tuple(
registry.get_decoder(c.to_type_str())
for c in abi_type.components
)
return cls(decoders=decoders)
class SingleDecoder(BaseDecoder):
decoder_fn = None
def validate(self):
super().validate()
if self.decoder_fn is None:
raise ValueError("No `decoder_fn` set")
def validate_padding_bytes(self, value, padding_bytes):
raise NotImplementedError("Must be implemented by subclasses")
def decode(self, stream):
raw_data = self.read_data_from_stream(stream)
data, padding_bytes = self.split_data_and_padding(raw_data)
value = self.decoder_fn(data)
self.validate_padding_bytes(value, padding_bytes)
return value
def read_data_from_stream(self, stream):
raise NotImplementedError("Must be implemented by subclasses")
def split_data_and_padding(self, raw_data):
return raw_data, b''
class BaseArrayDecoder(BaseDecoder):
item_decoder = None
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Use a head-tail decoder to decode dynamic elements
if self.item_decoder.is_dynamic:
self.item_decoder = HeadTailDecoder(
tail_decoder=self.item_decoder,
)
def validate(self):
super().validate()
if self.item_decoder is None:
raise ValueError("No `item_decoder` set")
@parse_type_str(with_arrlist=True)
def from_type_str(cls, abi_type, registry):
item_decoder = registry.get_decoder(abi_type.item_type.to_type_str())
array_spec = abi_type.arrlist[-1]
if len(array_spec) == 1:
# If array dimension is fixed
return SizedArrayDecoder(
array_size=array_spec[0],
item_decoder=item_decoder,
)
else:
# If array dimension is dynamic
return DynamicArrayDecoder(item_decoder=item_decoder)
class SizedArrayDecoder(BaseArrayDecoder):
array_size = None
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.is_dynamic = self.item_decoder.is_dynamic
@to_tuple
def decode(self, stream):
for _ in range(self.array_size):
yield self.item_decoder(stream)
class DynamicArrayDecoder(BaseArrayDecoder):
# Dynamic arrays are always dynamic, regardless of their elements
is_dynamic = True
@to_tuple
def decode(self, stream):
array_size = decode_uint_256(stream)
stream.push_frame(32)
for _ in range(array_size):
yield self.item_decoder(stream)
stream.pop_frame()
class FixedByteSizeDecoder(SingleDecoder):
decoder_fn = None
value_bit_size = None
data_byte_size = None
is_big_endian = None
def validate(self):
super().validate()
if self.value_bit_size is None:
raise ValueError("`value_bit_size` may not be None")
if self.data_byte_size is None:
raise ValueError("`data_byte_size` may not be None")
if self.decoder_fn is None:
raise ValueError("`decoder_fn` may not be None")
if self.is_big_endian is None:
raise ValueError("`is_big_endian` may not be None")
if self.value_bit_size % 8 != 0:
raise ValueError(
"Invalid value bit size: {0}. Must be a multiple of 8".format(
self.value_bit_size,
)
)
if self.value_bit_size > self.data_byte_size * 8:
raise ValueError("Value byte size exceeds data size")
def read_data_from_stream(self, stream):
data = stream.read(self.data_byte_size)
if len(data) != self.data_byte_size:
raise InsufficientDataBytes(
"Tried to read {0} bytes. Only got {1} bytes".format(
self.data_byte_size,
len(data),
)
)
return data
def split_data_and_padding(self, raw_data):
value_byte_size = self._get_value_byte_size()
padding_size = self.data_byte_size - value_byte_size
if self.is_big_endian:
padding_bytes = raw_data[:padding_size]
data = raw_data[padding_size:]
else:
data = raw_data[:value_byte_size]
padding_bytes = raw_data[value_byte_size:]
return data, padding_bytes
def validate_padding_bytes(self, value, padding_bytes):
value_byte_size = self._get_value_byte_size()
padding_size = self.data_byte_size - value_byte_size
if padding_bytes != b'\x00' * padding_size:
raise NonEmptyPaddingBytes(
"Padding bytes were not empty: {0}".format(repr(padding_bytes))
)
def _get_value_byte_size(self):
value_byte_size = self.value_bit_size // 8
return value_byte_size
class Fixed32ByteSizeDecoder(FixedByteSizeDecoder):
data_byte_size = 32
class BooleanDecoder(Fixed32ByteSizeDecoder):
value_bit_size = 8
is_big_endian = True
@staticmethod
def decoder_fn(data):
if data == b'\x00':
return False
elif data == b'\x01':
return True
else:
raise NonEmptyPaddingBytes(
"Boolean must be either 0x0 or 0x1. Got: {0}".format(repr(data))
)
@parse_type_str('bool')
def from_type_str(cls, abi_type, registry):
return cls()
class AddressDecoder(Fixed32ByteSizeDecoder):
value_bit_size = 20 * 8
is_big_endian = True
decoder_fn = staticmethod(to_normalized_address)
@parse_type_str('address')
def from_type_str(cls, abi_type, registry):
return cls()
#
# Unsigned Integer Decoders
#
class UnsignedIntegerDecoder(Fixed32ByteSizeDecoder):
decoder_fn = staticmethod(big_endian_to_int)
is_big_endian = True
@parse_type_str('uint')
def from_type_str(cls, abi_type, registry):
return cls(value_bit_size=abi_type.sub)
decode_uint_256 = UnsignedIntegerDecoder(value_bit_size=256)
#
# Signed Integer Decoders
#
class SignedIntegerDecoder(Fixed32ByteSizeDecoder):
is_big_endian = True
def decoder_fn(self, data):
value = big_endian_to_int(data)
if value >= 2 ** (self.value_bit_size - 1):
return value - 2 ** self.value_bit_size
else:
return value
def validate_padding_bytes(self, value, padding_bytes):
value_byte_size = self._get_value_byte_size()
padding_size = self.data_byte_size - value_byte_size
if value >= 0:
expected_padding_bytes = b'\x00' * padding_size
else:
expected_padding_bytes = b'\xff' * padding_size
if padding_bytes != expected_padding_bytes:
raise NonEmptyPaddingBytes(
"Padding bytes were not empty: {0}".format(repr(padding_bytes))
)
@parse_type_str('int')
def from_type_str(cls, abi_type, registry):
return cls(value_bit_size=abi_type.sub)
#
# Bytes1..32
#
class BytesDecoder(Fixed32ByteSizeDecoder):
is_big_endian = False
@staticmethod
def decoder_fn(data):
return data
@parse_type_str('bytes')
def from_type_str(cls, abi_type, registry):
return cls(value_bit_size=abi_type.sub * 8)
class BaseFixedDecoder(Fixed32ByteSizeDecoder):
frac_places = None
is_big_endian = True
def validate(self):
super().validate()
if self.frac_places is None:
raise ValueError("must specify `frac_places`")
if self.frac_places <= 0 or self.frac_places > 80:
raise ValueError("`frac_places` must be in range (0, 80]")
class UnsignedFixedDecoder(BaseFixedDecoder):
def decoder_fn(self, data):
value = big_endian_to_int(data)
with decimal.localcontext(abi_decimal_context):
decimal_value = decimal.Decimal(value) / TEN ** self.frac_places
return decimal_value
@parse_type_str('ufixed')
def from_type_str(cls, abi_type, registry):
value_bit_size, frac_places = abi_type.sub
return cls(value_bit_size=value_bit_size, frac_places=frac_places)
class SignedFixedDecoder(BaseFixedDecoder):
def decoder_fn(self, data):
value = big_endian_to_int(data)
if value >= 2 ** (self.value_bit_size - 1):
signed_value = value - 2 ** self.value_bit_size
else:
signed_value = value
with decimal.localcontext(abi_decimal_context):
decimal_value = decimal.Decimal(signed_value) / TEN ** self.frac_places
return decimal_value
def validate_padding_bytes(self, value, padding_bytes):
value_byte_size = self._get_value_byte_size()
padding_size = self.data_byte_size - value_byte_size
if value >= 0:
expected_padding_bytes = b'\x00' * padding_size
else:
expected_padding_bytes = b'\xff' * padding_size
if padding_bytes != expected_padding_bytes:
raise NonEmptyPaddingBytes(
"Padding bytes were not empty: {0}".format(repr(padding_bytes))
)
@parse_type_str('fixed')
def from_type_str(cls, abi_type, registry):
value_bit_size, frac_places = abi_type.sub
return cls(value_bit_size=value_bit_size, frac_places=frac_places)
#
# String and Bytes
#
class ByteStringDecoder(SingleDecoder):
is_dynamic = True
@staticmethod
def decoder_fn(data):
return data
@staticmethod
def read_data_from_stream(stream):
data_length = decode_uint_256(stream)
padded_length = ceil32(data_length)
data = stream.read(padded_length)
if len(data) < padded_length:
raise InsufficientDataBytes(
"Tried to read {0} bytes. Only got {1} bytes".format(
padded_length,
len(data),
)
)
padding_bytes = data[data_length:]
if padding_bytes != b'\x00' * (padded_length - data_length):
raise NonEmptyPaddingBytes(
"Padding bytes were not empty: {0}".format(repr(padding_bytes))
)
return data[:data_length]
def validate_padding_bytes(self, value, padding_bytes):
pass
@parse_type_str('bytes')
def from_type_str(cls, abi_type, registry):
return cls()
class StringDecoder(ByteStringDecoder):
@parse_type_str('string')
def from_type_str(cls, abi_type, registry):
return cls()
@staticmethod
def decoder_fn(data):
try:
value = data.decode("utf-8")
except UnicodeDecodeError as e:
raise DecodingError(
e.encoding,
e.object,
e.start,
e.end,
"The returned type for this function is string which is "
"expected to be a UTF8 encoded string of text. The returned "
"value could not be decoded as valid UTF8. This is indicative "
"of a broken application which is using incorrect return types for "
"binary data.") from e
return value
| [
[
[
7,
10
],
[
3826,
3829
],
[
4034,
4037
]
],
[
[
18,
25
],
[
13485,
13492
],
[
13556,
13563
],
[
14132,
14139
],
[
14203,
14210
]
],
[
[
33,
35
],
[
470,
472
]
],
[
[
61,
64
],
[
4107,
4110
],
[
4415,
4418
]
],
[
[
97,
114
],
[
11487,
11504
],
[
11877,
11894
],
[
13447,
13464
],
[
13935,
13952
]
],
[
[
120,
141
],
[
11245,
11266
]
],
[
[
147,
155
],
[
5407,
5415
],
[
7792,
7800
],
[
8061,
8069
]
],
[
[
191,
200
],
[
3805,
3814
]
],
[
[
206,
226
],
[
5524,
5544
]
],
[
[
232,
246
],
[
7054,
7068
],
[
11022,
11036
],
[
11274,
11288
],
[
11537,
11551
],
[
12571,
12585
],
[
12849,
12863
],
[
13641,
13655
],
[
14816,
14830
],
[
15970,
15984
],
[
16110,
16124
]
],
[
[
287,
300
],
[
16361,
16374
]
],
[
[
306,
327
],
[
9397,
9418
],
[
15415,
15436
]
],
[
[
333,
353
],
[
10305,
10325
],
[
10898,
10918
],
[
12449,
12469
],
[
14694,
14714
],
[
15739,
15759
]
],
[
[
397,
400
],
[
13581,
13584
],
[
14235,
14238
]
],
[
[
406,
425
],
[
13506,
13525
],
[
14153,
14172
]
],
[
[
431,
437
],
[
15295,
15301
]
],
[
[
449,
469
],
[
4082,
4102
],
[
4390,
4410
]
],
[
[
3793,
3804
],
[
4479,
4490
],
[
4910,
4921
],
[
5781,
5792
],
[
6581,
6592
]
],
[
[
4463,
4478
],
[
5058,
5073
],
[
6824,
6839
]
],
[
[
4897,
4909
]
],
[
[
5767,
5780
],
[
8311,
8324
],
[
15064,
15077
]
],
[
[
6564,
6580
],
[
7619,
7635
],
[
7944,
7960
]
],
[
[
7601,
7618
],
[
7351,
7368
]
],
[
[
7924,
7943
],
[
7546,
7565
]
],
[
[
8290,
8310
],
[
10571,
10591
]
],
[
[
10548,
10570
],
[
10641,
10663
],
[
11137,
11159
],
[
11432,
11454
],
[
11778,
11800
],
[
12727,
12749
],
[
12998,
13020
]
],
[
[
10626,
10640
]
],
[
[
11122,
11136
]
],
[
[
11409,
11431
],
[
11676,
11698
]
],
[
[
11658,
11673
],
[
4736,
4751
],
[
8121,
8136
],
[
15247,
15262
]
],
[
[
11757,
11777
]
],
[
[
12714,
12726
]
],
[
[
12981,
12997
],
[
13380,
13396
],
[
13868,
13884
]
],
[
[
13359,
13379
]
],
[
[
13849,
13867
]
],
[
[
15046,
15063
],
[
16085,
16102
]
],
[
[
16071,
16084
]
]
] |
import pytest
from blacksheep.common.files.pathsutils import (
get_file_extension_from_name,
get_mime_type_from_name,
)
@pytest.mark.parametrize(
"full_path,expected_result",
[
("hello.txt", ".txt"),
(".gitignore", ".gitignore"),
("ØØ Void.album", ".album"),
("", ""),
],
)
def test_get_file_extension_from_name(full_path, expected_result):
assert get_file_extension_from_name(full_path) == expected_result
@pytest.mark.parametrize(
"full_path,expected_result",
[
("example.ogg", "audio/ogg"),
("example.jpg", "image/jpeg"),
("example.jpeg", "image/jpeg"),
("example.png", "image/png"),
("example.js", "application/javascript"),
("example.json", "application/json"),
("example.woff2", "font/woff2"),
("hello.txt", "text/plain"),
(".gitignore", "application/octet-stream"),
("ØØ Void.album", "application/octet-stream"),
("", "application/octet-stream"),
],
)
def test_get_mime_type(full_path, expected_result):
assert get_mime_type_from_name(full_path) == expected_result
| [
[
[
7,
13
],
[
132,
138
],
[
469,
475
]
],
[
[
68,
96
],
[
407,
435
]
],
[
[
102,
125
],
[
1083,
1106
]
],
[
[
333,
366
]
],
[
[
1024,
1042
]
]
] |
# tipo = coleta caracteres digitos para dizer seu tipo, se é numerio,etc..
# ========================================================================
# titulo e coleta de dados
print("\033[33m============[ EX 004 ]============")
print(34 * "=", "\033[m")
tipo = input("digite \033[33malgo\033[m: ")
print(34 * "\033[33m=", "\033[m")
# ========================================================================
# mostra informaçoes da variavel "tipo"
print(f"({tipo}) é do tipo: \033[33m{type(tipo)}\033[m")
print(f"({tipo}) é numero? \033[33m{tipo.isalnum()}\033[m")
print(f"({tipo}) é alpha numerico? \033[33m{tipo.isalpha()}\033[m")
print(f"({tipo}) é em minusculo? \033[33m{tipo.islower()}\033[m")
print(f"({tipo}) é em maiusculo? \033[33m{tipo.isupper()}\033[m")
print(f"({tipo}) so tem espaços? \033[33m{tipo.isspace()}\033[m")
# ========================================================================
| [
[
[
255,
259
],
[
458,
462
],
[
491,
495
],
[
515,
519
],
[
542,
546
],
[
575,
579
],
[
610,
614
],
[
643,
647
],
[
676,
680
],
[
709,
713
],
[
742,
746
],
[
775,
779
],
[
808,
812
]
]
] |
# -*- coding: utf-8 -*-
"""Nexus 3 CLI
Usage:
nexus3 --help, -h
nexus3 login
nexus3 (list|ls) <repository_path>
nexus3 (upload|up) <from_src> <to_repository>
nexus3 repo create hosted maven <repo_name>
[--blob=<store_name>] [--version=<v_policy>]
[--layout=<l_policy>] [--strict-content]
[--write=<w_policy>]
nexus3 repo create hosted (bower|npm|nuget|pypi|raw|rubygems) <repo_name>
[--blob=<store_name>] [--write=<w_policy>] [--strict-content]
nexus3 repo create hosted yum <repo_name>
[--blob=<store_name>] [--write=<w_policy>]
[--depth=<repo_depth>] [--strict-content]
nexus3 repo create proxy maven <repo_name> <remote_url>
[--blob=<store_name>] [--version=<v_policy>]
[--layout=<l_policy>] [--strict-content]
nexus3 repo create proxy (bower|npm|nuget|pypi|raw|rubygems|yum)
<repo_name> <remote_url>
[--blob=<store_name>] [--strict-content]
nexus3 repo list
nexus3 repo rm <repo_name> [--force]
nexus3 script create <script.json>
nexus3 script list
nexus3 script (rm|run) <script_name>
Options:
-h --help This screen
--blob=<store_name> Use this blob with new repository [default: default]
--depth=<repo_depth> Depth (0-5) where repodata folder(s) exist [default: 0]
--force, -f Execute action without confirmation
--write=<w_policy> Accepted: allow, allow_once, deny [default: allow_once]
--layout=<l_policy> Accepted: strict, permissive [default: strict]
--version=<v_policy> Accepted: release, snapshot, mixed [default: release]
--strict-content Enable strict content type validation
Commands:
login Test login and save credentials to ~/.nexus-cli
list List all files within a path in the repository
repo create Create a repository using the format and options provided
repo list List all repositories available on the server
repo rm Not implemented; please use Nexus Web UI to remove <repo_name>
script create Create or update a script using the <script.json> file
script list List all scripts available on the server
script rm Remove existing <script_name>
script run Run the existing <script_name>
"""
import getpass
import inflect
import json
import sys
import types
from docopt import docopt
from nexuscli.exception import NexusClientConfigurationNotFound
from nexuscli.nexus_client import NexusClient
from nexuscli import repository
PLURAL = inflect.engine().plural
def _input(prompt, default=None):
"""
:return: raw_input for Python 2.x and input for Python 3.x
:rtype: function
"""
if sys.version_info < (3, 0):
real_input = raw_input # noqa - Python2
else:
real_input = input
value = real_input('{prompt} ({default}):'.format(**locals()))
if value:
return value
return default
def do_login():
nexus_url = _input('Nexus OSS URL', NexusClient.DEFAULT_URL)
nexus_user = _input('Nexus admin username', NexusClient.DEFAULT_USER)
nexus_pass = getpass.getpass(
prompt='Nexus admin password ({}):'.format(
NexusClient.DEFAULT_PASS))
if not nexus_pass:
nexus_pass = NexusClient.DEFAULT_PASS
client = NexusClient(url=nexus_url, user=nexus_user, password=nexus_pass)
client.write_config()
sys.stderr.write('\nConfiguration saved to {}\n'.format(
NexusClient.CONFIG_PATH))
def get_client():
client = NexusClient()
try:
client.read_config()
return client
except NexusClientConfigurationNotFound:
sys.stderr.write(
'Configuration not found; please run nexus-cli.py login\n')
sys.exit(1)
def cmd_script_do_list(nexus_client):
json_response = nexus_client.scripts.list()
sys.stderr.write('Name (type)\n')
for script in json_response:
sys.stdout.write('{script[name]} ({script[type]})\n'.format(
script=script))
def cmd_script_do_create(nexus_client, script_path):
script_content = json.load(open(script_path), strict=False)
nexus_client.scripts.create(script_content)
def cmd_script(args):
nexus_client = get_client()
if args.get('list'):
cmd_script_do_list(nexus_client)
elif args.get('rm'):
nexus_client.scripts.delete(args.get('<script_name>'))
elif args.get('run'):
nexus_client.scripts.run(args.get('<script_name>'))
elif args.get('create'):
cmd_script_do_create(nexus_client, args.get('<script.json>'))
else:
raise NotImplementedError
def cmd_repo_do_list(nexus_client):
json_response = nexus_client.repo_list()
output_format = '{0:40} {1:7} {2:7} {3}\n'
sys.stderr.write(output_format.format('Name', 'Format', 'Type', 'URL'))
sys.stderr.write(output_format.format('----', '------', '----', '---'))
for repo in json_response:
sys.stdout.write(output_format.format(
repo['name'], repo['format'], repo['type'], repo['url']))
def args_to_repo_format(args):
# docopt guarantees only one is True
for format_name in repository.validations.KNOWN_FORMATS:
if args.get(format_name) is True:
return format_name
def args_to_repo_type(args):
# docopt guarantees only one is True
for type_name in repository.validations.KNOWN_TYPES:
if args.get(type_name) is True:
return type_name
def cmd_repo_create(nexus_client, args):
"""Performs ``rekt repo create *`` commands"""
r = repository.Repository(
args_to_repo_type(args),
ignore_extra_kwargs=True,
name=args.get('<repo_name>'),
format=args_to_repo_format(args),
blob_store_name=args.get('--blob'),
depth=int(args.get('--depth')),
remote_url=args.get('<remote_url>'),
strict_content_type_validation=args.get('--strict-content'),
version_policy=args.get('--version'),
write_policy=args.get('--write'),
layout_policy=args.get('--layout'),
)
nexus_client.repositories.create(r)
def cmd_repo(args):
"""Performs ``nexus3 repo *`` commands"""
nexus_client = get_client()
if args.get('list'):
cmd_repo_do_list(nexus_client)
elif args.get('create'):
cmd_repo_create(nexus_client, args)
elif args.get('rm'):
if not args.get('--force'):
_input('Press ENTER to confirm deletion', 'ctrl+c to cancel')
nexus_client.repositories.delete(args.get('<repo_name>'))
else:
raise NotImplementedError
def cmd_list(args):
"""Performs ``nexus3 list``"""
nexus_client = get_client()
repository_path = args['<repository_path>']
artefact_list = nexus_client.list(repository_path)
# FIXME: is types.GeneratorType still used?
if isinstance(artefact_list, (list, types.GeneratorType)):
for artefact in iter(artefact_list):
sys.stdout.write('{}\n'.format(artefact))
return 0
else:
return 1
def _cmd_up_down_errors(count, action):
"""Print and exit with error if upload/download didn't succeed"""
if count == 0:
# FIXME: inflex the action verb to past participle
sys.stderr.write('WARNING: no files were {}\'ed.'.format(action))
sys.exit(1)
if count == -1:
sys.stderr.write('ERROR during {} operation.'.format(action))
sys.exit(2)
def cmd_upload(args):
"""Performs ``nexus3 upload``"""
nexus_client = get_client()
source = args['<from_src>']
destination = args['<to_repository>']
sys.stderr.write(
'Uploading {source} to {destination}\n'.format(**locals()))
upload_count = nexus_client.upload(source, destination)
_cmd_up_down_errors(upload_count, 'upload')
file = PLURAL('file', upload_count)
sys.stderr.write(
'Uploaded {upload_count} {file} to {destination}\n'.format(**locals()))
return 0
def main(argv=None):
arguments = docopt(__doc__, argv=argv)
if arguments.get('login'):
do_login()
NexusClient()
elif arguments.get('script'):
cmd_script(arguments)
elif arguments.get('repo'):
cmd_repo(arguments)
elif arguments.get('list') or arguments.get('ls'):
cmd_list(arguments)
elif arguments.get('upload') or arguments.get('up'):
cmd_upload(arguments)
else:
raise NotImplementedError
| [
[
[
2250,
2257
],
[
3066,
3073
]
],
[
[
2265,
2272
],
[
2489,
2496
]
],
[
[
2280,
2284
],
[
4047,
4051
]
],
[
[
2292,
2295
],
[
2656,
2659
],
[
3353,
3356
],
[
3604,
3607
],
[
3702,
3705
],
[
3807,
3810
],
[
3882,
3885
],
[
4713,
4716
],
[
4789,
4792
],
[
4900,
4903
],
[
6907,
6910
],
[
7191,
7194
],
[
7265,
7268
],
[
7306,
7309
],
[
7376,
7379
],
[
7560,
7563
],
[
7801,
7804
]
],
[
[
2303,
2308
],
[
6827,
6832
]
],
[
[
2329,
2335
],
[
7951,
7957
]
],
[
[
2368,
2400
],
[
3562,
3594
]
],
[
[
2435,
2446
],
[
2950,
2961
],
[
3023,
3034
],
[
3147,
3158
],
[
3218,
3229
],
[
3257,
3268
],
[
3418,
3429
],
[
3477,
3488
],
[
8036,
8047
]
],
[
[
2468,
2478
],
[
5106,
5116
],
[
5310,
5320
],
[
5517,
5527
]
],
[
[
2480,
2486
],
[
7768,
7774
]
],
[
[
2519,
2525
],
[
2926,
2932
],
[
2992,
2998
],
[
6374,
6380
]
],
[
[
2898,
2906
],
[
8017,
8025
]
],
[
[
3450,
3460
],
[
4181,
4191
],
[
6150,
6160
],
[
6622,
6632
],
[
7468,
7478
]
],
[
[
3720,
3738
],
[
4228,
4246
]
],
[
[
3977,
3997
],
[
4472,
4492
]
],
[
[
4144,
4154
],
[
8092,
8102
]
],
[
[
4584,
4600
],
[
6197,
6213
]
],
[
[
5015,
5034
],
[
5660,
5679
]
],
[
[
5223,
5240
],
[
5548,
5565
]
],
[
[
5421,
5436
],
[
6265,
6280
]
],
[
[
6069,
6077
],
[
8154,
8162
]
],
[
[
6552,
6560
],
[
8237,
8245
]
],
[
[
6999,
7018
],
[
7712,
7731
]
],
[
[
7394,
7404
],
[
8322,
8332
]
],
[
[
7918,
7922
]
]
] |
#! /usr/bin/python3
# Author: Maximilian Muth <mail@maxi-muth.de>
# https://github.com/mammuth/bing-wallpaper
# Version: 1.0
# License: GPL-2.0
# Description: Downloads the Bing picture of the Day and sets it as wallpaper (Linux / Windows).
import datetime
from urllib.request import urlopen, urlretrieve
from xml.dom import minidom
import os
import sys
def join_path(*args):
# Takes an list of values or multiple values and returns an valid path.
if isinstance(args[0], list):
path_list = args[0]
else:
path_list = args
val = [str(v).strip(' ') for v in path_list]
return os.path.normpath('/'.join(val))
dir_path = os.path.dirname(os.path.realpath(__file__))
save_dir = join_path(dir_path, 'images')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
def set_wallpaper(pic_path):
if sys.platform.startswith('win32'):
cmd = 'REG ADD \"HKCU\Control Panel\Desktop\" /v Wallpaper /t REG_SZ /d \"%s\" /f' %pic_path
os.system(cmd)
os.system('rundll32.exe user32.dll, UpdatePerUserSystemParameters')
print('Wallpaper is set.')
elif sys.platform.startswith('linux2'):
os.system(''.join(['gsettings set org.gnome.desktop.background picture-uri file://', pic_path]))
print('Wallpaper is set.')
else:
print('OS not supported.')
return
return
def download_old_wallpapers(minus_days=False):
"""Uses download_wallpaper(set_wallpaper=False) to download the last 20 wallpapers.
If minus_days is given an integer a specific day in the past will be downloaded.
"""
if minus_days:
download_wallpaper(idx=minus_days, use_wallpaper=False)
return
for i in range(0, 20): # max 20
download_wallpaper(idx=i, use_wallpaper=False)
def download_wallpaper(idx=0, use_wallpaper=False):
# Getting the XML File
try:
usock = urlopen(''.join(['https://www.bing.com/HPImageArchive.aspx?format=xml&idx=',
str(idx), '&n=10&mkt=ru-RU'])) # ru-RU, because they always have 1920x1200 resolution
except Exception as e:
print('Error while downloading #', idx, e)
return
try:
xmldoc = minidom.parse(usock)
print(xmldoc)
# This is raised when there is trouble finding the image url.
except Exception as e:
print('Error while processing XML index #', idx, e)
return
# Parsing the XML File
print(len(xmldoc.getElementsByTagName('image')))
for image in xmldoc.getElementsByTagName('image'):
element = image.childNodes[3]
startdate = image.childNodes[0].firstChild.nodeValue
#print(element, fullstartdate)
url = 'https://www.bing.com' + element.firstChild.nodeValue
# Get Current Date as fileName for the downloaded Picture
now = datetime.datetime.now()
date = now - datetime.timedelta(days=int(idx))
#pic_path = join_path(save_dir, ''.join([date.strftime('bing_wp_%d-%m-%Y'), '.jpg']))
pic_path = join_path(save_dir, ''.join([startdate, '_', url.split("/")[-1] ]))
if os.path.isfile(pic_path):
print('Image of', date.strftime('%d-%m-%Y'), 'already downloaded.')
if use_wallpaper:
set_wallpaper(pic_path)
continue
print('Downloading: ', date.strftime('%d-%m-%Y'), 'index #', idx)
# Download and Save the Picture
# Get a higher resolution by replacing the file name
try:
urlretrieve(url.replace('_1366x768', '_1920x1200'), pic_path)
except Exception as e:
print('Error while downloading #', idx, e)
urlretrieve(url, pic_path)
# Set Wallpaper if wanted by user
if use_wallpaper:
set_wallpaper(pic_path)
if __name__ == "__main__":
download_wallpaper()
download_old_wallpapers(minus_days=False)
| [
[
[
249,
257
],
[
2836,
2844
],
[
2881,
2889
]
],
[
[
285,
292
],
[
1888,
1895
]
],
[
[
294,
305
],
[
3505,
3516
],
[
3665,
3676
]
],
[
[
326,
333
],
[
2204,
2211
]
],
[
[
341,
343
],
[
656,
658
],
[
672,
674
],
[
748,
750
],
[
778,
780
],
[
612,
614
],
[
981,
983
],
[
1004,
1006
],
[
1159,
1161
],
[
3107,
3109
]
],
[
[
351,
354
],
[
838,
841
],
[
1116,
1119
]
],
[
[
361,
370
],
[
711,
720
],
[
3028,
3037
]
],
[
[
645,
653
],
[
721,
729
]
],
[
[
700,
708
],
[
763,
771
],
[
790,
798
],
[
3038,
3046
]
],
[
[
806,
819
],
[
3259,
3272
],
[
3772,
3785
]
],
[
[
1368,
1391
],
[
3854,
3877
]
],
[
[
1788,
1806
],
[
3829,
3847
],
[
1619,
1637
],
[
1735,
1753
]
]
] |
import abc
import tempfile
import os
import typing
from core.data_block import DataBlock
Symbol = typing.Any
class DataStream(abc.ABC):
"""abstract class to represent a Data Stream
The DataStream facilitates the block interface.
From the interface standpoint, the two functions which are useful are:
- get_block(block_size) -> returns a DataBlock of the given block_size from the stream
- write_block(block) -> writes the block of data to the stream
The DataStream can act as a stream object for both writing and reading blocks
The two more useful sub-classes of the abstract class are FileDataStream and ListDataStream.
(see their description for more details)
"""
@abc.abstractmethod
def seek(self, pos: int):
"""seek a particular position in the data stream"""
pass
@abc.abstractmethod
def get_symbol(self):
"""returns a symbol from the data stream, returns None if the stream is finished
This is an abstract method, and hence needs to be implemented by the subclasses
"""
pass
def get_block(self, block_size: int) -> DataBlock:
"""returns a block of data (of the given max size) from the stream
get_block function tries to return a block of size `block_size`.
In case the remaining stream is shorter, a smaller block will be returned
Args:
block_size (int): the (max) size of the block of data to be returned.
Returns:
DataBlock:
"""
# NOTE: we implement get_block as a loop over get_symbol function
# this is not the most optimal way of imeplemting get_block (as reading a block of data at once might be faster)
# TODO: investigate faster ways of directly reading a block
data_list = []
for _ in range(block_size):
# get next symbol
s = self.get_symbol()
if s is None:
break
data_list.append(s)
# if data_list is empty, return None to signal the stream is over
if not data_list:
return None
return DataBlock(data_list)
@abc.abstractmethod
def write_symbol(self, s):
"""writes the given symbol to the stream
The symbol can be appropriately converted to a particular format before writing.
This is an abstract method and so, the subclass will have to implement it
Args:
s (Any): symbol to be written to the stream
"""
pass
def write_block(self, data_block: DataBlock):
"""write the input block to the stream
Args:
data_block (DataBlock): block to be written to the stream
"""
# NOTE: we implement write_block as a loop over write_symbol function
# this is not the most optimal way of imeplemting write_block (as writing a block of data at once might be faster)
# TODO: investigate faster ways of directly writing a block
for s in data_block.data_list:
self.write_symbol(s)
def __enter__(self):
"""function executed while opening the context
See: https://realpython.com/python-with-statement/. More details in FileDataStream.__enter__ docstring
"""
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Function executed which exiting the context
Note that the arguments exc_type, exc_value, exc_traceback are as required by python for a context
"""
pass
class ListDataStream(DataStream):
"""
ListDataStream is a wrapper around a list of symbols.
It is useful to:
- extract data from the list block by block
- write data to the list block by block
In practice, this class might be used mainly for testing
(as usually you would read data from a file.. see FileDataStream for that)
"""
def __init__(self, input_list: typing.List):
"""initialize with input_list and reset the stream
Args:
input_list (List): the list of symbols, around which the class is a wrapper
Usage:
with ListDataStream(input_list) as ds:
block = ds.get_block(block_size=5)
# do something with the block
"""
# assert whether the input_list is indeed a list
assert isinstance(input_list, list)
self.input_list = input_list
# set the position counter
self.current_ind = 0
def seek(self, pos: int):
"""set the current_ind to a particular pos"""
assert pos <= len(self.input_list)
self.current_ind = pos
def get_symbol(self) -> Symbol:
"""returns the next symbol from the self.input_list"""
# retrieve the next symbol
if self.current_ind >= len(self.input_list):
return None
s = self.input_list[self.current_ind]
# increment the current_ind counter
self.current_ind += 1
return s
def write_symbol(self, s: Symbol):
"""write a symbol to the stream"""
assert self.current_ind <= len(self.input_list)
# the case where we modify a symbol
if self.current_ind < len(self.input_list):
self.input_list[self.current_ind] = s
else:
# case where we append a symbol
self.input_list.append(s)
class FileDataStream(DataStream):
"""Abstract class to create a data stream from a File
The FileDataStream defines __exit__, __enter__ methods on top of DataStream.
These methods handle file obj opening/closing
Subclasses (eg: TextDataStream) need to imeplement methods get_symbol, write_symbol
to get a functional object.
"""
def __init__(self, file_path: str, permissions="r"):
"""Initialize the FileDataStream object
Args:
file_path (str): path of the file to read from/write to
permissions (str, optional): Permissions to open the file obj. Use "r" to read, "w" to write to
(other pyhton file obj permissions also can be used). Defaults to "r".
"""
self.file_path = file_path
self.permissions = permissions
def __enter__(self):
"""open the file object context based on the permissions specified
NOTE: One way of cleanly managing resources in python is using the with statement
as shown in the example below. This ensures the resource is released when exiting the context.
One way to support allow using with statement is defining __enter__ and __exit__ statements,
which allow for executing functions while entering or exiting the context.
Reference: https://realpython.com/python-with-statement/
Example:
with TextFileDataStream(path, "w") as fds:
# get a text block
block = fds.get_block(5)
"""
self.file_obj = open(self.file_path, self.permissions)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
"""close the file object at the end of context
please take a look __enter__ docstring for more info.
Reference: https://realpython.com/python-with-statement/
"""
self.file_obj.close()
def seek(self, pos: int):
"""resets the file object to the beginning"""
self.file_obj.seek(pos)
class TextFileDataStream(FileDataStream):
"""FileDataStream to read/write text data"""
def get_symbol(self):
"""get the next character from the text file
as we read character data from file by default, the get_symbol function does not need to do anything special
conversions
Returns:
(str, None): the next character, None if we reached the end of stream
"""
s = self.file_obj.read(1)
if not s:
return None
return s
def write_symbol(self, s):
"""write a character to the text file"""
self.file_obj.write(s)
class Uint8FileDataStream(FileDataStream):
"""reads Uint8 numbers written to a file
FIXME: need to immplement
"""
pass
#################################
def test_list_data_stream():
"""simple testing function to check if list data stream is getting generated correctly"""
input_list = list(range(10))
with ListDataStream(input_list) as ds:
for i in range(3):
block = ds.get_block(block_size=3)
assert block.size == 3
block = ds.get_block(block_size=2)
assert block.size == 1
block = ds.get_block(block_size=2)
assert block is None
# try seeking and reading
ds.seek(7)
block = ds.get_block(block_size=5)
assert block.size == 3
assert block.data_list[0] == 7
# try seeking and writing
ds.seek(5)
ds.write_symbol(-1)
block = ds.get_block(block_size=5)
assert block.size == 5
assert block.data_list[0] == -1
def test_file_data_stream():
"""function to test file data stream"""
# create a temporary file
with tempfile.TemporaryDirectory() as tmpdirname:
temp_file_path = os.path.join(tmpdirname, "tmp_file.txt")
# write data to the file
data_gt = DataBlock(list("This-is_a_test_file"))
with TextFileDataStream(temp_file_path, "w") as fds:
fds.write_block(data_gt)
# try seeking to correct symbol at pos 4
fds.seek(4)
fds.write_symbol("_")
# read data from the file
with TextFileDataStream(temp_file_path, "r") as fds:
block = fds.get_block(block_size=4)
assert block.size == 4
# try seeking and reading
fds.seek(4)
block = fds.get_block(block_size=4)
assert block.data_list[0] == "_"
| [
[
[
7,
10
],
[
129,
132
],
[
713,
716
],
[
841,
844
],
[
2160,
2163
]
],
[
[
18,
26
],
[
9126,
9134
]
],
[
[
34,
36
],
[
9196,
9198
]
],
[
[
44,
50
],
[
99,
105
],
[
3937,
3943
]
],
[
[
79,
88
],
[
1134,
1143
],
[
2133,
2142
],
[
2566,
2575
],
[
9289,
9298
]
],
[
[
90,
96
],
[
4681,
4687
],
[
5034,
5040
]
],
[
[
118,
128
],
[
3560,
3570
],
[
5408,
5418
]
],
[
[
3545,
3559
],
[
8357,
8371
]
],
[
[
5393,
5407
],
[
7414,
7428
],
[
8043,
8057
]
],
[
[
7395,
7413
],
[
9341,
9359
],
[
9586,
9604
]
],
[
[
8023,
8042
]
],
[
[
8196,
8217
]
],
[
[
9017,
9038
]
]
] |
def reverse_string(a_string: str):
"""Take the input a_string and return it reversed (e.g. "hello" becomes
"olleh"."""
reversed_string = ""
for i in range(len(a_string)):
reversed_string += a_string[~i]
return reversed_string
| [
[
[
4,
18
]
]
] |
#
# This file contains the Python code from Program 16.10 of
# "Data Structures and Algorithms
# with Object-Oriented Design Patterns in Python"
# by Bruno R. Preiss.
#
# Copyright (c) 2003 by Bruno R. Preiss, P.Eng. All rights reserved.
#
# http://www.brpreiss.com/books/opus7/programs/pgm16_10.txt
#
class Graph(Container):
def breadthFirstTraversal(self, visitor, start):
assert isinstance(visitor, Visitor)
enqueued = Array(self._numberOfVertices)
for v in xrange(self._numberOfVertices):
enqueued[v] = False
queue = QueueAsLinkedList()
queue.enqueue(self[start])
enqueued[start] = True
while not queue.isEmpty and not visitor.isDone:
v = queue.dequeue()
visitor.visit(v)
for to in v.successors:
if not enqueued[to.number]:
queue.enqueue(to)
enqueued[to.number] = True
# ...
| [
[
[
309,
314
]
]
] |
from os import path, system, mkdir
from shutil import rmtree
from jinja2 import Template
from . import config
from .log import log
print(__file__)
def get_template(name):
template_path = path.join(path.dirname(__file__), 'templates/', name + ".jinja2")
with open(template_path) as file_:
template = Template(file_.read())
return template
def reload_nginx():
log.debug("Reloading nginx...")
system('service nginx reload')
log.debug("Reload complete.")
def install_config(fname, data):
outpath = path.join(config.SITES_ENABLED_DIR, fname)
log.debug(f"saving config to {outpath}")
with open(outpath, "w") as outfile:
outfile.write(data)
def install_ssl(domain):
log.info(f"Install TLS Certification for {domain}")
system(f"certbot --nginx -d {domain}")
def init_root(root):
log.debug(f"Creating Document root: {root}")
ok = mkdir_confirm(root)
def mkdir_confirm(root):
# p = path.join(config.WEB_ROOT, domain)
if not path.exists(root):
mkdir(root)
return True
else:
confirm = input(f"{root} already exists. Overwite? (y/n): ")
if config == "y":
rmtree(root)
mkdir(root)
return True
else:
log.warn(f"Aborting.")
return False | [
[
[
15,
19
],
[
194,
198
],
[
204,
208
],
[
539,
543
],
[
1007,
1011
]
],
[
[
21,
27
],
[
426,
432
],
[
782,
788
]
],
[
[
29,
34
],
[
1034,
1039
],
[
1208,
1213
]
],
[
[
54,
60
],
[
1183,
1189
]
],
[
[
80,
88
],
[
318,
326
]
],
[
[
103,
109
],
[
549,
555
],
[
1156,
1162
]
],
[
[
127,
130
],
[
390,
393
],
[
461,
464
],
[
586,
589
],
[
726,
729
],
[
847,
850
],
[
1270,
1273
]
],
[
[
153,
165
]
],
[
[
370,
382
]
],
[
[
496,
510
]
],
[
[
701,
712
]
],
[
[
826,
835
]
],
[
[
930,
943
],
[
901,
914
]
]
] |
# prefix where servers are kept
prefix = '/var/lib/mcp/servers'
# whether or not to allow server creation
creation = True
# whether to put servers and scripts in a container
container = False
# directory where the sources are kept; ignored if creation is disabled
sources = '/var/lib/mcp/sources'
# temprorary directory to build under; ignored if creation is disabled
tmp = '/tmp/mcp'
# directory where default configuration is kept; ignored if creation is disabled
config = '/var/lib/mcp/config'
# directory where the scripting libraries are kept; None to disable scripting libraries
scripting = '/var/lib/mcp/scripting'
# directory where the databases are kept
database = '/var/db/mcp'
# max size of server log files in kB before they are rotated; None to disable server log rotation
maxlogsize = 100
# range to automatically choose server ports
portrange = (4534, 4634)
# path to manager log; None to disable logging
log = '/var/log/mcp/manager.log'
# path to command output log; None to disable logging
cmdlog = '/var/log/mcp/command.log'
# path to HTTP log; None to disable logging
httpdlog = '/var/log/mcp/httpd.log'
# path to HTTP access log; None to disable logging
accesslog = '/var/log/mcp/access.log'
# template directory to use
import os.path
template = os.path.join(os.path.dirname(__file__), 'page', 'html')
resource = os.path.join(os.path.dirname(__file__), 'page', 'res')
# address and port of the web interface
addr = ('', 8000)
# address and port of the sftp server
sftpaddr = ('', 2222)
# sftp host key
sftpkey = '/var/lib/mcp/sftp/ssh_host_rsa_key'
# path to TLS/SSL key and certificate files; None to disable TLS encryption
tlskey = None
tlscert = None
# user to drop privileges to if run as root
user = 'mcp'
# how long to wait between server polls
poll_interval = 0.5
| [
[
[
32,
38
]
],
[
[
107,
115
]
],
[
[
176,
185
]
],
[
[
267,
274
]
],
[
[
372,
375
]
],
[
[
471,
477
]
],
[
[
591,
600
]
],
[
[
670,
678
]
],
[
[
794,
804
]
],
[
[
857,
866
]
],
[
[
930,
933
]
],
[
[
1018,
1024
]
],
[
[
1099,
1107
]
],
[
[
1187,
1196
]
],
[
[
1261,
1268
],
[
1280,
1282
],
[
1293,
1295
],
[
1347,
1349
],
[
1360,
1362
]
],
[
[
1269,
1277
]
],
[
[
1336,
1344
]
],
[
[
1443,
1447
]
],
[
[
1500,
1508
]
],
[
[
1539,
1546
]
],
[
[
1663,
1669
]
],
[
[
1677,
1684
]
],
[
[
1737,
1741
]
],
[
[
1791,
1804
]
]
] |
#!/usr/bin/env python
#
# Electrum - lightweight UraniumX client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import copy
import datetime
import traceback
import time
from typing import TYPE_CHECKING, Callable, Optional, List, Union
from functools import partial
from decimal import Decimal
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtGui import QTextCharFormat, QBrush, QFont, QPixmap
from PyQt5.QtWidgets import (QDialog, QLabel, QPushButton, QHBoxLayout, QVBoxLayout, QWidget, QGridLayout,
QTextEdit, QFrame, QAction, QToolButton, QMenu, QCheckBox)
import qrcode
from qrcode import exceptions
from electrum.simple_config import SimpleConfig
from electrum.util import quantize_feerate
from electrum.bitcoin import base_encode, NLOCKTIME_BLOCKHEIGHT_MAX
from electrum.i18n import _
from electrum.plugin import run_hook
from electrum import simple_config
from electrum.transaction import SerializationError, Transaction, PartialTransaction, PartialTxInput
from electrum.logging import get_logger
from .util import (MessageBoxMixin, read_QIcon, Buttons, icon_path,
MONOSPACE_FONT, ColorScheme, ButtonsLineEdit, text_dialog,
char_width_in_lineedit, TRANSACTION_FILE_EXTENSION_FILTER_SEPARATE,
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX,
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX,
BlockingWaitingDialog, getSaveFileName, ColorSchemeItem)
from .fee_slider import FeeSlider, FeeComboBox
from .confirm_tx_dialog import TxEditor
from .amountedit import FeerateEdit, BTCAmountEdit
from .locktimeedit import LockTimeEdit
if TYPE_CHECKING:
from .main_window import ElectrumWindow
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
class TxFiatLabel(QLabel):
def setAmount(self, fiat_fee):
self.setText(('≈ %s' % fiat_fee) if fiat_fee else '')
class QTextEditWithDefaultSize(QTextEdit):
def sizeHint(self):
return QSize(0, 100)
_logger = get_logger(__name__)
dialogs = [] # Otherwise python randomly garbage collects the dialogs...
def show_transaction(tx: Transaction, *, parent: 'ElectrumWindow', desc=None, prompt_if_unsaved=False):
try:
d = TxDialog(tx, parent=parent, desc=desc, prompt_if_unsaved=prompt_if_unsaved)
except SerializationError as e:
_logger.exception('unable to deserialize the transaction')
parent.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
else:
d.show()
class BaseTxDialog(QDialog, MessageBoxMixin):
def __init__(self, *, parent: 'ElectrumWindow', desc, prompt_if_unsaved, finalized: bool, external_keypairs=None):
'''Transactions in the wallet will show their description.
Pass desc to give a description for txs not yet in the wallet.
'''
# We want to be a top-level window
QDialog.__init__(self, parent=None)
self.tx = None # type: Optional[Transaction]
self.external_keypairs = external_keypairs
self.finalized = finalized
self.main_window = parent
self.config = parent.config
self.wallet = parent.wallet
self.prompt_if_unsaved = prompt_if_unsaved
self.saved = False
self.desc = desc
self.setMinimumWidth(640)
self.resize(1200,600)
self.set_title()
self.psbt_only_widgets = [] # type: List[QWidget]
vbox = QVBoxLayout()
self.setLayout(vbox)
vbox.addWidget(QLabel(_("Transaction ID:")))
self.tx_hash_e = ButtonsLineEdit()
qr_show = lambda: parent.show_qrcode(str(self.tx_hash_e.text()), 'Transaction ID', parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.tx_hash_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.tx_hash_e.setReadOnly(True)
vbox.addWidget(self.tx_hash_e)
self.add_tx_stats(vbox)
vbox.addSpacing(10)
self.inputs_header = QLabel()
vbox.addWidget(self.inputs_header)
self.inputs_textedit = QTextEditWithDefaultSize()
vbox.addWidget(self.inputs_textedit)
self.txo_color_recv = TxOutputColoring(
legend=_("Receiving Address"), color=ColorScheme.GREEN, tooltip=_("Wallet receive address"))
self.txo_color_change = TxOutputColoring(
legend=_("Change Address"), color=ColorScheme.YELLOW, tooltip=_("Wallet change address"))
self.txo_color_2fa = TxOutputColoring(
legend=_("TrustedCoin (2FA) batch fee"), color=ColorScheme.BLUE, tooltip=_("TrustedCoin (2FA) fee for the next batch of transactions"))
outheader_hbox = QHBoxLayout()
outheader_hbox.setContentsMargins(0, 0, 0, 0)
vbox.addLayout(outheader_hbox)
self.outputs_header = QLabel()
outheader_hbox.addWidget(self.outputs_header)
outheader_hbox.addStretch(2)
outheader_hbox.addWidget(self.txo_color_recv.legend_label)
outheader_hbox.addWidget(self.txo_color_change.legend_label)
outheader_hbox.addWidget(self.txo_color_2fa.legend_label)
self.outputs_textedit = QTextEditWithDefaultSize()
vbox.addWidget(self.outputs_textedit)
self.sign_button = b = QPushButton(_("Sign"))
b.clicked.connect(self.sign)
self.broadcast_button = b = QPushButton(_("Broadcast"))
b.clicked.connect(self.do_broadcast)
self.save_button = b = QPushButton(_("Save"))
b.clicked.connect(self.save)
self.cancel_button = b = QPushButton(_("Close"))
b.clicked.connect(self.close)
b.setDefault(True)
self.export_actions_menu = export_actions_menu = QMenu()
self.add_export_actions_to_menu(export_actions_menu)
export_actions_menu.addSeparator()
export_submenu = export_actions_menu.addMenu(_("For CoinJoin; strip privates"))
self.add_export_actions_to_menu(export_submenu, gettx=self._gettx_for_coinjoin)
self.psbt_only_widgets.append(export_submenu)
export_submenu = export_actions_menu.addMenu(_("For hardware device; include xpubs"))
self.add_export_actions_to_menu(export_submenu, gettx=self._gettx_for_hardware_device)
self.psbt_only_widgets.append(export_submenu)
self.export_actions_button = QToolButton()
self.export_actions_button.setText(_("Export"))
self.export_actions_button.setMenu(export_actions_menu)
self.export_actions_button.setPopupMode(QToolButton.InstantPopup)
self.finalize_button = QPushButton(_('Finalize'))
self.finalize_button.clicked.connect(self.on_finalize)
partial_tx_actions_menu = QMenu()
ptx_merge_sigs_action = QAction(_("Merge signatures from"), self)
ptx_merge_sigs_action.triggered.connect(self.merge_sigs)
partial_tx_actions_menu.addAction(ptx_merge_sigs_action)
self._ptx_join_txs_action = QAction(_("Join inputs/outputs"), self)
self._ptx_join_txs_action.triggered.connect(self.join_tx_with_another)
partial_tx_actions_menu.addAction(self._ptx_join_txs_action)
self.partial_tx_actions_button = QToolButton()
self.partial_tx_actions_button.setText(_("Combine"))
self.partial_tx_actions_button.setMenu(partial_tx_actions_menu)
self.partial_tx_actions_button.setPopupMode(QToolButton.InstantPopup)
self.psbt_only_widgets.append(self.partial_tx_actions_button)
# Action buttons
self.buttons = [self.partial_tx_actions_button, self.sign_button, self.broadcast_button, self.cancel_button]
# Transaction sharing buttons
self.sharing_buttons = [self.finalize_button, self.export_actions_button, self.save_button]
run_hook('transaction_dialog', self)
if not self.finalized:
self.create_fee_controls()
vbox.addWidget(self.feecontrol_fields)
self.hbox = hbox = QHBoxLayout()
hbox.addLayout(Buttons(*self.sharing_buttons))
hbox.addStretch(1)
hbox.addLayout(Buttons(*self.buttons))
vbox.addLayout(hbox)
self.set_buttons_visibility()
dialogs.append(self)
def set_buttons_visibility(self):
for b in [self.export_actions_button, self.save_button, self.sign_button, self.broadcast_button, self.partial_tx_actions_button]:
b.setVisible(self.finalized)
for b in [self.finalize_button]:
b.setVisible(not self.finalized)
def set_tx(self, tx: 'Transaction'):
# Take a copy; it might get updated in the main window by
# e.g. the FX plugin. If this happens during or after a long
# sign operation the signatures are lost.
self.tx = tx = copy.deepcopy(tx)
try:
self.tx.deserialize()
except BaseException as e:
raise SerializationError(e)
# If the wallet can populate the inputs with more info, do it now.
# As a result, e.g. we might learn an imported address tx is segwit,
# or that a beyond-gap-limit address is is_mine.
# note: this might fetch prev txs over the network.
BlockingWaitingDialog(
self,
_("Adding info to tx, from wallet and network..."),
lambda: tx.add_info_from_wallet(self.wallet),
)
def do_broadcast(self):
self.main_window.push_top_level_window(self)
self.main_window.save_pending_invoice()
try:
self.main_window.broadcast_transaction(self.tx)
finally:
self.main_window.pop_top_level_window(self)
self.saved = True
self.update()
def closeEvent(self, event):
if (self.prompt_if_unsaved and not self.saved
and not self.question(_('This transaction is not saved. Close anyway?'), title=_("Warning"))):
event.ignore()
else:
event.accept()
try:
dialogs.remove(self)
except ValueError:
pass # was not in list already
def reject(self):
# Override escape-key to close normally (and invoke closeEvent)
self.close()
def add_export_actions_to_menu(self, menu: QMenu, *, gettx: Callable[[], Transaction] = None) -> None:
if gettx is None:
gettx = lambda: None
action = QAction(_("Copy to clipboard"), self)
action.triggered.connect(lambda: self.copy_to_clipboard(tx=gettx()))
menu.addAction(action)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
action = QAction(read_QIcon(qr_icon), _("Show as QR code"), self)
action.triggered.connect(lambda: self.show_qr(tx=gettx()))
menu.addAction(action)
action = QAction(_("Export to file"), self)
action.triggered.connect(lambda: self.export_to_file(tx=gettx()))
menu.addAction(action)
def _gettx_for_coinjoin(self) -> PartialTransaction:
if not isinstance(self.tx, PartialTransaction):
raise Exception("Can only export partial transactions for coinjoins.")
tx = copy.deepcopy(self.tx)
tx.prepare_for_export_for_coinjoin()
return tx
def _gettx_for_hardware_device(self) -> PartialTransaction:
if not isinstance(self.tx, PartialTransaction):
raise Exception("Can only export partial transactions for hardware device.")
tx = copy.deepcopy(self.tx)
tx.add_info_from_wallet(self.wallet, include_xpubs=True)
# log warning if PSBT_*_BIP32_DERIVATION fields cannot be filled with full path due to missing info
from electrum.keystore import Xpub
def is_ks_missing_info(ks):
return (isinstance(ks, Xpub) and (ks.get_root_fingerprint() is None
or ks.get_derivation_prefix() is None))
if any([is_ks_missing_info(ks) for ks in self.wallet.get_keystores()]):
_logger.warning('PSBT was requested to be filled with full bip32 paths but '
'some keystores lacked either the derivation prefix or the root fingerprint')
return tx
def copy_to_clipboard(self, *, tx: Transaction = None):
if tx is None:
tx = self.tx
self.main_window.do_copy(str(tx), title=_("Transaction"))
def show_qr(self, *, tx: Transaction = None):
if tx is None:
tx = self.tx
qr_data = tx.to_qr_data()
try:
self.main_window.show_qrcode(qr_data, 'Transaction', parent=self)
except qrcode.exceptions.DataOverflowError:
self.show_error(_('Failed to display QR code.') + '\n' +
_('Transaction is too large in size.'))
except Exception as e:
self.show_error(_('Failed to display QR code.') + '\n' + repr(e))
def sign(self):
def sign_done(success):
if self.tx.is_complete():
self.prompt_if_unsaved = True
self.saved = False
self.update()
self.main_window.pop_top_level_window(self)
self.sign_button.setDisabled(True)
self.main_window.push_top_level_window(self)
self.main_window.sign_tx(self.tx, callback=sign_done, external_keypairs=self.external_keypairs)
def save(self):
self.main_window.push_top_level_window(self)
if self.main_window.save_transaction_into_wallet(self.tx):
self.save_button.setDisabled(True)
self.saved = True
self.main_window.pop_top_level_window(self)
def export_to_file(self, *, tx: Transaction = None):
if tx is None:
tx = self.tx
if isinstance(tx, PartialTransaction):
tx.finalize_psbt()
txid = tx.txid()
suffix = txid[0:8] if txid is not None else time.strftime('%Y%m%d-%H%M')
if tx.is_complete():
extension = 'txn'
default_filter = TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX
else:
extension = 'psbt'
default_filter = TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX
name = f'{self.wallet.basename()}-{suffix}.{extension}'
fileName = getSaveFileName(
parent=self,
title=_("Select where to save your transaction"),
filename=name,
filter=TRANSACTION_FILE_EXTENSION_FILTER_SEPARATE,
default_extension=extension,
default_filter=default_filter,
config=self.config,
)
if not fileName:
return
if tx.is_complete(): # network tx hex
with open(fileName, "w+") as f:
network_tx_hex = tx.serialize_to_network()
f.write(network_tx_hex + '\n')
else: # if partial: PSBT bytes
assert isinstance(tx, PartialTransaction)
with open(fileName, "wb+") as f:
f.write(tx.serialize_as_bytes())
self.show_message(_("Transaction exported successfully"))
self.saved = True
def merge_sigs(self):
if not isinstance(self.tx, PartialTransaction):
return
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction to merge signatures from") + ":",
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.main_window.tx_from_text(text)
if not tx:
return
try:
self.tx.combine_with_other_psbt(tx)
except Exception as e:
self.show_error(_("Error combining partial transactions") + ":\n" + repr(e))
return
self.update()
def join_tx_with_another(self):
if not isinstance(self.tx, PartialTransaction):
return
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction to join with") + " (" + _("add inputs and outputs") + "):",
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.main_window.tx_from_text(text)
if not tx:
return
try:
self.tx.join_with_other_psbt(tx)
except Exception as e:
self.show_error(_("Error joining partial transactions") + ":\n" + repr(e))
return
self.update()
def update(self):
if not self.finalized:
self.update_fee_fields()
self.finalize_button.setEnabled(self.can_finalize())
if self.tx is None:
return
self.update_io()
desc = self.desc
base_unit = self.main_window.base_unit()
format_amount = self.main_window.format_amount
format_fiat_and_units = self.main_window.format_fiat_and_units
tx_details = self.wallet.get_tx_info(self.tx)
tx_mined_status = tx_details.tx_mined_status
exp_n = tx_details.mempool_depth_bytes
amount, fee = tx_details.amount, tx_details.fee
size = self.tx.estimated_size()
txid = self.tx.txid()
fx = self.main_window.fx
tx_item_fiat = None
if (self.finalized # ensures we don't use historical rates for tx being constructed *now*
and txid is not None and fx.is_enabled() and amount is not None):
tx_item_fiat = self.wallet.get_tx_item_fiat(
tx_hash=txid, amount_sat=abs(amount), fx=fx, tx_fee=fee)
lnworker_history = self.wallet.lnworker.get_onchain_history() if self.wallet.lnworker else {}
if txid in lnworker_history:
item = lnworker_history[txid]
ln_amount = item['amount_msat'] / 1000
if amount is None:
tx_mined_status = self.wallet.lnworker.lnwatcher.get_tx_height(txid)
else:
ln_amount = None
self.broadcast_button.setEnabled(tx_details.can_broadcast)
can_sign = not self.tx.is_complete() and \
(self.wallet.can_sign(self.tx) or bool(self.external_keypairs))
self.sign_button.setEnabled(can_sign)
if self.finalized and tx_details.txid:
self.tx_hash_e.setText(tx_details.txid)
else:
# note: when not finalized, RBF and locktime changes do not trigger
# a make_tx, so the txid is unreliable, hence:
self.tx_hash_e.setText(_('Unknown'))
if not desc:
self.tx_desc.hide()
else:
self.tx_desc.setText(_("Description") + ': ' + desc)
self.tx_desc.show()
self.status_label.setText(_('Status:') + ' ' + tx_details.status)
if tx_mined_status.timestamp:
time_str = datetime.datetime.fromtimestamp(tx_mined_status.timestamp).isoformat(' ')[:-3]
self.date_label.setText(_("Date: {}").format(time_str))
self.date_label.show()
elif exp_n is not None:
text = '%.2f MB'%(exp_n/1000000)
self.date_label.setText(_('Position in mempool: {} from tip').format(text))
self.date_label.show()
else:
self.date_label.hide()
if self.tx.locktime <= NLOCKTIME_BLOCKHEIGHT_MAX:
locktime_final_str = f"LockTime: {self.tx.locktime} (height)"
else:
locktime_final_str = f"LockTime: {self.tx.locktime} ({datetime.datetime.fromtimestamp(self.tx.locktime)})"
self.locktime_final_label.setText(locktime_final_str)
if self.locktime_e.get_locktime() is None:
self.locktime_e.set_locktime(self.tx.locktime)
self.rbf_label.setText(_('Replace by fee') + f": {not self.tx.is_final()}")
if tx_mined_status.header_hash:
self.block_hash_label.setText(_("Included in block: {}")
.format(tx_mined_status.header_hash))
self.block_height_label.setText(_("At block height: {}")
.format(tx_mined_status.height))
else:
self.block_hash_label.hide()
self.block_height_label.hide()
if amount is None and ln_amount is None:
amount_str = _("Transaction unrelated to your wallet")
elif amount is None:
amount_str = ''
else:
if amount > 0:
amount_str = _("Amount received:") + ' %s'% format_amount(amount) + ' ' + base_unit
else:
amount_str = _("Amount sent:") + ' %s' % format_amount(-amount) + ' ' + base_unit
if fx.is_enabled():
if tx_item_fiat:
amount_str += ' (%s)' % tx_item_fiat['fiat_value'].to_ui_string()
else:
amount_str += ' (%s)' % format_fiat_and_units(abs(amount))
if amount_str:
self.amount_label.setText(amount_str)
else:
self.amount_label.hide()
size_str = _("Size:") + ' %d bytes'% size
if fee is None:
fee_str = _("Fee") + ': ' + _("unknown")
else:
fee_str = _("Fee") + f': {format_amount(fee)} {base_unit}'
if fx.is_enabled():
if tx_item_fiat:
fiat_fee_str = tx_item_fiat['fiat_fee'].to_ui_string()
else:
fiat_fee_str = format_fiat_and_units(fee)
fee_str += f' ({fiat_fee_str})'
if fee is not None:
fee_rate = Decimal(fee) / size # sat/byte
fee_str += ' ( %s ) ' % self.main_window.format_fee_rate(fee_rate * 1000)
if isinstance(self.tx, PartialTransaction):
if isinstance(self, PreviewTxDialog):
invoice_amt = self.tx.output_value() if self.output_value == '!' else self.output_value
else:
invoice_amt = amount
fee_warning_tuple = self.wallet.get_tx_fee_warning(
invoice_amt=invoice_amt, tx_size=size, fee=fee)
if fee_warning_tuple:
allow_send, long_warning, short_warning = fee_warning_tuple
fee_str += " - <font color={color}>{header}: {body}</font>".format(
header=_('Warning'),
body=short_warning,
color=ColorScheme.RED.as_color().name(),
)
if isinstance(self.tx, PartialTransaction):
risk_of_burning_coins = (can_sign and fee is not None
and self.wallet.get_warning_for_risk_of_burning_coins_as_fees(self.tx))
self.fee_warning_icon.setToolTip(str(risk_of_burning_coins))
self.fee_warning_icon.setVisible(bool(risk_of_burning_coins))
self.fee_label.setText(fee_str)
self.size_label.setText(size_str)
if ln_amount is None or ln_amount == 0:
ln_amount_str = ''
elif ln_amount > 0:
ln_amount_str = _('Amount received in channels') + ': ' + format_amount(ln_amount) + ' ' + base_unit
else:
assert ln_amount < 0, f"{ln_amount!r}"
ln_amount_str = _('Amount withdrawn from channels') + ': ' + format_amount(-ln_amount) + ' ' + base_unit
if ln_amount_str:
self.ln_amount_label.setText(ln_amount_str)
else:
self.ln_amount_label.hide()
show_psbt_only_widgets = self.finalized and isinstance(self.tx, PartialTransaction)
for widget in self.psbt_only_widgets:
if isinstance(widget, QMenu):
widget.menuAction().setVisible(show_psbt_only_widgets)
else:
widget.setVisible(show_psbt_only_widgets)
if tx_details.is_lightning_funding_tx:
self._ptx_join_txs_action.setEnabled(False) # would change txid
self.save_button.setEnabled(tx_details.can_save_as_local)
if tx_details.can_save_as_local:
self.save_button.setToolTip(_("Save transaction offline"))
else:
self.save_button.setToolTip(_("Transaction already saved or not yet signed."))
run_hook('transaction_dialog_update', self)
def update_io(self):
inputs_header_text = _("Inputs") + ' (%d)'%len(self.tx.inputs())
if not self.finalized:
selected_coins = self.main_window.get_manually_selected_coins()
if selected_coins is not None:
inputs_header_text += f" - " + _("Coin selection active ({} UTXOs selected)").format(len(selected_coins))
self.inputs_header.setText(inputs_header_text)
ext = QTextCharFormat()
tf_used_recv, tf_used_change, tf_used_2fa = False, False, False
def text_format(addr):
nonlocal tf_used_recv, tf_used_change, tf_used_2fa
if self.wallet.is_mine(addr):
if self.wallet.is_change(addr):
tf_used_change = True
return self.txo_color_change.text_char_format
else:
tf_used_recv = True
return self.txo_color_recv.text_char_format
elif self.wallet.is_billing_address(addr):
tf_used_2fa = True
return self.txo_color_2fa.text_char_format
return ext
def format_amount(amt):
return self.main_window.format_amount(amt, whitespaces=True)
i_text = self.inputs_textedit
i_text.clear()
i_text.setFont(QFont(MONOSPACE_FONT))
i_text.setReadOnly(True)
cursor = i_text.textCursor()
for txin in self.tx.inputs():
if txin.is_coinbase_input():
cursor.insertText('coinbase')
else:
prevout_hash = txin.prevout.txid.hex()
prevout_n = txin.prevout.out_idx
cursor.insertText(prevout_hash + ":%-4d " % prevout_n, ext)
addr = self.wallet.get_txin_address(txin)
if addr is None:
addr = ''
cursor.insertText(addr, text_format(addr))
txin_value = self.wallet.get_txin_value(txin)
if txin_value is not None:
cursor.insertText(format_amount(txin_value), ext)
cursor.insertBlock()
self.outputs_header.setText(_("Outputs") + ' (%d)'%len(self.tx.outputs()))
o_text = self.outputs_textedit
o_text.clear()
o_text.setFont(QFont(MONOSPACE_FONT))
o_text.setReadOnly(True)
cursor = o_text.textCursor()
for o in self.tx.outputs():
addr, v = o.get_ui_address_str(), o.value
cursor.insertText(addr, text_format(addr))
if v is not None:
cursor.insertText('\t', ext)
cursor.insertText(format_amount(v), ext)
cursor.insertBlock()
self.txo_color_recv.legend_label.setVisible(tf_used_recv)
self.txo_color_change.legend_label.setVisible(tf_used_change)
self.txo_color_2fa.legend_label.setVisible(tf_used_2fa)
def add_tx_stats(self, vbox):
hbox_stats = QHBoxLayout()
# left column
vbox_left = QVBoxLayout()
self.tx_desc = TxDetailLabel(word_wrap=True)
vbox_left.addWidget(self.tx_desc)
self.status_label = TxDetailLabel()
vbox_left.addWidget(self.status_label)
self.date_label = TxDetailLabel()
vbox_left.addWidget(self.date_label)
self.amount_label = TxDetailLabel()
vbox_left.addWidget(self.amount_label)
self.ln_amount_label = TxDetailLabel()
vbox_left.addWidget(self.ln_amount_label)
fee_hbox = QHBoxLayout()
self.fee_label = TxDetailLabel()
fee_hbox.addWidget(self.fee_label)
self.fee_warning_icon = QLabel()
pixmap = QPixmap(icon_path("warning"))
pixmap_size = round(2 * char_width_in_lineedit())
pixmap = pixmap.scaled(pixmap_size, pixmap_size, Qt.KeepAspectRatio, Qt.SmoothTransformation)
self.fee_warning_icon.setPixmap(pixmap)
self.fee_warning_icon.setVisible(False)
fee_hbox.addWidget(self.fee_warning_icon)
fee_hbox.addStretch(1)
vbox_left.addLayout(fee_hbox)
vbox_left.addStretch(1)
hbox_stats.addLayout(vbox_left, 50)
# vertical line separator
line_separator = QFrame()
line_separator.setFrameShape(QFrame.VLine)
line_separator.setFrameShadow(QFrame.Sunken)
line_separator.setLineWidth(1)
hbox_stats.addWidget(line_separator)
# right column
vbox_right = QVBoxLayout()
self.size_label = TxDetailLabel()
vbox_right.addWidget(self.size_label)
self.rbf_label = TxDetailLabel()
vbox_right.addWidget(self.rbf_label)
self.rbf_cb = QCheckBox(_('Replace by fee'))
self.rbf_cb.setChecked(bool(self.config.get('use_rbf', True)))
vbox_right.addWidget(self.rbf_cb)
self.locktime_final_label = TxDetailLabel()
vbox_right.addWidget(self.locktime_final_label)
locktime_setter_hbox = QHBoxLayout()
locktime_setter_hbox.setContentsMargins(0, 0, 0, 0)
locktime_setter_hbox.setSpacing(0)
locktime_setter_label = TxDetailLabel()
locktime_setter_label.setText("LockTime: ")
self.locktime_e = LockTimeEdit(self)
locktime_setter_hbox.addWidget(locktime_setter_label)
locktime_setter_hbox.addWidget(self.locktime_e)
locktime_setter_hbox.addStretch(1)
self.locktime_setter_widget = QWidget()
self.locktime_setter_widget.setLayout(locktime_setter_hbox)
vbox_right.addWidget(self.locktime_setter_widget)
self.block_height_label = TxDetailLabel()
vbox_right.addWidget(self.block_height_label)
vbox_right.addStretch(1)
hbox_stats.addLayout(vbox_right, 50)
vbox.addLayout(hbox_stats)
# below columns
self.block_hash_label = TxDetailLabel(word_wrap=True)
vbox.addWidget(self.block_hash_label)
# set visibility after parenting can be determined by Qt
self.rbf_label.setVisible(self.finalized)
self.rbf_cb.setVisible(not self.finalized)
self.locktime_final_label.setVisible(self.finalized)
self.locktime_setter_widget.setVisible(not self.finalized)
def set_title(self):
self.setWindowTitle(_("Create transaction") if not self.finalized else _("Transaction"))
def can_finalize(self) -> bool:
return False
def on_finalize(self):
pass # overridden in subclass
def update_fee_fields(self):
pass # overridden in subclass
class TxDetailLabel(QLabel):
def __init__(self, *, word_wrap=None):
super().__init__()
self.setTextInteractionFlags(Qt.TextSelectableByMouse)
if word_wrap is not None:
self.setWordWrap(word_wrap)
class TxOutputColoring:
# used for both inputs and outputs
def __init__(
self,
*,
legend: str,
color: ColorSchemeItem,
tooltip: str,
):
self.color = color.as_color(background=True)
self.legend_label = QLabel("<font color={color}>{box_char}</font> = {label}".format(
color=self.color.name(),
box_char="█",
label=legend,
))
font = self.legend_label.font()
font.setPointSize(font.pointSize() - 1)
self.legend_label.setFont(font)
self.legend_label.setVisible(False)
self.text_char_format = QTextCharFormat()
self.text_char_format.setBackground(QBrush(self.color))
self.text_char_format.setToolTip(tooltip)
class TxDialog(BaseTxDialog):
def __init__(self, tx: Transaction, *, parent: 'ElectrumWindow', desc, prompt_if_unsaved):
BaseTxDialog.__init__(self, parent=parent, desc=desc, prompt_if_unsaved=prompt_if_unsaved, finalized=True)
self.set_tx(tx)
self.update()
class PreviewTxDialog(BaseTxDialog, TxEditor):
def __init__(
self,
*,
make_tx,
external_keypairs,
window: 'ElectrumWindow',
output_value: Union[int, str],
):
TxEditor.__init__(
self,
window=window,
make_tx=make_tx,
is_sweep=bool(external_keypairs),
output_value=output_value,
)
BaseTxDialog.__init__(self, parent=window, desc='', prompt_if_unsaved=False,
finalized=False, external_keypairs=external_keypairs)
BlockingWaitingDialog(window, _("Preparing transaction..."),
lambda: self.update_tx(fallback_to_zero_fee=True))
self.update()
def create_fee_controls(self):
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.fiat_fee_label = TxFiatLabel()
self.fiat_fee_label.setAlignment(Qt.AlignCenter)
self.fiat_fee_label.setAmount(0)
self.fiat_fee_label.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(self.on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(self.on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.main_window.get_decimal_point)
self.fee_e.textEdited.connect(partial(self.on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(self.on_fee_or_feerate, self.fee_e, True))
self.fee_e.textChanged.connect(self.entry_changed)
self.feerate_e.textChanged.connect(self.entry_changed)
self.fee_slider = FeeSlider(self, self.config, self.fee_slider_callback)
self.fee_combo = FeeComboBox(self.fee_slider)
self.fee_slider.setFixedWidth(self.fee_e.width())
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
self.show_message(title=_('Fee rounding'), msg=text)
self.feerounding_icon = QToolButton()
self.feerounding_icon.setIcon(read_QIcon('info.png'))
self.feerounding_icon.setAutoRaise(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.feecontrol_fields = QWidget()
hbox = QHBoxLayout(self.feecontrol_fields)
hbox.setContentsMargins(0, 0, 0, 0)
grid = QGridLayout()
grid.addWidget(QLabel(_("Target fee:")), 0, 0)
grid.addWidget(self.feerate_e, 0, 1)
grid.addWidget(self.size_e, 0, 2)
grid.addWidget(self.fee_e, 0, 3)
grid.addWidget(self.feerounding_icon, 0, 4)
grid.addWidget(self.fiat_fee_label, 0, 5)
grid.addWidget(self.fee_slider, 1, 1)
grid.addWidget(self.fee_combo, 1, 2)
hbox.addLayout(grid)
hbox.addStretch(1)
def fee_slider_callback(self, dyn, pos, fee_rate):
super().fee_slider_callback(dyn, pos, fee_rate)
self.fee_slider.activate()
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
def on_fee_or_feerate(self, edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update()
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def get_fee_estimator(self):
if self.is_send_fee_frozen() and self.fee_e.get_amount() is not None:
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen() and self.feerate_e.get_amount() is not None:
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def entry_changed(self):
# blue color denotes auto-filled values
text = ""
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
fee_color = ColorScheme.RED
feerate_color = ColorScheme.RED
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
else:
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
#
self.needs_update = True
def update_fee_fields(self):
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
tx = self.tx
if self.no_dynfee_estimates and tx:
size = tx.estimated_size()
self.size_e.setAmount(size)
if self.not_enough_funds or self.no_dynfee_estimates:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
return
assert tx is not None
size = tx.estimated_size()
fee = tx.get_fee()
self.size_e.setAmount(size)
fiat_fee = self.main_window.format_fiat_and_units(fee)
self.fiat_fee_label.setAmount(fiat_fee)
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
elif self.fee_slider.is_active():
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if (fee and displayed_fee is not None) else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
def can_finalize(self):
return (self.tx is not None
and not self.not_enough_funds)
def on_finalize(self):
if not self.can_finalize():
return
assert self.tx
self.finalized = True
self.tx.set_rbf(self.rbf_cb.isChecked())
locktime = self.locktime_e.get_locktime()
if locktime is not None:
self.tx.locktime = locktime
for widget in [self.fee_slider, self.fee_combo, self.feecontrol_fields, self.rbf_cb,
self.locktime_setter_widget, self.locktime_e]:
widget.setEnabled(False)
widget.setVisible(False)
for widget in [self.rbf_label, self.locktime_final_label]:
widget.setVisible(True)
self.set_title()
self.set_buttons_visibility()
self.update()
| [
[
[
1173,
1176
]
],
[
[
1184,
1188
],
[
9921,
9925
],
[
12305,
12309
],
[
12614,
12618
]
],
[
[
1196,
1204
],
[
20019,
20027
],
[
20662,
20670
]
],
[
[
1212,
1221
]
],
[
[
1229,
1233
],
[
15031,
15035
]
],
[
[
1253,
1266
],
[
2731,
2744
]
],
[
[
1268,
1276
],
[
11416,
11424
]
],
[
[
1278,
1286
]
],
[
[
1288,
1292
]
],
[
[
1294,
1299
],
[
33481,
33486
]
],
[
[
1322,
1329
],
[
34642,
34649
],
[
34745,
34752
],
[
34910,
34917
],
[
35005,
35012
],
[
38950,
38957
]
],
[
[
1350,
1357
],
[
22746,
22753
],
[
37115,
37122
]
],
[
[
1384,
1389
],
[
3140,
3145
]
],
[
[
1391,
1393
],
[
29246,
29248
],
[
29266,
29268
],
[
32083,
32085
],
[
34148,
34150
],
[
34354,
34356
]
],
[
[
1418,
1433
],
[
25889,
25904
],
[
32844,
32859
]
],
[
[
1435,
1441
],
[
32906,
32912
]
],
[
[
1443,
1448
],
[
26760,
26765
],
[
27733,
27738
]
],
[
[
1450,
1457
],
[
29101,
29108
]
],
[
[
1487,
1494
],
[
3720,
3727
],
[
4068,
4075
]
],
[
[
1496,
1502
],
[
2810,
2816
],
[
2950,
2956
],
[
31967,
31973
],
[
4685,
4691
],
[
5190,
5196
],
[
6009,
6015
],
[
29075,
29081
],
[
32475,
32481
],
[
36515,
36521
]
],
[
[
1504,
1515
],
[
6449,
6460
],
[
6546,
6557
],
[
6651,
6662
],
[
6745,
6756
],
[
7755,
7766
]
],
[
[
1517,
1528
],
[
5872,
5883
],
[
9126,
9137
],
[
28393,
28404
],
[
28945,
28956
],
[
30380,
30391
],
[
36383,
36394
]
],
[
[
1530,
1541
],
[
4618,
4629
],
[
28450,
28461
],
[
29885,
29896
]
],
[
[
1543,
1550
],
[
30841,
30848
],
[
36358,
36365
]
],
[
[
1552,
1563
],
[
36478,
36489
]
],
[
[
1594,
1603
],
[
3089,
3098
]
],
[
[
1605,
1611
],
[
29643,
29649
],
[
29689,
29695
],
[
29741,
29747
]
],
[
[
1613,
1620
],
[
7920,
7927
],
[
8128,
8135
],
[
11536,
11543
],
[
11782,
11789
],
[
11955,
11962
]
],
[
[
1622,
1633
],
[
7515,
7526
],
[
7697,
7708
],
[
8357,
8368
],
[
8556,
8567
],
[
36084,
36095
]
],
[
[
1635,
1640
],
[
6892,
6897
],
[
7880,
7885
],
[
11399,
11404
],
[
24830,
24835
]
],
[
[
1642,
1651
],
[
30095,
30104
]
],
[
[
1660,
1666
],
[
13762,
13768
]
],
[
[
1686,
1696
]
],
[
[
1733,
1745
],
[
38975,
38987
]
],
[
[
1772,
1788
],
[
37170,
37186
],
[
41021,
41037
],
[
41180,
41196
],
[
41812,
41828
]
],
[
[
1818,
1829
]
],
[
[
1831,
1856
],
[
20481,
20506
]
],
[
[
1883,
1884
],
[
3597,
3598
],
[
4692,
4693
],
[
4996,
4997
],
[
5413,
5414
],
[
5470,
5471
],
[
5568,
5569
],
[
5623,
5624
],
[
5717,
5718
],
[
5783,
5784
],
[
6461,
6462
],
[
6558,
6559
],
[
6663,
6664
],
[
6757,
6758
],
[
7057,
7058
],
[
7287,
7288
],
[
7572,
7573
],
[
7767,
7768
],
[
7928,
7929
],
[
8136,
8137
],
[
8418,
8419
],
[
10391,
10392
],
[
10961,
10962
],
[
11018,
11019
],
[
11544,
11545
],
[
11811,
11812
],
[
11963,
11964
],
[
13505,
13506
],
[
13827,
13828
],
[
13896,
13897
],
[
13995,
13996
],
[
15466,
15467
],
[
16182,
16183
],
[
16421,
16422
],
[
16475,
16476
],
[
16545,
16546
],
[
16857,
16858
],
[
17142,
17143
],
[
17196,
17197
],
[
17235,
17236
],
[
17292,
17293
],
[
17601,
17602
],
[
19705,
19706
],
[
19819,
19820
],
[
19917,
19918
],
[
20134,
20135
],
[
20314,
20315
],
[
20918,
20919
],
[
21054,
21055
],
[
21205,
21206
],
[
21479,
21480
],
[
21648,
21649
],
[
21766,
21767
],
[
22230,
22231
],
[
22307,
22308
],
[
22325,
22326
],
[
22374,
22375
],
[
23519,
23520
],
[
24255,
24256
],
[
24433,
24434
],
[
25257,
25258
],
[
25342,
25343
],
[
25501,
25502
],
[
25744,
25745
],
[
27601,
27602
],
[
30105,
30106
],
[
31678,
31679
],
[
31729,
31730
],
[
33908,
33909
],
[
36522,
36523
],
[
38404,
38405
],
[
35482,
35483
],
[
35622,
35623
],
[
35712,
35713
],
[
35762,
35763
],
[
35809,
35810
],
[
35900,
35901
],
[
36022,
36023
]
],
[
[
1913,
1921
],
[
8941,
8949
],
[
25402,
25410
]
],
[
[
1943,
1956
]
],
[
[
1990,
2008
],
[
3476,
3494
],
[
10039,
10057
]
],
[
[
2010,
2021
],
[
3289,
3300
],
[
11429,
11440
],
[
13388,
13399
],
[
13553,
13564
],
[
14807,
14818
],
[
33035,
33046
]
],
[
[
2023,
2041
],
[
12133,
12151
],
[
12188,
12206
],
[
12436,
12454
],
[
12491,
12509
],
[
14902,
14920
],
[
16041,
16059
],
[
16310,
16328
],
[
17031,
17049
],
[
22900,
22918
],
[
23695,
23713
],
[
24730,
24748
]
],
[
[
2043,
2057
]
],
[
[
2087,
2097
],
[
3167,
3177
]
],
[
[
2118,
2133
],
[
3729,
3744
]
],
[
[
2135,
2145
],
[
11790,
11800
],
[
36136,
36146
]
],
[
[
2147,
2154
],
[
9163,
9170
],
[
9245,
9252
]
],
[
[
2156,
2165
],
[
29109,
29118
]
],
[
[
2186,
2200
],
[
26766,
26780
],
[
27739,
27753
]
],
[
[
2202,
2213
],
[
4903,
4914
],
[
5443,
5454
],
[
5595,
5606
],
[
5757,
5768
],
[
11723,
11734
],
[
23607,
23618
],
[
34231,
34242
],
[
34453,
34464
],
[
39214,
39225
],
[
39258,
39269
],
[
39336,
39347
],
[
39380,
39391
],
[
39462,
39473
],
[
39545,
39556
],
[
39600,
39611
],
[
39645,
39656
]
],
[
[
2215,
2230
],
[
4741,
4756
]
],
[
[
2232,
2243
],
[
16365,
16376
],
[
17086,
17097
]
],
[
[
2264,
2286
],
[
29163,
29185
]
],
[
[
2288,
2330
],
[
15556,
15598
]
],
[
[
2351,
2401
],
[
15148,
15198
]
],
[
[
2422,
2471
],
[
15273,
15322
]
],
[
[
2492,
2513
],
[
10338,
10359
],
[
33878,
33899
]
],
[
[
2515,
2530
],
[
15406,
15421
]
],
[
[
2532,
2547
],
[
32344,
32359
]
],
[
[
2574,
2583
],
[
35206,
35215
]
],
[
[
2585,
2596
],
[
35286,
35297
]
],
[
[
2628,
2636
],
[
33302,
33310
],
[
33513,
33521
]
],
[
[
2661,
2672
],
[
34516,
34527
]
],
[
[
2674,
2687
],
[
34822,
34835
]
],
[
[
2714,
2726
],
[
30623,
30635
]
],
[
[
2775,
2789
]
],
[
[
2798,
2809
],
[
34101,
34112
]
],
[
[
2938,
2949
],
[
34299,
34310
]
],
[
[
3064,
3088
],
[
5273,
5297
],
[
6344,
6368
]
],
[
[
3157,
3164
],
[
3509,
3516
],
[
13147,
13154
]
],
[
[
3188,
3195
],
[
9345,
9352
],
[
11135,
11142
]
],
[
[
3268,
3284
]
],
[
[
3707,
3719
],
[
32993,
33005
],
[
33288,
33300
],
[
33111,
33123
],
[
33709,
33721
]
],
[
[
31953,
31966
],
[
28487,
28500
],
[
28587,
28600
],
[
28676,
28689
],
[
28765,
28778
],
[
28859,
28872
],
[
28984,
28997
],
[
29925,
29938
],
[
30012,
30025
],
[
30276,
30289
],
[
30529,
30542
],
[
31012,
31025
],
[
31253,
31266
]
],
[
[
32191,
32207
],
[
5376,
5392
],
[
5531,
5547
],
[
5680,
5696
]
],
[
[
32984,
32992
],
[
3389,
3397
]
],
[
[
33272,
33287
],
[
22957,
22972
]
]
] |
from collections import defaultdict
import json
import mmap
import operator
import os
import socket
from struct import Struct
MMDB_META_DATA_START = '\xAB\xCD\xEFMaxMind.com'
MMDB_META_DATA_BLOCK_MAX_SIZE = 131072
MMDB_DATA_SECTION_SEPARATOR = 16
unpack_int = Struct('>I').unpack
unpack_long = Struct('>Q').unpack
unpack_short = Struct('>H').unpack
class GeoIP(object):
"""Container for a GEOIP address"""
__slots__ = ('ip', 'data')
def __init__(self, ip, data):
self.ip = ip
self.data = data
@property
def country(self):
if 'country' in self.data:
return self.data['country']['iso_code']
@property
def country_en(self):
if 'country' in self.data:
return self.data['country']['names']['en']
@property
def continent(self):
if 'continent' in self.data:
return self.data['continent']['code']
@property
def state(self):
return ', '.join([x['iso_code'] for x in self.data.get('subdivisions') or ()
if 'iso_code' in x])
@property
def postal(self):
if 'postal' in self.data:
return self.data['postal'].get('code')
@property
def city(self):
if 'city' in self.data:
return self.data['city']['names']['en']
@property
def timezone(self):
if 'location' in self.data:
return self.data['location'].get('time_zone')
@property
def location(self):
if 'location' in self.data:
lat = self.data['location'].get('latitude')
long = self.data['location'].get('longitude')
if lat is not None and long is not None:
return lat, long
def to_dict(self):
return {
'ip': self.ip,
'country': self.country,
'continent': self.continent,
'state': self.state,
'city': self.city,
'postal': self.postal,
'timezone': self.timezone,
'location': self.location,
}
def pack_ip(ip):
for fmly in socket.AF_INET, socket.AF_INET6:
try:
return socket.inet_pton(fmly, ip)
except socket.error:
continue
raise ValueError('Malformed IP address')
class MMDB(object):
"""Context manager to query MaxMind database"""
def __init__(self, filename, buffer, meta_data):
self.closed = False
self.filename = filename
self.is_ipv6 = meta_data['ip_version'] == 6
self.nodes = meta_data['node_count']
self.record_size = meta_data['record_size']
self.node_size = self.record_size / 4
self.db_size = self.nodes * self.node_size
self.buffer = buffer
self.meta_data = meta_data
self.reader = MMDBParser(buffer, self.db_size)
self.ipv4_start = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def close(self):
self.closed = True
self.buffer.close()
def lookup(self, ip_addr):
if self.closed:
raise RuntimeError('Database is closed.')
packed_addr = pack_ip(ip_addr)
bits = len(packed_addr) * 8
node = self.find_start_node(bits)
seen = set()
for i in xrange(bits):
if node >= self.nodes:
break
bit = (ord(packed_addr[i >> 3]) >> (7 - (i % 8))) & 1
node = self.parse_node(node, bit)
if node in seen:
raise LookupError('Circle in tree detected')
seen.add(node)
if node > self.nodes:
offset = node - self.nodes + self.db_size
return GeoIP(ip_addr, self.reader.read(offset)[0])
def find_start_node(self, bits):
if bits == 128 or not self.is_ipv6:
return 0
if self.ipv4_start is not None:
return self.ipv4_start
node = 0
for netmask in xrange(96):
if node >= self.nodes:
break
node = self.parse_node(netmask, 0)
self.ipv4_start = node
return node
def parse_node(self, node, index):
offset = node * self.node_size
if self.record_size == 24:
offset += index * 3
bytes = '\x00' + self.buffer[offset:offset + 3]
elif self.record_size == 28:
b = ord(self.buffer[offset + 3:offset + 4])
if index:
b &= 0x0F
else:
b = (0xF0 & b) >> 4
offset += index * 4
bytes = chr(b) + self.buffer[offset:offset + 3]
elif self.record_size == 32:
offset += index * 4
bytes = self.buffer[offset:offset + 4]
else:
raise LookupError('Invalid record size')
return unpack_int(bytes)[0]
def make_struct_parser(code):
"""Helper to create struct unpack methods."""
struct = Struct('>' + code)
def unpack_func(self, size, offset):
new_offset = offset + struct.size
bytes = self.buffer[offset:new_offset].rjust(struct.size, '\x00')
value = struct.unpack(bytes)[0]
return value, new_offset
return unpack_func
class MMDBParser(object):
"""
Parser for MaxMind MMDB binary format.
Reference: https://maxmind.github.io/MaxMind-DB/
"""
def __init__(self, buffer, offset=0):
self.buffer = buffer
self.offset = offset
def parse_ptr(self, size, offset):
ptr_size = ((size >> 3) & 0x3) + 1
bytes = self.buffer[offset:offset + ptr_size]
if ptr_size != 4:
bytes = chr(size & 0x7) + bytes
ptr = (
unpack_int(bytes.rjust(4, '\x00'))[0] +
self.offset +
MMDB_DATA_SECTION_SEPARATOR +
(0, 2048, 526336, 0)[ptr_size - 1]
)
return self.read(ptr)[0], offset + ptr_size
def parse_str(self, size, offset):
bytes = self.buffer[offset:offset + size]
return bytes.decode('utf-8', 'replace'), offset + size
parse_double = make_struct_parser('d')
def parse_bytes(self, size, offset):
return self.buffer[offset:offset + size], offset + size
def parse_uint(self, size, offset):
bytes = self.buffer[offset:offset + size]
return unpack_long(bytes.rjust(8, '\x00'))[0], offset + size
def parse_dict(self, size, offset):
container = {}
for _ in xrange(size):
key, offset = self.read(offset)
value, offset = self.read(offset)
container[key] = value
return container, offset
parse_int32 = make_struct_parser('i')
def parse_list(self, size, offset):
rv = [None] * size
for idx in xrange(size):
rv[idx], offset = self.read(offset)
return rv, offset
def parse_error(self, size, offset):
raise AssertionError('Read invalid type code')
def parse_bool(self, size, offset):
return size != 0, offset
parse_float = make_struct_parser('f')
callbacks = (
parse_error,
parse_ptr,
parse_str,
parse_double,
parse_bytes,
parse_uint,
parse_uint,
parse_dict,
parse_int32,
parse_uint,
parse_uint,
parse_list,
parse_error,
parse_error,
parse_bool,
parse_float,
)
def read(self, offset):
new_offset = offset + 1
byte = ord(self.buffer[offset:new_offset])
size = byte & 0x1f
ty = byte >> 5
if ty == 0:
byte = ord(self.buffer[new_offset:new_offset + 1])
ty = byte + 7
new_offset += 1
if ty != 1 and size >= 29:
to_read = size - 28
bytes = self.buffer[new_offset:new_offset + to_read]
new_offset += to_read
if size == 29:
size = 29 + ord(bytes)
elif size == 30:
size = 285 + unpack_short(bytes)[0]
elif size > 30:
size = 65821 + unpack_int(bytes.rjust(4, '\x00'))[0]
return self.callbacks[ty](self, size, new_offset)
def read_mmdb_meta_data(buffer):
offset = buffer.rfind(MMDB_META_DATA_START,
buffer.size() - MMDB_META_DATA_BLOCK_MAX_SIZE)
if offset < 0:
raise ValueError('Could not find meta data')
offset += len(MMDB_META_DATA_START)
return MMDBParser(buffer, offset).read(offset)[0]
def open_mmdb(filename):
"""Open memory mapped buffer of MMDB"""
with open(filename, 'rb') as f:
mmap_buffer = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
meta_data = read_mmdb_meta_data(mmap_buffer)
return MMDB(filename, mmap_buffer, meta_data)
def geoip_lookup(mmdb_path, cache_path):
"""Performs GeoIP lookups for IPs stored in cache"""
if not os.path.exists(cache_path):
return None
with open(cache_path, 'rb') as f:
cache = json.loads(f.read())
result = defaultdict(lambda: 0)
with open_mmdb(mmdb_path) as db:
for i, ip_data in enumerate(cache):
if 'geoip' not in ip_data:
geoip = db.lookup(ip_data['ip'])
if geoip:
cache[i].update(geoip=True, **geoip.to_dict())
result[geoip.country_en] += 1
with open(cache_path, 'wb') as f:
f.write(json.dumps(cache))
return sorted(result.items(), key=operator.itemgetter(1), reverse=True)
| [
[
[
24,
35
],
[
9050,
9061
]
],
[
[
43,
47
],
[
9016,
9020
],
[
9439,
9443
]
],
[
[
55,
59
],
[
8654,
8658
],
[
8686,
8690
]
],
[
[
67,
75
],
[
9496,
9504
]
],
[
[
83,
85
],
[
8914,
8916
]
],
[
[
93,
99
],
[
2095,
2101
],
[
2111,
2117
],
[
2160,
2166
],
[
2202,
2208
]
],
[
[
119,
125
],
[
262,
268
],
[
296,
302
],
[
331,
337
],
[
4970,
4976
]
],
[
[
127,
147
],
[
8264,
8284
],
[
8449,
8469
]
],
[
[
176,
205
],
[
8328,
8357
]
],
[
[
215,
242
],
[
5798,
5825
]
],
[
[
249,
259
],
[
4854,
4864
],
[
5720,
5730
],
[
8107,
8117
]
],
[
[
282,
293
],
[
6346,
6357
]
],
[
[
316,
328
],
[
8025,
8037
]
],
[
[
359,
364
],
[
3730,
3735
]
],
[
[
2066,
2073
],
[
3194,
3201
]
],
[
[
2290,
2294
],
[
8764,
8768
]
],
[
[
4881,
4899
],
[
6110,
6128
],
[
6672,
6690
],
[
7061,
7079
]
],
[
[
5251,
5261
],
[
2804,
2814
],
[
8482,
8492
]
],
[
[
8209,
8228
],
[
8720,
8739
]
],
[
[
8531,
8540
],
[
9082,
9091
]
],
[
[
8809,
8821
]
]
] |
# -*- coding: UTF-8 -*-
#! /usr/bin/python
# To change this template, choose Tools | Templates
# and open the template in the editor.
__author__="ARA"
__all__ = ['norm']
__date__ ="$Feb 14, 2012 11:40:06 AM$"
from . import common_obj as _com
from . import constants as _cst
import numpy as _np
from .pigasusObject import *
class norm(pigasusObject):
def __init__ ( self, field = None, type = None, func = None, paramevalfunc = False, exact = None ):
pigasusObject.__init__(self)
self.id = self.com.nnorms
self.nparam = 0
self.paramevalfunc = paramevalfunc
if field is not None:
self.field = field
self.space = field.space
self.loc_id = self.space.grids.add_norm_id(self)
else:
raise("You must give a field for the current norm")
if type is not None:
self.type = type
else:
self.type = _cst.NORM_L2
self._set_nparam()
from .utils import function
if func is not None:
self.func = function(func, space=self.space)
else:
self.defaultFuncParam()
if exact is not None:
self.exact = function(exact, space=self.space)
else:
self.defaultFuncExact()
# this must be the last thing to do
self.com.nnorms += 1
self.com.norms.append(self)
def setInfoData(self):
"""
prints informations about the current norm
"""
self.infoData['id'] = str(self.id)
self.infoData['field'] = str(self.field.id)
self.infoData['space'] = str(self.space.id)
self.infoData['loc_id'] = str(self.loc_id)
self.infoData['nparam'] = str(self.nparam)
self.infoData['paramevalfunc'] = str(self.paramevalfunc)
self.infoData['type'] = str(self.type)
def _getGlobalNorm(self):
return self.com.pyfem.getglobalnorm ( self.id )
def _getPatchNorm(self):
li_npatchs = self.space.grids.npatchs
return self.com.pyfem._getPatchNorm ( self.id, li_npatchs )
def _getElementNorm(self, ai_patch):
li_nel = self.space.grids.list_grid[ai_patch].nel
return self.com.pyfem._getElementNorm ( self.id, ai_patch, li_nel)
def get(self, type=0, ai_patch=None):
"""
returns values for a given type of norm
type = 0 : for a global computation
type = 1 : for a patch computation
type = 2 : for an element computation
"""
if (type == 0) :
return self._getGlobalNorm()
if (type == 1) :
return self._getPatchNorm()
if (type == 2) and (ai_patch is not None):
return self._getElementNorm(ai_patch)
def setEvalNorm(self, ai_patch=0, fields=[], funcs=[]):
"""
fields is a list of fields
funcs is a list of functions
"""
lpr_pts = self.space.get_points(ai_patch)
list_pts = []
for i in range(0, self.space.dim):
list_pts.append(lpr_pts[i,0,:])
lpr_pts = list_pts
li_dim = self.space.dim
if li_dim not in [2]:
print("setEvalNorm: Not yet implemetend for the desired dimension")
lpi_shape = lpr_pts.shape[0:-1]
lpr_val = _np.zeros((1,lpi_shape[0],lpi_shape[1]))
for F in fields:
lpr_f = F.eval(ai_patch, elts)[ai_patch,:,:]
lpr_val[0,:,:] += lpr_f[:,:]
for func in funcs:
lpr_f = _np.zeros(lpr_pts.shape[0:-1])
for (i,list_p) in enumerate(lpr_pts):
for (j,p) in enumerate(list_p):
lpr_f[i,j] =func (p[0], p[1])[0]
lpr_val[0,:,:] += lpr_f[:,:]
self.com.pyfem.set_field_on_grids(self.field.id, ai_patch, lpr_val)
def _set_nparam(self):
if ( self.type in [ _cst.NORM_L2 ] ):
self.nparam = 1
return
if ( self.type in [ _cst.NORM_H1 ] ):
li_dim = self.space.dim
self.nparam = li_dim**2
return
else :
print("NORM-_set_nparam : type not implemented yet")
import sys; sys.exit(1)
def evalfunc(self, ai_patch, apr_points, elts=None, type="param"):
"""
Evaluation of the param-function over a given list of points
"""
if not self.paramevalfunc :
lpr_val = self._evalfunc_std(ai_patch, apr_points, elts, type)
else:
lpr_parampts = self.space.get_parametricPoints(ai_patch_id=ai_patch)
lpr_val = self._evalfunc_std(ai_patch, lpr_parampts, elts, type)
return lpr_val
def _evalfunc_std(self, ai_patch, apr_points, elts, type):
"""
sequential version of the evaluation
"""
if type == "param":
# print "==== param evaluation"
return self.func(apr_points)
if type == "exact":
# print "==== exact evaluation"
return self.exact(apr_points)
def defaultFuncParam(self):
li_dim = self.space.dim
if ( self.type in [ _cst.NORM_L2 ] ):
if li_dim == 1:
func = lambda x : [1.0]
if li_dim == 2:
func = lambda x,y : [1.0]
if li_dim == 3:
func = lambda x,y,z : [1.0]
elif ( self.type in [ _cst.NORM_H1 ] ):
if li_dim == 1:
func = lambda x : [1.0]
if li_dim == 2:
func = lambda x,y : [1.0, 0.0, 0.0, 1.0]
if li_dim == 3:
func = lambda x,y,z : [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
else :
print("NORM-defaultFuncParam : type not implemented yet")
import sys; sys.exit(1)
from .utils import function
self.func = function(func, space=self.space)
def defaultFuncExact(self):
li_dim = self.space.dim
if li_dim == 1:
func = lambda x : [0.0] * self.field.ndof
elif li_dim == 2:
func = lambda x,y : [0.0] * self.field.ndof
elif li_dim == 3:
func = lambda x,y,z : [0.0] * self.field.ndof
else :
raise("type not implemented yet")
from .utils import function
self.exact = function(exact, space=self.space)
def set_func(self, exact):
"""
this sets the param-function of the current field
"""
from .utils import function
self.exact = function(exact, space=self.space)
| [
[
[
136,
146
]
],
[
[
153,
160
]
],
[
[
172,
180
]
],
[
[
226,
244
]
],
[
[
259,
276
],
[
932,
936
],
[
3868,
3872
],
[
3961,
3965
],
[
5109,
5113
],
[
5367,
5371
]
],
[
[
284,
296
],
[
3301,
3304
],
[
3512,
3515
]
],
[
[
324,
325
],
[
338,
351
],
[
466,
479
],
[
3404,
3408
],
[
6299,
6304
]
],
[
[
333,
337
]
]
] |
# -*- coding: utf-8 -*-
"""Climate indices computation package based on Xarray."""
from importlib.resources import contents, path
from xclim.core import units # noqa
from xclim.core.indicator import build_indicator_module_from_yaml
from xclim.core.locales import load_locale
from xclim.core.options import set_options # noqa
from xclim.indicators import atmos, land, seaIce # noqa
__author__ = """Travis Logan"""
__email__ = "logan.travis@ouranos.ca"
__version__ = "0.28.0"
# Load official locales
for filename in contents("xclim.data"):
# Only select <locale>.json and not <module>.<locale>.json
if filename.endswith(".json") and filename.count(".") == 1:
locale = filename.split(".")[0]
with path("xclim.data", filename) as f:
load_locale(f, locale)
# Virtual modules creation:
with path("xclim.data", "icclim.yml") as f:
build_indicator_module_from_yaml(f.with_suffix(""), mode="raise")
with path("xclim.data", "anuclim.yml") as f:
build_indicator_module_from_yaml(f.with_suffix(""), mode="raise")
with path("xclim.data", "cf.yml") as f:
# ignore because some generic function are missing.
build_indicator_module_from_yaml(f.with_suffix(""), mode="ignore")
| [
[
[
115,
123
],
[
521,
529
]
],
[
[
125,
129
],
[
725,
729
],
[
830,
834
],
[
944,
948
],
[
1059,
1063
]
],
[
[
154,
159
]
],
[
[
201,
233
],
[
873,
905
],
[
988,
1020
],
[
1154,
1186
]
],
[
[
265,
276
],
[
772,
783
]
],
[
[
308,
319
]
],
[
[
357,
362
]
],
[
[
364,
368
]
],
[
[
370,
376
]
],
[
[
386,
396
]
],
[
[
418,
427
]
],
[
[
456,
467
]
],
[
[
509,
517
],
[
615,
623
],
[
646,
654
],
[
689,
697
],
[
744,
752
]
],
[
[
680,
686
],
[
787,
793
]
],
[
[
757,
758
],
[
784,
785
]
],
[
[
866,
867
],
[
906,
907
]
],
[
[
981,
982
],
[
1021,
1022
]
],
[
[
1091,
1092
],
[
1187,
1188
]
]
] |
"""Class for storing SRP password verifiers."""
from utils.cryptomath import *
from utils.compat import *
import mathtls
from BaseDB import BaseDB
class VerifierDB(BaseDB):
"""This class represent an in-memory or on-disk database of SRP
password verifiers.
A VerifierDB can be passed to a server handshake to authenticate
a client based on one of the verifiers.
This class is thread-safe.
"""
def __init__(self, filename=None):
"""Create a new VerifierDB instance.
@type filename: str
@param filename: Filename for an on-disk database, or None for
an in-memory database. If the filename already exists, follow
this with a call to open(). To create a new on-disk database,
follow this with a call to create().
"""
BaseDB.__init__(self, filename, "verifier")
def _getItem(self, username, valueStr):
(N, g, salt, verifier) = valueStr.split(" ")
N = base64ToNumber(N)
g = base64ToNumber(g)
salt = base64ToString(salt)
verifier = base64ToNumber(verifier)
return (N, g, salt, verifier)
def __setitem__(self, username, verifierEntry):
"""Add a verifier entry to the database.
@type username: str
@param username: The username to associate the verifier with.
Must be less than 256 characters in length. Must not already
be in the database.
@type verifierEntry: tuple
@param verifierEntry: The verifier entry to add. Use
L{tlslite.VerifierDB.VerifierDB.makeVerifier} to create a
verifier entry.
"""
BaseDB.__setitem__(self, username, verifierEntry)
def _setItem(self, username, value):
if len(username)>=256:
raise ValueError("username too long")
N, g, salt, verifier = value
N = numberToBase64(N)
g = numberToBase64(g)
salt = stringToBase64(salt)
verifier = numberToBase64(verifier)
valueStr = " ".join( (N, g, salt, verifier) )
return valueStr
def _checkItem(self, value, username, param):
(N, g, salt, verifier) = value
x = mathtls.makeX(salt, username, param)
v = powMod(g, x, N)
return (verifier == v)
def makeVerifier(username, password, bits):
"""Create a verifier entry which can be stored in a VerifierDB.
@type username: str
@param username: The username for this verifier. Must be less
than 256 characters in length.
@type password: str
@param password: The password for this verifier.
@type bits: int
@param bits: This values specifies which SRP group parameters
to use. It must be one of (1024, 1536, 2048, 3072, 4096, 6144,
8192). Larger values are more secure but slower. 2048 is a
good compromise between safety and speed.
@rtype: tuple
@return: A tuple which may be stored in a VerifierDB.
"""
return mathtls.makeVerifier(username, password, bits)
makeVerifier = staticmethod(makeVerifier) | [
[
[
78,
79
]
],
[
[
105,
106
],
[
966,
980
],
[
996,
1010
],
[
1029,
1043
],
[
1069,
1083
],
[
1862,
1876
],
[
1892,
1906
],
[
1925,
1939
],
[
1965,
1979
],
[
2220,
2226
]
],
[
[
114,
121
],
[
2171,
2178
],
[
3012,
3019
]
],
[
[
141,
147
],
[
166,
172
],
[
812,
818
],
[
1639,
1645
]
],
[
[
155,
165
]
]
] |
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_
'''
____ _ _ _ _ __ __ _
| _ \ __ _| |__ | |__ (_) |_| \/ | __ _ ___| | __
| |_) / _` | '_ \| '_ \| | __| |\/| |/ _` / __| |/ /
| _ < (_| | |_) | |_) | | |_| | | | (_| \__ \ <
|_| \_\__,_|_.__/|_.__/|_|\__|_| |_|\__,_|___/_|\_\
'''
# 协议默认字典配置
HTTP_PORT=['80']
HTTPS_PORT=['443','8443'] | [
[
[
328,
337
]
],
[
[
345,
355
]
]
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-04-02 16:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("poll", "0007_poll_content_type"),
]
operations = [
migrations.RemoveField(
model_name="poll",
name="content_type",
),
]
| [
[
[
95,
111
]
],
[
[
135,
145
],
[
164,
174
],
[
287,
297
]
],
[
[
154,
163
]
]
] |
from cmdbus import cmdbus, Command
class AddCommand(Command):
def __init__(self, v1: int, v2: int):
self.v1 = v1
self.v2 = v2
def handle(self):
return self.v1 + self.v2
def test_dispatch():
cmd = AddCommand(3, 5)
result = cmdbus.dispatch(cmd)
assert result is 8
| [
[
[
19,
25
],
[
267,
273
]
],
[
[
27,
34
],
[
54,
61
]
],
[
[
43,
53
],
[
237,
247
]
],
[
[
210,
223
]
]
] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class OctConv(nn.Module):
def __init__(self, ch_in, ch_out, kernel_size, stride=1, alphas=(0.5, 0.5)):
super(OctConv, self).__init__()
self.alpha_in, self.alpha_out = alphas
assert 0 <= self.alpha_in <= 1 and 0 <= self.alpha_in <= 1, "Alphas must be in interval [0, 1]"
# CH IN
self.ch_in_hf = int((1 - self.alpha_in) * ch_in)
self.ch_in_lf = ch_in - self.ch_in_hf
# CH OUT
self.ch_out_hf = int((1 - self.alpha_out) * ch_out)
self.ch_out_lf = ch_out - self.ch_out_hf
# FILTERS
self.wHtoH = nn.Parameter(torch.randn(self.ch_out_hf, self.ch_in_hf, kernel_size, kernel_size))
self.wHtoL = nn.Parameter(torch.randn(self.ch_out_lf, self.ch_in_hf, kernel_size, kernel_size))
self.wLtoH = nn.Parameter(torch.randn(self.ch_out_hf, self.ch_in_lf, kernel_size, kernel_size))
self.wLtoL = nn.Parameter(torch.randn(self.ch_out_lf, self.ch_in_lf, kernel_size, kernel_size))
# PADDING: (H - F + 2P)/S + 1 = 2 * [(0.5 H - F + 2P)/S +1] -> P = (F-S)/2
self.padding = (kernel_size - stride) // 2
def forward(self, input):
# logic to handle input tensors:
# if alpha_in = 0., we assume to be at the first layer, with only high freq repr
if self.alpha_in == 0:
hf_input = input
lf_input = torch.Tensor([]).reshape(1, 0)
else:
fmap_size = input.shape[-1]
hf_input = input[:, :self.ch_in_hf * 4, ...].reshape(-1, self.ch_in_hf, fmap_size * 2, fmap_size * 2)
lf_input = input[:, self.ch_in_hf * 4:, ...]
HtoH = HtoL = LtoL = LtoH = 0.
if self.alpha_in < 1:
# if alpha < 1 there is high freq component
if self.ch_out_hf > 0:
HtoH = F.conv2d(hf_input, self.wHtoH, padding=self.padding)
if self.ch_out_lf > 0:
HtoL = F.conv2d(F.avg_pool2d(hf_input, 2), self.wHtoL, padding=self.padding)
if self.alpha_in > 0:
# if alpha > 0 there is low freq component
if self.ch_out_hf > 0:
LtoH = F.interpolate(F.conv2d(lf_input, self.wLtoH, padding=self.padding),
scale_factor=2, mode='nearest')
if self.ch_out_lf > 0:
LtoL = F.conv2d(lf_input, self.wLtoL, padding=self.padding)
hf_output = HtoH + LtoH
lf_output = LtoL + HtoL
if 0 < self.alpha_out < 1:
# if alpha in (0, 1)
fmap_size = hf_output.shape[-1] // 2
hf_output = hf_output.reshape(-1, 4 * self.ch_out_hf, fmap_size, fmap_size)
output = torch.cat([hf_output, lf_output], dim=1) # cat over channel dim
elif np.isclose(self.alpha_out, 1., atol=1e-8):
# if only low req (alpha_out = 1.)
output = lf_output
elif np.isclose(self.alpha_out, 0., atol=1e-8):
# if only high freq (alpha_out = 0.)
output = hf_output
return output
oc = OctConv(ch_in=3, ch_out=3, kernel_size=3, alphas=(0., 0.5))
oc1 = OctConv(ch_in=3, ch_out=10, kernel_size=7, alphas=(0.5, 0.8))
oc2 = OctConv(ch_in=10, ch_out=1, kernel_size=3, alphas=(0.8, 0.))
out = oc2(oc1(oc(torch.randn(2, 3, 32, 32))))
print(out.shape)
| [
[
[
7,
18
],
[
2828,
2830
],
[
2962,
2964
]
],
[
[
26,
31
],
[
3326,
3331
],
[
686,
691
],
[
790,
795
],
[
894,
899
],
[
998,
1003
],
[
1447,
1452
],
[
2750,
2755
]
],
[
[
39,
53
],
[
102,
104
],
[
673,
675
],
[
777,
779
],
[
881,
883
],
[
985,
987
]
],
[
[
61,
85
],
[
1887,
1888
],
[
1998,
1999
],
[
2007,
2008
],
[
2211,
2212
],
[
2225,
2226
],
[
2406,
2407
]
],
[
[
94,
101
],
[
3114,
3121
],
[
3180,
3187
],
[
3248,
3255
],
[
209,
216
]
],
[
[
3109,
3111
],
[
3323,
3325
]
],
[
[
3174,
3177
],
[
3319,
3322
]
],
[
[
3242,
3245
],
[
3315,
3318
]
],
[
[
3309,
3312
],
[
3361,
3364
]
]
] |
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import unittest
from tvcm import parse_html_deps
from tvcm import module as module_module
from tvcm import html_generation_controller
class ParseTests(unittest.TestCase):
def test_parse_empty(self):
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse("")
self.assertEquals([], module.scripts_external)
self.assertEquals([], module.inline_scripts)
self.assertEquals([], module.stylesheets)
self.assertEquals([], module.imports)
def test_parse_none(self):
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(None)
self.assertEquals([], module.scripts_external)
self.assertEquals([], module.inline_scripts)
self.assertEquals([], module.stylesheets)
self.assertEquals([], module.imports)
def test_parse_script_src(self):
html = """<!DOCTYPE html>
<html>
<head>
<script src="polymer.min.js"></script>
<script src="foo.js"></script>
</head>
<body>
</body>
</html>"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
self.assertEquals(['polymer.min.js', 'foo.js'], module.scripts_external);
self.assertEquals([], module.inline_scripts)
self.assertEquals([], module.stylesheets)
self.assertEquals([], module.imports)
self.assertTrue(module.has_decl)
self.assertTrue('DOCTYPE html' not in module.html_contents_without_links_and_script)
class Ctl(html_generation_controller.HTMLGenerationController):
def GetHTMLForScriptHRef(self, href):
if href == "polymer.min.js":
return "<script>POLYMER</script>"
elif href == "foo.js":
return "<script>FOO</script>"
return None
def GetHTMLForStylesheetHRef(self, href):
return None
gen_html = module.GenerateHTML(Ctl())
ghtm = """
<html>
<head>
<script>POLYMER</script>
<script>FOO</script>
</head>
<body>
</body>
</html>"""
self.assertEquals(ghtm, gen_html)
def test_parse_link_rel_import(self):
html = """<!DOCTYPE html>
<html>
<head>
<link rel="import" href="x-foo.html">
</head>
<body>
</body>
</html>"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
self.assertEquals([], module.scripts_external);
self.assertEquals([], module.inline_scripts)
self.assertEquals([], module.stylesheets)
self.assertEquals(['x-foo.html'], module.imports)
self.assertTrue(module.has_decl)
def test_parse_script_inline(self):
html = """<polymer-element name="tk-element-proto">
<template>
</template>
<script>
tvcm.require("foo");
tvcm.require('bar');
</script>
</polymer-element>"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
self.assertEquals([], module.scripts_external);
self.assertEquals(1, len(module.inline_scripts))
self.assertEquals([], module.stylesheets)
self.assertEquals([], module.imports)
self.assertFalse(module.has_decl)
script0 = module.inline_scripts[0]
val = re.sub(r"\s+", '', script0.contents)
inner_script = """tvcm.require("foo");tvcm.require('bar');"""
self.assertEquals(inner_script, val)
self.assertEquals(1, len(script0.open_tags))
self.assertEquals('polymer-element', script0.open_tags[0].tag)
assert 'tvcm.require("foo");' not in module.html_contents_without_links_and_script
def test_parse_script_src_sripping(self):
html = """
<script src="blah.js"></script>
"""
module = parse_html_deps.HTMLModuleParser().Parse(html)
self.assertEquals('\n\n', module.html_contents_without_links_and_script)
def test_parse_link_rel_stylesheet(self):
html = """<polymer-element name="hi">
<template>
<link rel="stylesheet" href="frameworkstyles.css">
</template>
</polymer-element>"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
self.assertEquals([], module.scripts_external);
self.assertEquals([], module.inline_scripts)
self.assertEquals(['frameworkstyles.css'], module.stylesheets)
self.assertEquals([], module.imports)
self.assertFalse(module.has_decl)
class Ctl(html_generation_controller.HTMLGenerationController):
def GetHTMLForScriptHRef(self, href):
return None
def GetHTMLForStylesheetHRef(self, href):
if href == "frameworkstyles.css":
return "<style>FRAMEWORK</style>"
return None
gen_html = module.GenerateHTML(Ctl())
ghtm = """<polymer-element name="hi">
<template>
<style>FRAMEWORK</style>
</template>
</polymer-element>"""
self.assertEquals(ghtm, gen_html)
def test_parse_inline_style(self):
html = """
<style>
hello
</style>"""
module = parse_html_deps.HTMLModuleParser().Parse(html)
self.assertEquals(html, module.html_contents_without_links_and_script)
class Ctl(html_generation_controller.HTMLGenerationController):
def GetHTMLForInlineStylesheet(self, contents):
if contents == '\n hello\n':
return '\n HELLO\n'
return None
gen_html = module.GenerateHTML(Ctl())
ghtm = """
<style>
HELLO
</style>"""
self.assertEquals(ghtm, gen_html)
def test_parse_style_import(self):
html = """<polymer-element name="x-blink">
<template>
<style>
@import url(awesome.css);
</style>
</template>
</polymer-element>"""
parser = parse_html_deps.HTMLModuleParser()
self.assertRaises(lambda: parser.Parse(html))
def test_html_contents_basic(self):
html = """<a b="c">d</a>"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
self.assertEquals(html, module.html_contents_without_links_and_script)
def test_html_contents_with_entity(self):
html = """<a>→</a>"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
self.assertEquals(html, module.html_contents_without_links_and_script)
def test_html_content_with_charref(self):
html = """<a>></a>"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
self.assertEquals(html, module.html_contents_without_links_and_script)
def test_html_content_start_end_br(self):
html = """<a><br /></a>"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
self.assertEquals(html, module.html_contents_without_links_and_script)
def test_html_content_start_end_img(self):
html = """<a><img src="foo.png" id="bar" /></a>"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
self.assertEquals(html, module.html_contents_without_links_and_script)
def test_html_contents_with_link_stripping(self):
html = """<a b="c">d</a>
<link rel="import" href="x-foo.html">"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
self.assertEquals("""<a b="c">d</a>""",
module.html_contents_without_links_and_script.strip())
def test_html_contents_with_style_link_stripping(self):
html = """<a b="c">d</a>
<link rel="stylesheet" href="frameworkstyles.css">"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
self.assertEquals("""<a b="c">d</a>""",
module.html_contents_without_links_and_script.strip())
def test_malformed_script_raises(self):
html = """<script src="x"/>"""
parser = parse_html_deps.HTMLModuleParser()
def DoIt():
module = parser.Parse(html)
self.assertRaises(Exception, DoIt)
def test_malformed_br_raises(self):
html = """<br>"""
parser = parse_html_deps.HTMLModuleParser()
def DoIt():
module = parser.Parse(html)
self.assertRaises(Exception, DoIt)
def test_br_does_not_raise(self):
html = """<div><br/></div>"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
def test_p_does_not_raises(self):
html = """<div></p></div>"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
def test_link_endlink_does_not_raise(self):
html = """<link rel="stylesheet" href="foo.css"></link>"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
def test_link_script_does_not_raise(self):
html = """<link rel="stylesheet" href="foo.css">
<script>
</script>"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
def test_malformed_script_raises(self):
html = """<script src="/jszip-inflate.js"</script>"""
parser = parse_html_deps.HTMLModuleParser()
def DoIt():
module = parser.Parse(html)
self.assertRaises(Exception, DoIt)
def test_script_with_script_inside_as_js(self):
html = """<script>
var html_lines = [
'<script>',
'<\/script>',
];
</script>"""
parser = parse_html_deps.HTMLModuleParser()
module = parser.Parse(html)
def test_invalid_script_escaping_raises(self):
html = """<script>
var html_lines = [
'<script>',
'< /script>',
];
</script>"""
parser = parse_html_deps.HTMLModuleParser()
def DoIt():
module = parser.Parse(html)
self.assertRaises(Exception, DoIt)
if __name__ == '__main__':
unittest.main()
| [
[
[
196,
198
],
[
3635,
3637
]
],
[
[
206,
214
],
[
353,
361
],
[
10168,
10176
]
],
[
[
233,
248
],
[
416,
431
],
[
712,
727
],
[
1292,
1307
],
[
2652,
2667
],
[
3287,
3302
],
[
4093,
4108
],
[
4478,
4493
],
[
5433,
5448
],
[
6179,
6194
],
[
6348,
6363
],
[
6579,
6594
],
[
6809,
6824
],
[
7040,
7055
],
[
7296,
7311
],
[
7588,
7603
],
[
7945,
7960
],
[
8224,
8239
],
[
8422,
8437
],
[
8630,
8645
],
[
8780,
8795
],
[
8970,
8985
],
[
9199,
9214
],
[
9380,
9395
],
[
9726,
9741
],
[
10014,
10029
]
],
[
[
266,
289
]
],
[
[
307,
333
],
[
1715,
1741
],
[
4808,
4834
],
[
5570,
5596
]
],
[
[
342,
352
]
]
] |
from typing import TypeVar, Callable
import unittest
from ._types import TestMethod
_F = TypeVar("_F", bound=TestMethod)
def test(method: _F) -> _F:
"""Decorator that flags a method as a test method."""
method._dectest_test = True # type: ignore
return method
def before(method: _F) -> _F:
"""Decorator that flags a method as fixture setup.
Fixture setup methods from base classes are guaranteed to be executed
before setup methods from derived classes.
"""
method._dectest_before = True # type: ignore
return method
def after(method: _F) -> _F:
"""Decorator that flags a method as fixture teardown.
Fixture teardown methods from base classes are guaranteed to be executed
after teardown methods from derived classes.
"""
method._dectest_after = True # type: ignore
return method
def skip(reason: str) -> Callable[[_F], _F]:
"""Unconditionally skip the decorated test.
This is equivalent to @unittest.skip, but also marks the decorated
function as a test.
"""
if not isinstance(reason, str):
raise TypeError("first argument to @skip must be a reason string")
def decorate(method: _F) -> _F:
return unittest.skip(reason)(test(method))
return decorate
def skip_if(condition: bool, reason: str) -> Callable[[_F], _F]:
"""Skip the decorated test if condition is true.
This is equivalent to @unittest.skipIf, but also marks the decorated
function as a test.
"""
def decorate(method: _F) -> _F:
return unittest.skipIf(condition, reason)(test(method))
return decorate
def skip_unless(condition: bool, reason: str) -> Callable[[_F], _F]:
"""Skip the decorated test unless condition is true.
This is equivalent to @unittest.skipUnless, but also marks the decorated
function as a test.
"""
def decorate(method: _F) -> _F:
return unittest.skipUnless(condition, reason)(test(method))
return decorate
| [
[
[
19,
26
],
[
91,
98
]
],
[
[
28,
36
],
[
880,
888
],
[
1320,
1328
],
[
1672,
1680
]
],
[
[
44,
52
],
[
1216,
1224
],
[
1551,
1559
],
[
1911,
1919
]
],
[
[
74,
84
],
[
111,
121
]
],
[
[
86,
88
],
[
149,
151
],
[
142,
144
],
[
305,
307
],
[
298,
300
],
[
589,
591
],
[
582,
584
],
[
890,
892
],
[
895,
897
],
[
1330,
1332
],
[
1335,
1337
],
[
1682,
1684
],
[
1687,
1689
],
[
1197,
1199
],
[
1190,
1192
],
[
1532,
1534
],
[
1525,
1527
],
[
1892,
1894
],
[
1885,
1887
]
],
[
[
129,
133
],
[
1238,
1242
],
[
1586,
1590
],
[
1950,
1954
]
],
[
[
283,
289
]
],
[
[
568,
573
]
],
[
[
859,
863
]
],
[
[
1279,
1286
]
],
[
[
1627,
1638
]
]
] |
# Databricks notebook source
# MAGIC %md
# MAGIC # CCU013_08 Paper subset data to cohort
# MAGIC
# MAGIC **Description**
# MAGIC
# MAGIC This notebook subsets the covid trajectory, severity and events tables to the cohort used for the phenotype severity paper.
# MAGIC
# MAGIC **Project(s)** CCU0013
# MAGIC
# MAGIC **Author(s)** Johan Thygesen, Chris Tomlinson
# MAGIC
# MAGIC **Reviewer(s)**
# MAGIC
# MAGIC **Date last updated** 2022-01-22
# MAGIC
# MAGIC **Date last reviewed**
# MAGIC
# MAGIC **Date last run** 2022-01-22
# MAGIC
# MAGIC **Data input**
# MAGIC 1. Descriptive Paper methodology derived cohort
# MAGIC 2. Maximally inclusive COVID-19 related event phenotypes:
# MAGIC 1. `ccu013_covid_trajectory`
# MAGIC 2. `ccu013_covid_events_demographics`
# MAGIC
# MAGIC **Data output**
# MAGIC 1. `ccu013_covid_trajectory_paper_cohort` - Comprehensive long list of COVID-19 related events, subset to paper cohort
# MAGIC 2. `ccu013_covid_severity_paper_cohort` - Mutually exclusive 'worst' COVID-19 related event, 1 row per patient
# MAGIC 3. `ccu013_covid_events_demographics_paper_cohort`- Binary matrix of COVID-19 related events + demographics, 1 row per patient
# MAGIC
# MAGIC **Software and versions** SQL, python
# MAGIC
# MAGIC **Packages and versions** See cell below:
# MAGIC
# MAGIC **TODO**
# MAGIC * Implement Longcovid search
# COMMAND ----------
# MAGIC %md
# MAGIC # 1 Subset Covid Phenotype data to the cohort population of interest
# COMMAND ----------
from pyspark.sql.functions import lit, col, udf
from functools import reduce
from pyspark.sql import DataFrame
from datetime import datetime
from pyspark.sql.types import DateType
# COMMAND ----------
# MAGIC %run /Workspaces/dars_nic_391419_j3w9t_collab/CCU013/COVID-19-SEVERITY-PHENOTYPING/CCU013_00_helper_functions
# COMMAND ----------
# MAGIC %md
# MAGIC ### 1.1 New approach (current) using the DP definition
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Individuals alive and registred in GDPPR on 23/01/2020
# MAGIC --- Old value = 55,876,173
# MAGIC --- Old @ 170821 = 56,609,049
# MAGIC --- Current value @ 220122 = 57,032,174
# MAGIC SELECT count(DISTINCT NHS_NUMBER_DEID) FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020
# COMMAND ----------
# MAGIC %md
# MAGIC #### 1.1.1 Find patients who do not have minimum follow up time
# MAGIC - Participants with non-fatal index events who had less than 28 days of follow up were excluded.
# COMMAND ----------
# MAGIC %sql
# MAGIC --- IMPORTANT check that no death date is larger than the study end date !!!
# MAGIC ---- As that would cause errors in the code below
# MAGIC SELECT MAX(date)
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory
# MAGIC WHERE covid_phenotype == '04_Fatal_with_covid_diagnosis' OR
# MAGIC covid_phenotype == '04_Fatal_without_covid_diagnosis' OR
# MAGIC covid_phenotype == '04_Covid_inpatient_death'
# COMMAND ----------
from pyspark.sql.functions import *
# Warning - update study end date
study_end_date = lit(datetime(2021, 11, 30))
all_fatal = spark.sql("""
SELECT person_id_deid, MIN(date) AS death_date
FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory
WHERE (covid_phenotype == '04_Fatal_with_covid_diagnosis' OR
covid_phenotype == '04_Fatal_without_covid_diagnosis' OR
covid_phenotype == '04_Covid_inpatient_death')
AND date >= "2020-01-23"
GROUP BY person_id_deid
""")
# Get first covid event dates for everyone, expect those with ONLY fatal events
followup_time = spark.sql("""
SELECT person_id_deid, MIN(date) AS first_covid_event
FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory
WHERE (covid_phenotype != '04_Fatal_with_covid_diagnosis' OR
covid_phenotype != '04_Fatal_without_covid_diagnosis' OR
covid_phenotype != '04_Covid_inpatient_death')
AND date >= "2020-01-23"
GROUP BY person_id_deid
""")
# Calculate elapsed number of days between earliest event and study end (except if fatal)
followup_time = followup_time.join(all_fatal, ['person_id_deid'], how='left')
followup_time = followup_time.select(['person_id_deid', 'first_covid_event', 'death_date'])
followup_time = followup_time.withColumn('study_end', study_end_date)
followup_time= followup_time.withColumn('followup_days',
when(followup_time['death_date'].isNull(), datediff(followup_time['study_end'], followup_time['first_covid_event'])).otherwise(-1))
# Mark deaths within 28 days
followup_time = followup_time.withColumn('28d_followup', \
when((followup_time['followup_days'] >= 28) | (followup_time['followup_days'] == -1), 1).otherwise(0))
#display(followup_time)
followup_time.createOrReplaceGlobalTempView('followup_time')
# COMMAND ----------
# MAGIC %md
# MAGIC Note that these counts are prior to joining on to skinny table, in other words could contain patients that don't meet the study inclusion
# COMMAND ----------
# MAGIC %sql
# MAGIC -- participants excluded due to lack of 28 days minimal followup time.
# MAGIC -- OLD With study_end as 2021, 3, 31 -> 1,280,138
# MAGIC -- OLD WIth study_end as 2021, 5, 31 -> 1,081,496
# MAGIC -- current with study_end as 2021, 11, 30 -> 917,278
# MAGIC SELECT count(DISTINCT person_id_deid) FROM global_temp.followup_time
# MAGIC WHERE 28d_followup == 0
# COMMAND ----------
# MAGIC %sql
# MAGIC --- CHECK that no follwup time is less than -1
# MAGIC SELECT * FROM global_temp.followup_time
# MAGIC where followup_days < -1
# COMMAND ----------
# MAGIC %md
# MAGIC #### 1.1.2 Subset trajectory table
# MAGIC Subset for cohort population - inclusion time and minimum follow-up
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Current @ 22.01.22 = 8,714,594
# MAGIC SELECT count (DISTINCT person_id_deid) from dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Current @ 22.01.22 = 8,714,455
# MAGIC -- Removes only: 139 patients
# MAGIC SELECT count (DISTINCT person_id_deid) from dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory
# MAGIC WHERE date >= "2020-01-23" AND date <= "2021-11-30"
# COMMAND ----------
# MAGIC %sql
# MAGIC -- These are those records dated before index event
# MAGIC -- NB 597 unique IDs here, but these patients could also have event within study dates
# MAGIC SELECT * from dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory
# MAGIC WHERE date < "2020-01-23" OR date > "2021-11-30"
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Subset trajectory table to cohort population and cohort timeline
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_paper_cohort_tmp AS
# MAGIC SELECT tab1.* FROM
# MAGIC dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory tab1
# MAGIC INNER JOIN
# MAGIC dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020 tab2
# MAGIC ON
# MAGIC tab1.person_id_deid = tab2.NHS_NUMBER_DEID
# MAGIC WHERE date >= "2020-01-23" AND date <= "2021-11-30"
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Value @ 150621 3567617
# MAGIC -- Value @ 170821 3705123
# MAGIC -- Value @ 220222 8103909
# MAGIC SELECT count (DISTINCT person_id_deid) from global_temp.ccu013_covid_trajectory_paper_cohort_tmp
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Remove those based on minimum follow-up criteria
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_paper_cohort as
# MAGIC WITH list_patients_to_omit AS (SELECT person_id_deid from global_temp.followup_time WHERE 28d_followup == 0)
# MAGIC SELECT /*+ BROADCAST(list_patients_to_omit) */ t.* FROM global_temp.ccu013_covid_trajectory_paper_cohort_tmp as t
# MAGIC LEFT ANTI JOIN list_patients_to_omit ON t.person_id_deid = list_patients_to_omit.person_id_deid
# COMMAND ----------
drop_table("ccu013_covid_trajectory_paper_cohort")
create_table("ccu013_covid_trajectory_paper_cohort")
# COMMAND ----------
# MAGIC %sql
# MAGIC OPTIMIZE dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort ZORDER BY person_id_deid
# COMMAND ----------
# MAGIC %sql
# MAGIC -- value @ 150621 = 3454653
# MAGIC -- value @ 170821 = 3469528
# MAGIC -- value @ 220122 = 7244925
# MAGIC SELECT count (DISTINCT person_id_deid) from dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort
# COMMAND ----------
# MAGIC %sql
# MAGIC -- value @ 150621 = 8683174
# MAGIC -- value @ 170821 = 8825738
# MAGIC -- value @ 220122 = 13990423
# MAGIC SELECT count (*) as total_records from dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT covid_phenotype, count (DISTINCT person_id_deid) as unique_ids, count (person_id_deid) as observations
# MAGIC from dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort
# MAGIC group by covid_phenotype
# MAGIC order by covid_phenotype
# COMMAND ----------
# MAGIC %md
# MAGIC #### 1.1.3 Recreate severity table using cohort only info
# COMMAND ----------
# MAGIC %sql
# MAGIC -- OLD value 5,044,357
# MAGIC -- Current value 8,714,594
# MAGIC SELECT count (DISTINCT person_id_deid) from dars_nic_391419_j3w9t_collab.ccu013_covid_severity
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_severity_paper_cohort AS
# MAGIC SELECT DISTINCT s.person_id_deid, s.date, s.covid_severity, s.ProductionDate FROM dars_nic_391419_j3w9t_collab.ccu013_covid_severity as s
# MAGIC INNER JOIN dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort as t
# MAGIC ON s.person_id_deid == t.person_id_deid
# COMMAND ----------
drop_table("ccu013_covid_severity_paper_cohort")
create_table("ccu013_covid_severity_paper_cohort")
# COMMAND ----------
# MAGIC %sql
# MAGIC REFRESH dars_nic_391419_j3w9t_collab.ccu013_covid_severity_paper_cohort
# COMMAND ----------
# MAGIC %sql
# MAGIC -- value @ 150621 = 3454653
# MAGIC -- value @ 170821 = 3469528
# MAGIC -- Current @ 220122 = 7244925
# MAGIC SELECT count(DISTINCT person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_severity_paper_cohort
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT count(*) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_severity_paper_cohort
# COMMAND ----------
# MAGIC %md
# MAGIC # 2 Create input for patient trajectory plots
# MAGIC - Create order and simplified phenotype groups for the plots
# MAGIC - Get the first event date from the new simplified trajectory phenotypes and order by id, date and phenotype order.
# MAGIC - Calculate days between events and write to table for further processing in R
# MAGIC - see ccu013 R script ccu013_trajectory_finder.R for next steps
# COMMAND ----------
# MAGIC %md
# MAGIC ## 2.1 Full study period
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Query to get all events includeing the uneffected event at the start of the pandemic for all individuals in study.
# MAGIC ---SELECT covid_severity, count(covid_severity) FROM (
# MAGIC SELECT person_id_deid, date, covid_phenotype, (CASE WHEN covid_severity IS NULL THEN '00_unaffected' ELSE covid_severity END) AS covid_severity, phenotype_order, trajectory_phenotype FROM (
# MAGIC SELECT NHS_NUMBER_DEID AS person_id_deid, DATE('2020-01-23') AS date, '00_Unaffected' AS covid_phenotype, covid_severity, 0 AS phenotype_order, 'Unaffected' AS trajectory_phenotype FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020 AS a
# MAGIC LEFT JOIN dars_nic_391419_j3w9t_collab.ccu013_covid_severity_paper_cohort AS b ON a.NHS_NUMBER_DEID = b.person_id_deid)
# MAGIC ---)group by covid_severity
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Create an ordered and simplified phenotype groups table
# MAGIC --- This includes all events includeing the uneffected event at the start of the pandemic for all individuals in study.
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_plot_data_tmp AS
# MAGIC SELECT * FROM
# MAGIC (SELECT DISTINCT tab1.person_id_deid, tab1.date, tab1.covid_phenotype, tab2.covid_severity,
# MAGIC (case covid_phenotype
# MAGIC when "01_Covid_positive_test" then 1
# MAGIC when "01_GP_covid_diagnosis" then 2
# MAGIC when "02_Covid_admission" then 3
# MAGIC when "03_NIV_treatment" then 4
# MAGIC when "03_ICU_admission" then 4
# MAGIC when "03_IMV_treatment" then 4
# MAGIC when "03_ECMO_treatment" then 4
# MAGIC when "04_Fatal_with_covid_diagnosis" then 5
# MAGIC when "04_Fatal_without_covid_diagnosis" then 5
# MAGIC when "04_Covid_inpatient_death" then 5 ELSE NULL end) as phenotype_order,
# MAGIC (case covid_phenotype
# MAGIC when "01_Covid_positive_test" then "Positive test"
# MAGIC when "01_GP_covid_diagnosis" then "Primary care diagnosis"
# MAGIC when "02_Covid_admission" then "Hospitalisation"
# MAGIC when "03_NIV_treatment" then "Critical care"
# MAGIC when "03_ICU_admission" then "Critical care"
# MAGIC when "03_IMV_treatment" then "Critical care"
# MAGIC when "03_ECMO_treatment" then "Critical care"
# MAGIC when "04_Fatal_with_covid_diagnosis" then "Death"
# MAGIC when "04_Fatal_without_covid_diagnosis" then "Death"
# MAGIC when "04_Covid_inpatient_death" then "Death" ELSE NULL end) as trajectory_phenotype
# MAGIC FROM
# MAGIC dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort as tab1
# MAGIC LEFT JOIN dars_nic_391419_j3w9t_collab.ccu013_covid_severity as tab2 ON tab1.person_id_deid = tab2.person_id_deid
# MAGIC UNION ALL
# MAGIC SELECT person_id_deid, date, covid_phenotype, (CASE WHEN covid_severity IS NULL THEN '00_unaffected' ELSE covid_severity END) AS covid_severity, phenotype_order, trajectory_phenotype FROM (
# MAGIC SELECT NHS_NUMBER_DEID AS person_id_deid, DATE('2020-01-23') AS date, '00_Unaffected' AS covid_phenotype, covid_severity, 0 AS phenotype_order, 'Unaffected' AS trajectory_phenotype FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020 AS a
# MAGIC LEFT JOIN dars_nic_391419_j3w9t_collab.ccu013_covid_severity_paper_cohort AS b ON a.NHS_NUMBER_DEID = b.person_id_deid))
# MAGIC ORDER BY person_id_deid, date, phenotype_order
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Exemplar of ordered data
# MAGIC SELECT * from global_temp.ccu013_covid_trajectory_plot_data_tmp
# MAGIC WHERE person_id_deid = '00046L6S0IX8YE1'
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Example of query used below to caclualte time between events per ID
# MAGIC --- Get the event dates from the new trajectory phenotypes and order by id, date and phenotype order.
# MAGIC SELECT DISTINCT person_id_deid, min(date) as date, covid_severity, trajectory_phenotype, phenotype_order from global_temp.ccu013_covid_trajectory_plot_data_tmp
# MAGIC GROUP BY person_id_deid, phenotype_order, trajectory_phenotype, covid_severity
# MAGIC ORDER BY person_id_deid, date, phenotype_order
# COMMAND ----------
## 3) Calculate days between events and write to table for further processing in R
### see ccu013 R script ccu013_trajectory_finder.R for next steps
from pyspark.sql.functions import *
import pyspark.sql.functions as f
from pyspark.sql.window import Window
traject_data = spark.sql("""
SELECT DISTINCT person_id_deid, min(date) as date, covid_severity, trajectory_phenotype, phenotype_order from global_temp.ccu013_covid_trajectory_plot_data_tmp
GROUP BY person_id_deid, phenotype_order, trajectory_phenotype, covid_severity
ORDER BY person_id_deid, date, phenotype_order
""")
window = Window.partitionBy('person_id_deid').orderBy(['date', 'phenotype_order'])
# Calculate difference in days per ID
traject_data = traject_data.withColumn("days_passed", f.datediff(traject_data.date,
f.lag(traject_data.date, 1).over(window)))
#display(traject_data)
traject_data.createOrReplaceGlobalTempView("ccu013_covid_trajectory_graph_data")
drop_table("ccu013_covid_trajectory_graph_data")
create_table("ccu013_covid_trajectory_graph_data")
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Examplar output for one individual
# MAGIC SELECT * FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data
# MAGIC WHERE person_id_deid = '00046L6S0IX8YE1'
# COMMAND ----------
# MAGIC %sql
# MAGIC REFRESH dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data
# COMMAND ----------
# MAGIC %sql
# MAGIC -- 56609049
# MAGIC -- 57032174
# MAGIC SELECT count (distinct person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data
# COMMAND ----------
# MAGIC %md
# MAGIC ## 2.2 wave 1 - trajectory input
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Query to define all pople included in wave 1
# MAGIC --- This is used below to subset the trajectory graph data
# MAGIC --- SELECT * FROM
# MAGIC SELECT count(distinct a.person_id_deid) FROM
# MAGIC (SELECT DISTINCT NHS_NUMBER_DEID as person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020) as a
# MAGIC --- Remove anyone with a
# MAGIC LEFT ANTI JOIN (SELECT person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort WHERE date < "2020-03-20") as t
# MAGIC ON a.person_id_deid = t.person_id_deid
# MAGIC LEFT JOIN (SELECT person_id_deid, min(death_date) as death_date FROM dars_nic_391419_j3w9t_collab.ccu013_tmp_deaths group by person_id_deid) as c
# MAGIC ON a.person_id_deid = c.person_id_deid
# MAGIC WHERE death_date > "2020-03-20" OR death_date is null
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_graph_data_wave1 AS
# MAGIC SELECT * FROM
# MAGIC (SELECT a.person_id_deid, a.date, a.covid_severity, a.trajectory_phenotype, a.phenotype_order, a.days_passed
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data as a
# MAGIC INNER JOIN (SELECT j.person_id_deid FROM
# MAGIC (SELECT DISTINCT NHS_NUMBER_DEID as person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020) as j
# MAGIC --- Remove anyone who had covid before the wave
# MAGIC LEFT ANTI JOIN (SELECT person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort WHERE date < "2020-03-20") as t
# MAGIC ON j.person_id_deid = t.person_id_deid
# MAGIC --- Remove anyone who died before the wave
# MAGIC LEFT JOIN (SELECT person_id_deid, min(death_date) as death_date FROM dars_nic_391419_j3w9t_collab.ccu013_tmp_deaths group by person_id_deid) as c
# MAGIC ON j.person_id_deid = c.person_id_deid
# MAGIC WHERE death_date > "2020-03-20" OR death_date is null ) as b
# MAGIC ON a.person_id_deid == b.person_id_deid
# MAGIC WHERE date <= date_add(TO_DATE("2020-05-29"),28))
# COMMAND ----------
drop_table("ccu013_covid_trajectory_graph_data_wave1")
create_table("ccu013_covid_trajectory_graph_data_wave1")
# COMMAND ----------
# MAGIC %sql
# MAGIC -- old value - 56491308
# MAGIC -- current value = 56945027
# MAGIC SELECT count(distinct person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave1
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Old = 57035046
# MAGIC --- current value = 57490005
# MAGIC SELECT count(*) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave1
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Old = 263,839
# MAGIC --- OBS Not sure this is in use any more
# MAGIC ---SELECT count (distinct person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort_wave1
# COMMAND ----------
# MAGIC %sql
# MAGIC --- 3456753
# MAGIC --- 7232055
# MAGIC SELECT count (distinct person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave1
# MAGIC ---SELECT * FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave1
# MAGIC WHERE covid_severity != "00_unaffected" ---AND date <= date_add(TO_DATE("2020-05-29"),28)
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Old value = 53034555
# MAGIC --- Current value @ = 49712972
# MAGIC SELECT count (distinct person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave1
# MAGIC ---SELECT * FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave1
# MAGIC WHERE covid_severity == "00_unaffected" ---AND date <= date_add(TO_DATE("2020-05-29"),28)
# COMMAND ----------
# MAGIC %md
# MAGIC ## 2.3 wave 2 - trajectory input
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Old value = 55774208
# MAGIC --- New value @ 220122 = 56225024
# MAGIC --- Query to define all pople included in wave 2
# MAGIC --- This is used below to subset the trajectory graph data
# MAGIC SELECT count(distinct a.person_id_deid) FROM
# MAGIC (SELECT DISTINCT NHS_NUMBER_DEID as person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020) as a
# MAGIC LEFT ANTI JOIN (SELECT person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort WHERE date < "2020-09-30") as t
# MAGIC ON a.person_id_deid = t.person_id_deid
# MAGIC LEFT JOIN (SELECT person_id_deid, min(death_date) as death_date FROM dars_nic_391419_j3w9t_collab.ccu013_tmp_deaths group by person_id_deid) as c
# MAGIC ON a.person_id_deid = c.person_id_deid
# MAGIC WHERE death_date > "2020-09-30" OR death_date is null
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_graph_data_wave2 AS
# MAGIC SELECT * FROM
# MAGIC (SELECT a.person_id_deid, a.date, a.covid_severity, a.trajectory_phenotype, a.phenotype_order, a.days_passed
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data as a
# MAGIC INNER JOIN (SELECT j.person_id_deid FROM
# MAGIC (SELECT DISTINCT NHS_NUMBER_DEID as person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020) as j
# MAGIC LEFT ANTI JOIN (SELECT person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort WHERE date < "2020-09-30") as t
# MAGIC ON j.person_id_deid = t.person_id_deid
# MAGIC LEFT JOIN (SELECT person_id_deid, min(death_date) as death_date FROM dars_nic_391419_j3w9t_collab.ccu013_tmp_deaths group by person_id_deid) as c
# MAGIC ON j.person_id_deid = c.person_id_deid
# MAGIC WHERE death_date > "2020-09-30" OR death_date is null ) as b
# MAGIC ON a.person_id_deid == b.person_id_deid
# MAGIC WHERE date <= date_add(TO_DATE("2021-02-12"),28))
# COMMAND ----------
drop_table("ccu013_covid_trajectory_graph_data_wave2")
create_table("ccu013_covid_trajectory_graph_data_wave2")
# COMMAND ----------
# MAGIC %md
# MAGIC # 3. Trajectory plot input - ICU only as Critical care.
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Create an ordered and simplified phenotype groups table
# MAGIC --- This includes all events includeing the uneffected event at the start of the pandemic for all individuals in study.
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_plot_data_icu_tmp AS
# MAGIC SELECT * FROM
# MAGIC (SELECT DISTINCT tab1.person_id_deid, tab1.date, tab1.covid_phenotype, tab2.covid_severity,
# MAGIC (case covid_phenotype
# MAGIC when "01_Covid_positive_test" then 1
# MAGIC when "01_GP_covid_diagnosis" then 2
# MAGIC when "02_Covid_admission" then 3
# MAGIC when "03_NIV_treatment" then NULL
# MAGIC when "03_ICU_admission" then 4
# MAGIC when "03_IMV_treatment" then NULL
# MAGIC when "03_ECMO_treatment" then NULL
# MAGIC when "04_Fatal_with_covid_diagnosis" then 5
# MAGIC when "04_Fatal_without_covid_diagnosis" then 5
# MAGIC when "04_Covid_inpatient_death" then 5 ELSE NULL end) as phenotype_order,
# MAGIC (case covid_phenotype
# MAGIC when "01_Covid_positive_test" then "Positive test"
# MAGIC when "01_GP_covid_diagnosis" then "Primary care diagnosis"
# MAGIC when "02_Covid_admission" then "Hospitalisation"
# MAGIC when "03_NIV_treatment" then NULL
# MAGIC when "03_ICU_admission" then "ICU admission"
# MAGIC when "03_IMV_treatment" then NULL
# MAGIC when "03_ECMO_treatment" then NULL
# MAGIC when "04_Fatal_with_covid_diagnosis" then "Death"
# MAGIC when "04_Fatal_without_covid_diagnosis" then "Death"
# MAGIC when "04_Covid_inpatient_death" then "Death" ELSE NULL end) as trajectory_phenotype
# MAGIC FROM
# MAGIC dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort as tab1
# MAGIC LEFT JOIN dars_nic_391419_j3w9t_collab.ccu013_covid_severity as tab2 ON tab1.person_id_deid = tab2.person_id_deid
# MAGIC UNION ALL
# MAGIC SELECT person_id_deid, date, covid_phenotype, (CASE WHEN covid_severity IS NULL THEN '00_unaffected' ELSE covid_severity END) AS covid_severity, phenotype_order, trajectory_phenotype FROM (
# MAGIC SELECT NHS_NUMBER_DEID AS person_id_deid, DATE('2020-01-23') AS date, '00_Unaffected' AS covid_phenotype, covid_severity, 0 AS phenotype_order, 'Unaffected' AS trajectory_phenotype FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020 AS a
# MAGIC LEFT JOIN dars_nic_391419_j3w9t_collab.ccu013_covid_severity_paper_cohort AS b ON a.NHS_NUMBER_DEID = b.person_id_deid))
# MAGIC WHERE phenotype_order is not NULL
# MAGIC ORDER BY person_id_deid, date, phenotype_order
# COMMAND ----------
# MAGIC %sql
# MAGIC --- Exemplar of ordered data
# MAGIC SELECT * from global_temp.ccu013_covid_trajectory_plot_data_icu_tmp
# MAGIC WHERE person_id_deid = '00046L6S0IX8YE1'
# COMMAND ----------
## 3) Calculate days between events and write to table for further processing in R
### see ccu013 R script ccu013_trajectory_finder.R for next steps
from pyspark.sql.functions import *
import pyspark.sql.functions as f
from pyspark.sql.window import Window
traject_data = spark.sql("""
SELECT DISTINCT person_id_deid, min(date) as date, covid_severity, trajectory_phenotype, phenotype_order from global_temp.ccu013_covid_trajectory_plot_data_icu_tmp
GROUP BY person_id_deid, phenotype_order, trajectory_phenotype, covid_severity
ORDER BY person_id_deid, date, phenotype_order
""")
window = Window.partitionBy('person_id_deid').orderBy(['date', 'phenotype_order'])
# Calculate difference in days per ID
traject_data = traject_data.withColumn("days_passed", f.datediff(traject_data.date,
f.lag(traject_data.date, 1).over(window)))
#display(traject_data)
traject_data.createOrReplaceGlobalTempView("ccu013_covid_trajectory_graph_data_icu")
drop_table("ccu013_covid_trajectory_graph_data_icu")
create_table("ccu013_covid_trajectory_graph_data_icu")
# COMMAND ----------
# MAGIC %md
# MAGIC ### 3.1 Wave 1
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_graph_data_wave1_icu AS
# MAGIC SELECT * FROM
# MAGIC (SELECT a.person_id_deid, a.date, a.covid_severity, a.trajectory_phenotype, a.phenotype_order, a.days_passed
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_icu as a
# MAGIC INNER JOIN (SELECT j.person_id_deid FROM
# MAGIC (SELECT DISTINCT NHS_NUMBER_DEID as person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020) as j
# MAGIC --- Remove anyone who had covid before the wave
# MAGIC LEFT ANTI JOIN (SELECT person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort WHERE date < "2020-03-20") as t
# MAGIC ON j.person_id_deid = t.person_id_deid
# MAGIC --- Remove anyone who died before the wave
# MAGIC LEFT JOIN (SELECT person_id_deid, min(death_date) as death_date FROM dars_nic_391419_j3w9t_collab.ccu013_tmp_deaths group by person_id_deid) as c
# MAGIC ON j.person_id_deid = c.person_id_deid
# MAGIC WHERE death_date > "2020-03-20" OR death_date is null ) as b
# MAGIC ON a.person_id_deid == b.person_id_deid
# MAGIC WHERE date <= date_add(TO_DATE("2020-05-29"),28))
# COMMAND ----------
drop_table("ccu013_covid_trajectory_graph_data_wave1_icu")
create_table("ccu013_covid_trajectory_graph_data_wave1_icu")
# COMMAND ----------
# MAGIC %sql
# MAGIC -- OLD - = 56491308
# MAGIC -- New @ 220122 = 56945027
# MAGIC SELECT count(distinct person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave1_icu
# COMMAND ----------
# MAGIC %md
# MAGIC ### 3.2 Wave 2
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW ccu013_covid_trajectory_graph_data_wave2_icu AS
# MAGIC SELECT * FROM
# MAGIC (SELECT a.person_id_deid, a.date, a.covid_severity, a.trajectory_phenotype, a.phenotype_order, a.days_passed
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_icu as a
# MAGIC INNER JOIN (SELECT j.person_id_deid FROM
# MAGIC (SELECT DISTINCT NHS_NUMBER_DEID as person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_dp_skinny_patient_23_01_2020) as j
# MAGIC LEFT ANTI JOIN (SELECT person_id_deid FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_paper_cohort WHERE date < "2020-09-30") as t
# MAGIC ON j.person_id_deid = t.person_id_deid
# MAGIC LEFT JOIN (SELECT person_id_deid, min(death_date) as death_date FROM dars_nic_391419_j3w9t_collab.ccu013_tmp_deaths group by person_id_deid) as c
# MAGIC ON j.person_id_deid = c.person_id_deid
# MAGIC WHERE death_date > "2020-09-30" OR death_date is null ) as b
# MAGIC ON a.person_id_deid == b.person_id_deid
# MAGIC WHERE date <= date_add(TO_DATE("2021-02-12"),28))
# COMMAND ----------
drop_table("ccu013_covid_trajectory_graph_data_wave2_icu")
create_table("ccu013_covid_trajectory_graph_data_wave2_icu")
# COMMAND ----------
# MAGIC %sql
# MAGIC -- OLD - 55774208
# MAGIC -- New = 56225024
# MAGIC SELECT count(distinct person_id_deid) FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory_graph_data_wave2_icu
# COMMAND ----------
# MAGIC %md
# MAGIC # 4 Reinfections (not currently used!)
# MAGIC - Identify all individuals who have had a reinfection with COVID-19
# MAGIC - __NB Not included in paper__ due to issues with non overlap on individual and time basis between sgss and pillar2
# COMMAND ----------
#import pyspark.sql.functions as funcs
#from pyspark.sql.window import Window
#reinfec = spark.sql("""
#SELECT person_id_deid, date FROM dars_nic_391419_j3w9t_collab.ccu013_covid_trajectory
#WHERE covid_phenotype in ('01_Covid_positive_test')
#""")
#reinfect_threshold = 90 # SIREN study
# Find days between consecutive positive COVID tests
# Define window to particion by
#window = Window.partitionBy('person_id_deid').orderBy('date')
# Calculate difference in days per ID
#reinfec = reinfec.withColumn("days_passed", funcs.datediff(reinfec.date,
# funcs.lag(reinfec.date, 1).over(window)))
# Save to table
#reinfec.createOrReplaceGlobalTempView("ccu013_covid_reinfection_days_between_positive_tests")
#drop_table("ccu013_covid_reinfection_days_between_positive_tests")
#create_table("ccu013_covid_reinfection_days_between_positive_tests")
# Get the maximum difference in days between positive tests per individual
#w = Window.partitionBy('person_id_deid')
#reinfec_max_days = reinfec.withColumn('max_days_passed', f.max('days_passed').over(w))\
# .where(f.col('days_passed') == f.col('max_days_passed'))\
# .drop('max_days_passed')
## Find reinfected using reinfect_threshold
#reinfec_max_days = reinfec_max_days.withColumn('reinfected', f.when((f.col('days_passed') >= reinfect_threshold),1).otherwise(0))
#reinfec_max_days = reinfec_max_days.where(f.col('reinfected') == 1)
# Save to table
#reinfec_max_days.createOrReplaceGlobalTempView("ccu013_covid_reinfected_after_90_days")
#drop_table("ccu013_covid_reinfected_after_90_days")
#create_table("ccu013_covid_reinfected_after_90_days")
# COMMAND ----------
| [
[
[
1549,
1552
],
[
3078,
3081
]
],
[
[
1554,
1557
]
],
[
[
1559,
1562
]
],
[
[
1585,
1591
]
],
[
[
1616,
1625
]
],
[
[
1647,
1655
],
[
3082,
3090
]
],
[
[
1686,
1694
]
],
[
[
3024,
3025
],
[
3119,
3124
],
[
3554,
3559
],
[
4329,
4333
],
[
4372,
4380
],
[
4556,
4560
],
[
7765,
7775
],
[
7816,
7828
],
[
9568,
9578
],
[
9617,
9629
]
],
[
[
3061,
3075
],
[
4215,
4229
]
],
[
[
3107,
3116
],
[
4025,
4034
]
],
[
[
3538,
3551
],
[
4006,
4019
]
],
[
[
3990,
4003
],
[
4084,
4097
]
],
[
[
4068,
4081
],
[
4177,
4190
]
],
[
[
4161,
4174
],
[
4246,
4259
],
[
4334,
4347
],
[
4381,
4394
],
[
4409,
4422
]
],
[
[
4231,
4244
],
[
4511,
4524
],
[
4562,
4575
],
[
4603,
4616
]
],
[
[
4495,
4508
],
[
4683,
4696
]
],
[
[
14949,
14950
],
[
15038,
15043
],
[
15733,
15743
],
[
15782,
15794
],
[
18536,
18546
],
[
18591,
18603
],
[
22255,
22265
],
[
22310,
22322
]
],
[
[
14958,
14984
],
[
15520,
15521
],
[
15585,
15586
]
],
[
[
15016,
15022
],
[
15353,
15359
]
],
[
[
15023,
15035
],
[
15481,
15493
],
[
15531,
15543
],
[
15591,
15603
]
],
[
[
15344,
15350
],
[
15618,
15624
]
],
[
[
15466,
15478
],
[
15652,
15664
]
],
[
[
25363,
25364
],
[
25452,
25457
],
[
26155,
26165
],
[
26208,
26220
],
[
27562,
27572
],
[
27621,
27633
],
[
29105,
29115
],
[
29164,
29176
]
],
[
[
25372,
25398
],
[
25938,
25939
],
[
26003,
26004
]
],
[
[
25430,
25436
],
[
25771,
25777
]
],
[
[
25437,
25449
],
[
25899,
25911
],
[
25949,
25961
],
[
26009,
26021
]
],
[
[
25762,
25768
],
[
26036,
26042
]
],
[
[
25884,
25896
],
[
26070,
26082
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
from keyword import kwlist
from ._compat import isidentifier
dict_list = [x for x in dict.__dict__]
kwset = set(kwlist + dict_list) # this is faster than iskeyword()
pat_identifier = re.compile(r"^[a-zA-Z_]\w*$")
def is_invalid_key(s):
# type: (str) -> Bool
"""
Check if a string is not a valid identifier and thus unsuitable for use as a
Pstruct key.
Invalid
:param s: string to check
:type s: str
:return: True if string is invalid
:rtype: bool
>>> is_invalid_key('aoeu')
False
>>> is_invalid_key('[aoeu')
True
>>> is_invalid_key('2aoeu')
True
>>> is_invalid_key('_2aoeu')
False
>>> is_invalid_key('ao.eu')
True
>>> is_invalid_key('items')
True
"""
if s in kwset:
return True
return not isidentifier(s)
class InvalidKeyName(Exception):
"""Key is not a valid identifier"""
def __init__(self, key_or_keys):
msg = (
"The following keys cannot be used as a key because either it is a "
"builtin method, or is not a valid identifier: {}".format(key_or_keys)
)
super(InvalidKeyName, self).__init__(msg)
| [
[
[
53,
56
]
],
[
[
64,
66
],
[
253,
255
]
],
[
[
87,
93
],
[
180,
186
]
],
[
[
115,
127
],
[
868,
880
]
],
[
[
129,
138
],
[
189,
198
]
],
[
[
168,
173
],
[
826,
831
]
],
[
[
236,
250
]
],
[
[
289,
303
]
],
[
[
892,
906
],
[
1202,
1216
]
]
] |
# Owner(s): ["oncall: fx"]
import torch
import torch.fx.experimental.fx_acc.acc_ops as acc_ops
from torch.testing._internal.common_fx2trt import AccTestCase, InputTensorSpec
from parameterized import parameterized
from torch.testing._internal.common_utils import run_tests
class TestReshapeConverter(AccTestCase):
@parameterized.expand(
[
((1, 20),),
((1, 10, -1),),
]
)
def test_reshape(self, target_shape):
class TestModule(torch.nn.Module):
def __init__(self, target_shape):
super().__init__()
self.target_shape = target_shape
def forward(self, x):
return torch.reshape(x, self.target_shape)
inputs = [torch.randn(1, 2, 10)]
self.run_test(TestModule(target_shape), inputs, expected_ops={acc_ops.reshape})
@parameterized.expand(
[
((-1, 2),),
((1, 2, -1),),
]
)
def test_reshape_with_dynamic_shape(self, target_shape):
class TestModule(torch.nn.Module):
def __init__(self, target_shape):
super().__init__()
self.target_shape = target_shape
def forward(self, x):
return torch.reshape(x, self.target_shape)
input_specs = [
InputTensorSpec(
shape=(-1, -1, -1),
dtype=torch.float32,
shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
),
]
self.run_test_with_dynamic_shape(
TestModule(target_shape), input_specs, expected_ops={acc_ops.reshape}
)
if __name__ == '__main__':
run_tests()
| [
[
[
35,
40
],
[
489,
494
],
[
750,
755
],
[
1052,
1057
],
[
1406,
1411
],
[
695,
700
],
[
1258,
1263
]
],
[
[
48,
95
],
[
843,
850
],
[
1619,
1626
]
],
[
[
146,
157
],
[
303,
314
]
],
[
[
159,
174
],
[
1331,
1346
]
],
[
[
201,
214
],
[
322,
335
],
[
867,
880
]
],
[
[
264,
273
],
[
1678,
1687
]
],
[
[
282,
302
]
]
] |
from __future__ import print_function, division, absolute_import
import time
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import cv2
from scipy import ndimage
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug.testutils import keypoints_equal, reseed
from imgaug.augmenters import meta
def main():
time_start = time.time()
test_GaussianBlur()
test_AverageBlur()
test_MedianBlur()
# TODO BilateralBlur
time_end = time.time()
print("<%s> Finished without errors in %.4fs." % (__file__, time_end - time_start,))
def test_GaussianBlur():
reseed()
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no blur, shouldnt change anything
aug = iaa.GaussianBlur(sigma=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
# weak blur of center pixel
aug = iaa.GaussianBlur(sigma=0.5)
aug_det = aug.to_deterministic()
# images as numpy array
observed = aug.augment_images(images)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
observed = aug_det.augment_images(images)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
# images as list
observed = aug.augment_images(images_list)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
observed = aug_det.augment_images(images_list)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
# keypoints shouldnt be changed
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# varying blur sigmas
aug = iaa.GaussianBlur(sigma=(0, 1))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
#############################
# test other dtypes below
# ndimage.gaussian_filter() rejects: float16
# float64 implementation in gaussian_filter() was too inaccurate
#############################
# --
# blur of various dtypes at sigma=0
# --
aug = iaa.GaussianBlur(sigma=0)
# bool
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == image)
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
_min_value, center_value, _max_value = meta.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = int(center_value)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == image)
# float
for dtype in [np.float16, np.float32, np.float64]:
_min_value, center_value, _max_value = meta.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = center_value
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, image)
# --
# blur of various dtypes at sigma=1.0
# and using an example value of 100 for int/uint/float and True for bool
# --
aug = iaa.GaussianBlur(sigma=1.0)
# prototype kernel, generated via:
# mask = np.zeros((3, 3), dtype=np.float64)
# mask[1, 1] = 1.0
# mask = ndimage.gaussian_filter(mask, 1.0)
kernel = np.float64([
[0.08767308, 0.12075024, 0.08767308],
[0.12075024, 0.16630671, 0.12075024],
[0.08767308, 0.12075024, 0.08767308]
])
# bool
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
expected = kernel > 0.5
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == expected)
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = 100
image_aug = aug.augment_image(image)
expected = (kernel * 100).astype(dtype)
diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))
assert image_aug.dtype.type == dtype
assert np.max(diff) <= 2
# float
for dtype in [np.float16, np.float32, np.float64]:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = 100.0
image_aug = aug.augment_image(image)
expected = (kernel * 100.0).astype(dtype)
diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))
assert image_aug.dtype.type == dtype
assert np.max(diff) < 1.0
# --
# blur of various dtypes at sigma=0.4
# and using an example value of 100 for int/uint/float and True for bool
# --
aug = iaa.GaussianBlur(sigma=0.4)
# prototype kernel, generated via:
# mask = np.zeros((3, 3), dtype=np.float64)
# mask[1, 1] = 1.0
# kernel = ndimage.gaussian_filter(mask, 0.4)
kernel = np.float64([
[0.00163144, 0.03712817, 0.00163144],
[0.03712817, 0.84496158, 0.03712817],
[0.00163144, 0.03712817, 0.00163144]
])
# bool
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
expected = kernel > 0.5
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == expected)
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = 100
image_aug = aug.augment_image(image)
expected = (kernel * 100).astype(dtype)
diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))
assert image_aug.dtype.type == dtype
assert np.max(diff) <= 2
# float
for dtype in [np.float16, np.float32, np.float64]:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = 100.0
image_aug = aug.augment_image(image)
expected = (kernel * 100.0).astype(dtype)
diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))
assert image_aug.dtype.type == dtype
assert np.max(diff) < 1.0
# --
# blur of various dtypes at sigma=0.75
# and values being half-way between center and maximum for each dtype (bool is skipped as it doesnt make any
# sense here)
# The goal of this test is to verify that no major loss of resolution happens for large dtypes.
# Such inaccuracies appear for float64 if used.
# --
aug = iaa.GaussianBlur(sigma=0.75)
# prototype kernel, generated via:
# mask = np.zeros((3, 3), dtype=np.float64)
# mask[1, 1] = 1.0
# kernel = ndimage.gaussian_filter(mask, 0.75)
kernel = np.float64([
[0.05469418, 0.12447951, 0.05469418],
[0.12447951, 0.28330525, 0.12447951],
[0.05469418, 0.12447951, 0.05469418]
])
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
_min_value, center_value, max_value = meta.get_value_range_of_dtype(dtype)
value = int(center_value + 0.4 * max_value)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
expected = (kernel * value).astype(dtype)
diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))
assert image_aug.dtype.type == dtype
# accepts difference of 4, 8, 16 (at 1, 2, 4 bytes, i.e. 8, 16, 32 bit)
assert np.max(diff) <= 2**(1 + np.dtype(dtype).itemsize)
# float
for dtype, value in zip([np.float16, np.float32, np.float64], [5000, 1000*1000, 1000*1000*1000]):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
expected = (kernel * value).astype(dtype)
diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))
assert image_aug.dtype.type == dtype
# accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes, i.e. 8, 16, 32, 64 bit)
assert np.max(diff) < 2**(1 + np.dtype(dtype).itemsize)
# assert failure on invalid dtypes
aug = iaa.GaussianBlur(sigma=1.0)
for dt in [np.uint64, np.int64, np.float128]:
got_exception = False
try:
_ = aug.augment_image(np.zeros((1, 1), dtype=dt))
except Exception as exc:
assert "forbidden dtype" in str(exc)
got_exception = True
assert got_exception
def test_AverageBlur():
reseed()
base_img = np.zeros((11, 11, 1), dtype=np.uint8)
base_img[5, 5, 0] = 200
base_img[4, 5, 0] = 100
base_img[6, 5, 0] = 100
base_img[5, 4, 0] = 100
base_img[5, 6, 0] = 100
blur3x3 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
[0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
[0, 0, 0, 11, 56, 67, 56, 11, 0, 0, 0],
[0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
[0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur3x3 = np.array(blur3x3, dtype=np.uint8)[..., np.newaxis]
blur4x4 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
[0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
[0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
[0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
[0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
[0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur4x4 = np.array(blur4x4, dtype=np.uint8)[..., np.newaxis]
blur5x5 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
[0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
[0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur5x5 = np.array(blur5x5, dtype=np.uint8)[..., np.newaxis]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no blur, shouldnt change anything
aug = iaa.AverageBlur(k=0)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, base_img)
# k=3
aug = iaa.AverageBlur(k=3)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, blur3x3)
# k=5
aug = iaa.AverageBlur(k=5)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, blur5x5)
# k as (3, 4)
aug = iaa.AverageBlur(k=(3, 4))
nb_iterations = 100
nb_seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, blur4x4):
nb_seen[1] += 1
else:
raise Exception("Unexpected result in AverageBlur@1")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.4 <= p_seen[0] <= 0.6
assert 0.4 <= p_seen[1] <= 0.6
# k as (3, 5)
aug = iaa.AverageBlur(k=(3, 5))
nb_iterations = 100
nb_seen = [0, 0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, blur4x4):
nb_seen[1] += 1
elif np.array_equal(observed, blur5x5):
nb_seen[2] += 1
else:
raise Exception("Unexpected result in AverageBlur@2")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.23 <= p_seen[0] <= 0.43
assert 0.23 <= p_seen[1] <= 0.43
assert 0.23 <= p_seen[2] <= 0.43
# k as stochastic parameter
aug = iaa.AverageBlur(k=iap.Choice([3, 5]))
nb_iterations = 100
nb_seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, blur5x5):
nb_seen[1] += 1
else:
raise Exception("Unexpected result in AverageBlur@3")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.4 <= p_seen[0] <= 0.6
assert 0.4 <= p_seen[1] <= 0.6
# k as ((3, 5), (3, 5))
aug = iaa.AverageBlur(k=((3, 5), (3, 5)))
possible = dict()
for kh in [3, 4, 5]:
for kw in [3, 4, 5]:
key = (kh, kw)
if kh == 0 or kw == 0:
possible[key] = np.copy(base_img)
else:
possible[key] = cv2.blur(base_img, (kh, kw))[..., np.newaxis]
nb_iterations = 250
nb_seen = dict([(key, 0) for key, val in possible.items()])
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(base_img)
for key, img_aug in possible.items():
if np.array_equal(observed, img_aug):
nb_seen[key] += 1
# dont check sum here, because 0xX and Xx0 are all the same, i.e. much
# higher sum than nb_iterations
assert all([v > 0 for v in nb_seen.values()])
# keypoints shouldnt be changed
aug = iaa.AverageBlur(k=3)
aug_det = aug.to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
#############################
# test other dtypes below
#############################
# --
# blur of various dtypes at k=0
# --
aug = iaa.AverageBlur(k=0)
# bool
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image[2, 2] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == image)
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
_min_value, center_value, max_value = meta.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = int(center_value + 0.4 * max_value)
image[2, 2] = int(center_value + 0.4 * max_value)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == image)
# float
for dtype, value in zip([np.float16, np.float32, np.float64], [5000, 1000*1000, 1000*1000*1000]):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image[2, 2] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, image)
# --
# blur of various dtypes at k=3
# and using an example value of 100 for int/uint/float and True for bool
# --
aug = iaa.AverageBlur(k=3)
# prototype mask
# we place values in a 3x3 grid at positions (row=1, col=1) and (row=2, col=2) (beginning with 0)
# AverageBlur uses cv2.blur(), which uses BORDER_REFLECT_101 as its default padding mode,
# see https://docs.opencv.org/3.1.0/d2/de8/group__core__array.html
# the matrix below shows the 3x3 grid and the padded row/col values around it
# [1, 0, 1, 0, 1]
# [0, 0, 0, 0, 0]
# [1, 0, 1, 0, 1]
# [0, 0, 0, 1, 0]
# [1, 0, 1, 0, 1]
mask = np.float64([
[4/9, 2/9, 4/9],
[2/9, 2/9, 3/9],
[4/9, 3/9, 5/9]
])
# bool
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image[2, 2] = True
image_aug = aug.augment_image(image)
expected = mask > 0.5
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == expected)
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = 100
image[2, 2] = 100
image_aug = aug.augment_image(image)
expected = np.round(mask * 100).astype(dtype) # cv2.blur() applies rounding for int/uint dtypes
diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))
assert image_aug.dtype.type == dtype
assert np.max(diff) <= 2
# float
for dtype in [np.float16, np.float32, np.float64]:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = 100.0
image[2, 2] = 100.0
image_aug = aug.augment_image(image)
expected = (mask * 100.0).astype(dtype)
diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))
assert image_aug.dtype.type == dtype
assert np.max(diff) < 1.0
# --
# blur of various dtypes at k=3
# and values being half-way between center and maximum for each dtype (bool is skipped as it doesnt make any
# sense here)
# The goal of this test is to verify that no major loss of resolution happens for large dtypes.
# --
aug = iaa.AverageBlur(k=3)
# prototype mask (see above)
mask = np.float64([
[4/9, 2/9, 4/9],
[2/9, 2/9, 3/9],
[4/9, 3/9, 5/9]
])
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
_min_value, center_value, max_value = meta.get_value_range_of_dtype(dtype)
value = int(center_value + 0.4 * max_value)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image[2, 2] = value
image_aug = aug.augment_image(image)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.int64) - expected.astype(np.int64))
assert image_aug.dtype.type == dtype
# accepts difference of 4, 8, 16 (at 1, 2, 4 bytes, i.e. 8, 16, 32 bit)
assert np.max(diff) <= 2**(1 + np.dtype(dtype).itemsize)
# float
for dtype, value in zip([np.float16, np.float32, np.float64], [5000, 1000*1000, 1000*1000*1000]):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image[2, 2] = value
image_aug = aug.augment_image(image)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.float128) - expected.astype(np.float128))
assert image_aug.dtype.type == dtype
# accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes, i.e. 8, 16, 32, 64 bit)
assert np.max(diff) < 2**(1 + np.dtype(dtype).itemsize)
# assert failure on invalid dtypes
aug = iaa.AverageBlur(k=3)
for dt in [np.uint32, np.uint64, np.int32, np.int64]:
got_exception = False
try:
_ = aug.augment_image(np.zeros((1, 1), dtype=dt))
except Exception as exc:
assert "forbidden dtype" in str(exc)
got_exception = True
assert got_exception
def test_MedianBlur():
reseed()
base_img = np.zeros((11, 11, 1), dtype=np.uint8)
base_img[3:8, 3:8, 0] = 1
base_img[4:7, 4:7, 0] = 2
base_img[5:6, 5:6, 0] = 3
blur3x3 = np.zeros_like(base_img)
blur3x3[3:8, 3:8, 0] = 1
blur3x3[4:7, 4:7, 0] = 2
blur3x3[4, 4, 0] = 1
blur3x3[4, 6, 0] = 1
blur3x3[6, 4, 0] = 1
blur3x3[6, 6, 0] = 1
blur3x3[3, 3, 0] = 0
blur3x3[3, 7, 0] = 0
blur3x3[7, 3, 0] = 0
blur3x3[7, 7, 0] = 0
blur5x5 = np.copy(blur3x3)
blur5x5[4, 3, 0] = 0
blur5x5[3, 4, 0] = 0
blur5x5[6, 3, 0] = 0
blur5x5[7, 4, 0] = 0
blur5x5[4, 7, 0] = 0
blur5x5[3, 6, 0] = 0
blur5x5[6, 7, 0] = 0
blur5x5[7, 6, 0] = 0
blur5x5[blur5x5 > 1] = 1
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no blur, shouldnt change anything
aug = iaa.MedianBlur(k=1)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, base_img)
# k=3
aug = iaa.MedianBlur(k=3)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, blur3x3)
# k=5
aug = iaa.MedianBlur(k=5)
observed = aug.augment_image(base_img)
assert np.array_equal(observed, blur5x5)
# k as (3, 5)
aug = iaa.MedianBlur(k=(3, 5))
seen = [False, False]
for i in sm.xrange(100):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
seen[0] = True
elif np.array_equal(observed, blur5x5):
seen[1] = True
else:
raise Exception("Unexpected result in MedianBlur@1")
if all(seen):
break
assert all(seen)
# k as stochastic parameter
aug = iaa.MedianBlur(k=iap.Choice([3, 5]))
seen = [False, False]
for i in sm.xrange(100):
observed = aug.augment_image(base_img)
if np.array_equal(observed, blur3x3):
seen[0] += True
elif np.array_equal(observed, blur5x5):
seen[1] += True
else:
raise Exception("Unexpected result in MedianBlur@2")
if all(seen):
break
assert all(seen)
# keypoints shouldnt be changed
aug = iaa.MedianBlur(k=3)
aug_det = aug.to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_MotionBlur():
reseed()
# simple scenario
aug = iaa.MotionBlur(k=3, angle=0, direction=0.0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]
expected = np.float32([
[0, 1.0/3, 0],
[0, 1.0/3, 0],
[0, 1.0/3, 0]
])
for matrices_image in matrices:
for matrix_channel in matrices_image:
assert np.allclose(matrix_channel, expected)
# 90deg angle
aug = iaa.MotionBlur(k=3, angle=90, direction=0.0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]
expected = np.float32([
[0, 0, 0],
[1.0/3, 1.0/3, 1.0/3],
[0, 0, 0]
])
for matrices_image in matrices:
for matrix_channel in matrices_image:
assert np.allclose(matrix_channel, expected)
# 45deg angle
aug = iaa.MotionBlur(k=3, angle=45, direction=0.0, order=0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]
expected = np.float32([
[0, 0, 1.0/3],
[0, 1.0/3, 0],
[1.0/3, 0, 0]
])
for matrices_image in matrices:
for matrix_channel in matrices_image:
assert np.allclose(matrix_channel, expected)
# random angle
aug = iaa.MotionBlur(k=3, angle=[0, 90], direction=0.0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(50)]
expected1 = np.float32([
[0, 1.0/3, 0],
[0, 1.0/3, 0],
[0, 1.0/3, 0]
])
expected2 = np.float32([
[0, 0, 0],
[1.0/3, 1.0/3, 1.0/3],
[0, 0, 0],
])
nb_seen = [0, 0]
for matrices_image in matrices:
assert np.allclose(matrices_image[0], matrices_image[1])
assert np.allclose(matrices_image[1], matrices_image[2])
for matrix_channel in matrices_image:
if np.allclose(matrix_channel, expected1):
nb_seen[0] += 1
elif np.allclose(matrix_channel, expected2):
nb_seen[1] += 1
assert nb_seen[0] > 0
assert nb_seen[1] > 0
# 5x5
aug = iaa.MotionBlur(k=5, angle=90, direction=0.0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]
expected = np.float32([
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1.0/5, 1.0/5, 1.0/5, 1.0/5, 1.0/5],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
])
for matrices_image in matrices:
for matrix_channel in matrices_image:
assert np.allclose(matrix_channel, expected)
# random k
aug = iaa.MotionBlur(k=[3, 5], angle=90, direction=0.0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(50)]
expected1 = np.float32([
[0, 0, 0],
[1.0/3, 1.0/3, 1.0/3],
[0, 0, 0],
])
expected2 = np.float32([
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1.0/5, 1.0/5, 1.0/5, 1.0/5, 1.0/5],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
])
nb_seen = [0, 0]
for matrices_image in matrices:
assert np.allclose(matrices_image[0], matrices_image[1])
assert np.allclose(matrices_image[1], matrices_image[2])
for matrix_channel in matrices_image:
if matrix_channel.shape == expected1.shape and np.allclose(matrix_channel, expected1):
nb_seen[0] += 1
elif matrix_channel.shape == expected2.shape and np.allclose(matrix_channel, expected2):
nb_seen[1] += 1
assert nb_seen[0] > 0
assert nb_seen[1] > 0
# k with choice [a, b, c, ...] must error in case of non-discrete values
got_exception = False
try:
_ = iaa.MotionBlur(k=[3, 3.5, 4])
except Exception as exc:
assert "to only contain integer" in str(exc)
got_exception = True
assert got_exception
# no error in case of (a, b), checks for #215
aug = iaa.MotionBlur(k=(3, 7))
for _ in range(10):
_ = aug.augment_image(np.zeros((11, 11, 3), dtype=np.uint8))
# direction 1.0
aug = iaa.MotionBlur(k=3, angle=0, direction=1.0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]
expected = np.float32([
[0, 1.0/1.5, 0],
[0, 0.5/1.5, 0],
[0, 0.0/1.5, 0]
])
for matrices_image in matrices:
for matrix_channel in matrices_image:
assert np.allclose(matrix_channel, expected, rtol=0, atol=1e-2)
# direction -1.0
aug = iaa.MotionBlur(k=3, angle=0, direction=-1.0)
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(10)]
expected = np.float32([
[0, 0.0/1.5, 0],
[0, 0.5/1.5, 0],
[0, 1.0/1.5, 0]
])
for matrices_image in matrices:
for matrix_channel in matrices_image:
assert np.allclose(matrix_channel, expected, rtol=0, atol=1e-2)
# random direction
aug = iaa.MotionBlur(k=3, angle=[0, 90], direction=[-1.0, 1.0])
matrix_func = aug.matrix
matrices = [matrix_func(np.zeros((128, 128, 3), dtype=np.uint8), 3, ia.new_random_state(i)) for i in range(50)]
expected1 = np.float32([
[0, 1.0/1.5, 0],
[0, 0.5/1.5, 0],
[0, 0.0/1.5, 0]
])
expected2 = np.float32([
[0, 0.0/1.5, 0],
[0, 0.5/1.5, 0],
[0, 1.0/1.5, 0]
])
nb_seen = [0, 0]
for matrices_image in matrices:
assert np.allclose(matrices_image[0], matrices_image[1])
assert np.allclose(matrices_image[1], matrices_image[2])
for matrix_channel in matrices_image:
if np.allclose(matrix_channel, expected1, rtol=0, atol=1e-2):
nb_seen[0] += 1
elif np.allclose(matrix_channel, expected2, rtol=0, atol=1e-2):
nb_seen[1] += 1
assert nb_seen[0] > 0
assert nb_seen[1] > 0
# test of actual augmenter
img = np.zeros((7, 7, 3), dtype=np.uint8)
img[3-1:3+2, 3-1:3+2, :] = 255
aug = iaa.MotionBlur(k=3, angle=90, direction=0.0)
img_aug = aug.augment_image(img)
v1 = (255*(1/3))
v2 = (255*(1/3)) * 2
v3 = (255*(1/3)) * 3
expected = np.float32([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, v1, v2, v3, v2, v1, 0],
[0, v1, v2, v3, v2, v1, 0],
[0, v1, v2, v3, v2, v1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]
]).astype(np.uint8)
expected = np.tile(expected[..., np.newaxis], (1, 1, 3))
assert np.allclose(img_aug, expected)
if __name__ == "__main__":
main()
| [
[
[
23,
37
]
],
[
[
39,
47
]
],
[
[
49,
64
]
],
[
[
73,
77
],
[
469,
473
],
[
592,
596
]
],
[
[
86,
96
],
[
97,
107
]
],
[
[
183,
194
],
[
749,
751
],
[
851,
853
],
[
891,
893
],
[
917,
919
],
[
1515,
1517
],
[
3403,
3405
],
[
3498,
3500
],
[
4091,
4093
],
[
4219,
4221
],
[
4239,
4241
],
[
4301,
4303
],
[
4311,
4313
],
[
4322,
4324
],
[
4333,
4335
],
[
4342,
4344
],
[
4352,
4354
],
[
4463,
4465
],
[
4638,
4640
],
[
4696,
4698
],
[
4708,
4710
],
[
4720,
4722
],
[
4833,
4835
],
[
5003,
5005
],
[
5384,
5386
],
[
5565,
5567
],
[
5721,
5723
],
[
5741,
5743
],
[
5806,
5808
],
[
5816,
5818
],
[
5827,
5829
],
[
5838,
5840
],
[
5847,
5849
],
[
5857,
5859
],
[
5884,
5886
],
[
6048,
6050
],
[
6072,
6074
],
[
6100,
6102
],
[
6171,
6173
],
[
6220,
6222
],
[
6232,
6234
],
[
6244,
6246
],
[
6273,
6275
],
[
6441,
6443
],
[
6465,
6467
],
[
6496,
6498
],
[
6570,
6572
],
[
6942,
6944
],
[
7123,
7125
],
[
7279,
7281
],
[
7299,
7301
],
[
7364,
7366
],
[
7374,
7376
],
[
7385,
7387
],
[
7396,
7398
],
[
7405,
7407
],
[
7415,
7417
],
[
7442,
7444
],
[
7606,
7608
],
[
7630,
7632
],
[
7658,
7660
],
[
7729,
7731
],
[
7778,
7780
],
[
7790,
7792
],
[
7802,
7804
],
[
7831,
7833
],
[
7999,
8001
],
[
8023,
8025
],
[
8054,
8056
],
[
8128,
8130
],
[
8706,
8708
],
[
8898,
8900
],
[
8908,
8910
],
[
8919,
8921
],
[
8930,
8932
],
[
8939,
8941
],
[
8949,
8951
],
[
9111,
9113
],
[
9279,
9281
],
[
9303,
9305
],
[
9331,
9333
],
[
9482,
9484
],
[
9506,
9508
],
[
9574,
9576
],
[
9586,
9588
],
[
9598,
9600
],
[
9663,
9665
],
[
9831,
9833
],
[
9855,
9857
],
[
9886,
9888
],
[
10058,
10060
],
[
10081,
10083
],
[
10200,
10202
],
[
10211,
10213
],
[
10221,
10223
],
[
10312,
10314
],
[
10539,
10541
],
[
10567,
10569
],
[
11247,
11249
],
[
11271,
11273
],
[
11286,
11288
],
[
11823,
11825
],
[
11847,
11849
],
[
11862,
11864
],
[
12408,
12410
],
[
12432,
12434
],
[
12447,
12449
],
[
12755,
12757
],
[
12886,
12888
],
[
13016,
13018
],
[
13247,
13249
],
[
13323,
13325
],
[
13784,
13786
],
[
13860,
13862
],
[
13936,
13938
],
[
14461,
14463
],
[
14537,
14539
],
[
15044,
15046
],
[
15146,
15148
],
[
15394,
15396
],
[
16183,
16185
],
[
16334,
16336
],
[
16354,
16356
],
[
16416,
16418
],
[
16426,
16428
],
[
16437,
16439
],
[
16446,
16448
],
[
16556,
16558
],
[
16807,
16809
],
[
16876,
16878
],
[
16888,
16890
],
[
16900,
16902
],
[
16965,
16967
],
[
17156,
17158
],
[
17841,
17843
],
[
17959,
17961
],
[
18136,
18138
],
[
18156,
18158
],
[
18221,
18223
],
[
18231,
18233
],
[
18242,
18244
],
[
18251,
18253
],
[
18278,
18280
],
[
18424,
18426
],
[
18525,
18527
],
[
18549,
18551
],
[
18577,
18579
],
[
18648,
18650
],
[
18697,
18699
],
[
18709,
18711
],
[
18721,
18723
],
[
18750,
18752
],
[
18944,
18946
],
[
18968,
18970
],
[
18999,
19001
],
[
19073,
19075
],
[
19454,
19456
],
[
19583,
19585
],
[
19593,
19595
],
[
19604,
19606
],
[
19613,
19615
],
[
19775,
19777
],
[
19969,
19971
],
[
19993,
19995
],
[
20021,
20023
],
[
20172,
20174
],
[
20196,
20198
],
[
20264,
20266
],
[
20276,
20278
],
[
20288,
20290
],
[
20353,
20355
],
[
20547,
20549
],
[
20571,
20573
],
[
20602,
20604
],
[
20774,
20776
],
[
20797,
20799
],
[
20909,
20911
],
[
20920,
20922
],
[
20931,
20933
],
[
20941,
20943
],
[
21029,
21031
],
[
21255,
21257
],
[
21283,
21285
],
[
21398,
21400
],
[
21695,
21697
],
[
22236,
22238
],
[
22366,
22368
],
[
22495,
22497
],
[
22696,
22698
],
[
22771,
22773
],
[
23166,
23168
],
[
23242,
23244
],
[
23966,
23968
],
[
23996,
23998
],
[
24069,
24071
],
[
24258,
24260
],
[
24427,
24429
],
[
24457,
24459
],
[
24530,
24532
],
[
24719,
24721
],
[
24897,
24899
],
[
24927,
24929
],
[
25000,
25002
],
[
25189,
25191
],
[
25364,
25366
],
[
25394,
25396
],
[
25468,
25470
],
[
25572,
25574
],
[
25733,
25735
],
[
25798,
25800
],
[
25909,
25911
],
[
25998,
26000
],
[
26245,
26247
],
[
26275,
26277
],
[
26348,
26350
],
[
26614,
26616
],
[
26785,
26787
],
[
26815,
26817
],
[
26889,
26891
],
[
26994,
26996
],
[
27231,
27233
],
[
27296,
27298
],
[
27451,
27453
],
[
27584,
27586
],
[
28139,
28141
],
[
28167,
28169
],
[
28310,
28312
],
[
28340,
28342
],
[
28413,
28415
],
[
28608,
28610
],
[
28799,
28801
],
[
28829,
28831
],
[
28902,
28904
],
[
29097,
29099
],
[
29303,
29305
],
[
29333,
29335
],
[
29407,
29409
],
[
29517,
29519
],
[
29683,
29685
],
[
29748,
29750
],
[
29859,
29861
],
[
29967,
29969
],
[
30152,
30154
],
[
30178,
30180
],
[
30401,
30403
],
[
30659,
30661
],
[
30684,
30686
],
[
30706,
30708
],
[
30741,
30743
]
],
[
[
202,
217
],
[
1008,
1010
],
[
1055,
1057
],
[
3137,
3139
],
[
13163,
13165
],
[
13700,
13702
],
[
14377,
14379
],
[
15260,
15262
],
[
22622,
22624
],
[
23092,
23094
]
],
[
[
225,
228
],
[
15112,
15115
]
],
[
[
247,
254
]
],
[
[
263,
275
],
[
1210,
1212
],
[
1231,
1233
],
[
1254,
1256
],
[
1315,
1317
],
[
12477,
12479
],
[
12498,
12500
],
[
12521,
12523
],
[
12582,
12584
],
[
21959,
21961
],
[
21980,
21982
],
[
22003,
22005
],
[
22064,
22066
],
[
24010,
24012
],
[
24471,
24473
],
[
24941,
24943
],
[
25408,
25410
],
[
26289,
26291
],
[
26829,
26831
],
[
28354,
28356
],
[
28843,
28845
],
[
29347,
29349
]
],
[
[
295,
312
],
[
1413,
1416
],
[
1593,
1596
],
[
2936,
2939
],
[
4041,
4044
],
[
5181,
5184
],
[
6737,
6740
],
[
8502,
8505
],
[
10157,
10160
],
[
12680,
12683
],
[
12811,
12814
],
[
12941,
12944
],
[
13079,
13082
],
[
13613,
13616
],
[
14281,
14284
],
[
14837,
14840
],
[
15671,
15674
],
[
16138,
16141
],
[
17328,
17331
],
[
19388,
19391
],
[
20873,
20876
],
[
22162,
22165
],
[
22292,
22295
],
[
22421,
22424
],
[
22558,
22561
],
[
23016,
23019
],
[
23492,
23495
],
[
23865,
23868
],
[
24325,
24328
],
[
24786,
24789
],
[
25257,
25260
],
[
26143,
26146
],
[
26678,
26681
],
[
27833,
27836
],
[
28060,
28063
],
[
28209,
28212
],
[
28697,
28700
],
[
29188,
29191
],
[
30233,
30236
]
],
[
[
332,
349
],
[
14299,
14302
],
[
23033,
23036
]
],
[
[
379,
394
],
[
2738,
2753
],
[
2863,
2878
],
[
15813,
15828
],
[
15938,
15953
],
[
23633,
23648
],
[
23758,
23773
]
],
[
[
396,
402
],
[
724,
730
],
[
10514,
10520
],
[
21230,
21236
],
[
23823,
23829
]
],
[
[
433,
437
],
[
4410,
4414
],
[
4780,
4784
],
[
9006,
9010
],
[
16503,
16507
],
[
19670,
19674
]
],
[
[
444,
448
],
[
30805,
30809
]
],
[
[
699,
716
],
[
486,
503
]
],
[
[
10490,
10506
],
[
510,
526
]
],
[
[
21207,
21222
],
[
533,
548
]
],
[
[
23800,
23815
]
]
] |
import sys
from setuptools import setup, find_packages
install_requires = [
'boto3>=1.2.3,<2.0',
'clint>0.5,<1.0',
'PyYAML>=3,<4.0',
'troposphere==2.0',
'Jinja2>=2.8,<3.0',
'six>1.9,<2.0'
]
# as of Python >= 2.7 argparse module is maintained within Python.
if sys.version_info < (2, 7):
install_requires.append('argparse>=1.1.0')
setup(
name='gordon',
version='0.7.0',
url='https://github.com/ZextrasGiacomoMattiuzzi/gordon',
license='BSD',
author='Jorge Bastida',
author_email='me@jorgebastida.com',
description='Gordon is a tool to create, wire and deploy AWS Lambdas using CloudFormation',
keywords="aws lambda apigateway kinesis dynamodb s3 cloudwatch",
packages=find_packages(),
platforms='any',
install_requires=install_requires,
test_suite='nose.collector',
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2',
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Utilities'
],
entry_points={
'console_scripts': [
'gordon = gordon.bin:main',
]
},
include_package_data=True,
zip_safe=False,
use_2to3=True
)
| [
[
[
7,
10
],
[
287,
290
]
],
[
[
35,
40
],
[
363,
368
]
],
[
[
42,
55
],
[
736,
749
]
],
[
[
57,
73
],
[
318,
334
],
[
795,
811
]
]
] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
target = '1kB'
radeg = np.pi/180
def cart_to_pol(x,y):
r = np.sqrt(x**2 + y**2)
phi = np.arctan2(y,x)
return r, phi
def pol_to_cart(r,phi):
x = r*np.cos(phi)
y = r*np.sin(phi)
return x, y
def L45(msun,mjup):
u2 = mjup/(msun+mjup)
x_L4 = 0.5 - u2
x_L5 = x_L4
y_L4 = np.sqrt(3)/2
y_L5 = -y_L4
return np.array([x_L4,x_L5]), np.array([y_L4,y_L5])
def L45_nonnorm(xjup,yjup,xsun,ysun):
phi_jup = np.arctan2(yjup,xjup)
phi_L4 = phi_jup + np.pi/3
phi_L5 = phi_jup - np.pi/3
xsep = (xsun - xjup)
ysep = (ysun - yjup)
r_jupsol = np.sqrt(xsep**2 + ysep**2)
x_L4 = r_jupsol*np.cos(phi_L4)
x_L5 = r_jupsol*np.cos(phi_L5)
y_L4 = r_jupsol*np.sin(phi_L4)
y_L5 = r_jupsol*np.sin(phi_L5)
return np.array([x_L4,x_L5]), np.array([y_L4,y_L5])
def hill(a,e,m,M):
return a*(1-e)*np.power(m/(3*M),1/3)
def r_pol(r,psi,M1,M2,a):
q = M2/M1
z = np.zeros((len(psi),len(r)))
for i, phi in enumerate(psi):
x_ = r*np.cos(phi)
y_ = r*np.sin(phi)
x = x_/a
y = y_/a
s1 = np.sqrt(x**2 + y**2)
s2 = np.sqrt((x-1)**2 + y**2)
term1 = 2/(s1*(1+q))
term2 = 2*q/(s2*(1+q))
term3 = (x - q/(1+q))**2
term4 = y**2
z[i] = term1 + term2 + term3 + term4
return z
ast_d = np.load('{0}_Trojandata.npy'.format(target))
num_asts = len(ast_d[0,:,0])
print(ast_d.shape)
jup_d = np.load('{0}_Planetdata.npy'.format(target))
sol_d = np.load('{0}_Stardata.npy'.format(target))
times = np.load('{0}_Timesteps.npy'.format(target))
ast_a = ast_d[0]; ast_e = ast_d[1]; ast_i = ast_d[2]
ast_o = ast_d[3]; ast_p = ast_d[4]; ast_l = ast_d[5]
ast_x = ast_d[6]; ast_y = ast_d[7]; ast_z = ast_d[8]
ast_meda = np.median(ast_a,axis=0)
jup_a = jup_d[0]; jup_e = jup_d[1]; jup_i = jup_d[2]; jup_p = jup_d[3]
jup_l = jup_d[4]; jup_x = jup_d[5]; jup_y = jup_d[6]; jup_z = jup_d[7]
sol_m = sol_d[0]; sol_l = sol_d[1]; sol_x = sol_d[2]; sol_y = sol_d[3]; sol_z = sol_d[4]
jhill = hill(jup_a,jup_e,9.546e-4,sol_m)
dst_jall = np.sqrt((ast_x - jup_x)**2 + (ast_y - jup_y)**2)
L45x, L45y = L45_nonnorm(jup_x,jup_y,sol_x,sol_y)
L4_xs = L45x[0]; L4_ys = L45y[0]
L5_xs = L45x[1]; L5_ys = L45y[1]
i_dif = np.zeros_like(ast_i)
i_int = ast_i[:,0]
for i in range(len(ast_a[0,:])):
i_dif[:,i] = ast_i[:,i] - i_int
phi_vals = np.linspace(-np.pi,np.pi,500)
Z = r_pol(jup_a,phi_vals,sol_m,9.546e-4,jup_a)
Pot = np.flip(Z,1)
ast_r, ast_h = cart_to_pol(ast_x,ast_y)
jup_r, jup_h = cart_to_pol(jup_x,jup_y)
phdif = np.zeros_like(ast_h)
for i in range(len(jup_h)):
phdif[:,i] = ast_h[:,i] - jup_h[i]
id4 = []
id5 = []
for i in range(num_asts):
for it in range(len(jup_h)):
if phdif[i,it] < -np.pi:
phdif[i,it] = phdif[i,it] + 2*np.pi
if phdif[i,it] > np.pi:
phdif[i,it] = phdif[i,it] - 2*np.pi
if phdif[i,0] > 0:
id4.append(i)
if phdif[i,0] < 0:
id5.append(i)
print('Percentage at L4: %2.1f' %(len(id4)*100/num_asts))
liba = np.zeros((num_asts,200))
libp = np.zeros((num_asts,200))
for i in range(num_asts):
for n in range(200):
high = int(500*(n+1))
loww = int(500*n)
pmax = np.amax(phdif[i,loww:high])
pmin = np.amin(phdif[i,loww:high])
amax = np.amax(ast_a[i,loww:high])
amin = np.amin(ast_a[i,loww:high])
amid = np.median(jup_a[loww:high])
if pmax > 0:
mid = np.pi/3
if pmax < 0:
mid = -np.pi/3
lip = ((pmax - mid) + (pmin - mid)) / 2
lia = ((amax - amid)+(amin - amid)) / 2
libp[i,n] = abs(lip)
liba[i,n] = abs(lia)
indices = []
hillers = []
for i in range(num_asts):
it = 0
while it < len(ast_meda):
a_focus = ast_a[i,it]
a_media = ast_meda[it]
if a_focus > a_media + 2:
indices.append(i)
break
elif a_focus < a_media - 2:
indices.append(i)
break
else:
it += 1
it = 0
while it < len(jhill):
d = dst_jall[i,it]
h = jhill[it]
if d <= h + 0.1:
hillers.append(i)
break
else:
it += 1
idx = np.array(indices)
hdx = np.array(hillers)
hill_not_sma = np.array(list(set(hillers) - set(indices)))
ndx = np.array(list(set(range(num_asts)) - set(indices)))
print("Number of escapers: ", len(indices))
print("Number of hill crossers: ", len(hillers))
pct = len(indices)/num_asts
print('Pct escaped / Total Asts: %0.2f' %pct)
nrm_a = ast_a[ndx]; nrm_e = ast_e[ndx]; nrm_i = ast_i[ndx]; ndifi = i_dif[ndx]; nrmla = liba[ndx]
nrm_p = ast_p[ndx]; nrm_l = ast_l[ndx]; nrm_x = ast_x[ndx]; nrm_y = ast_y[ndx]; nrmlp = libp[ndx]
odd_a = ast_a[idx]; odd_e = ast_e[idx]; odd_i = ast_i[idx]; odifi = i_dif[idx]; oddla = liba[idx]
odd_p = ast_p[idx]; odd_l = ast_l[idx]; odd_x = ast_x[idx]; odd_y = ast_y[idx]; oddlp = libp[idx]
nrm_r, nrmph = cart_to_pol(nrm_x,nrm_y); odd_r, oddph = cart_to_pol(odd_x,odd_y)
jup_r, jupph = cart_to_pol(jup_x,jup_y); sol_r, solph = cart_to_pol(sol_x,sol_y)
L4_rs, L4phs = cart_to_pol(L4_xs,L4_ys); L5_rs, L5phs = cart_to_pol(L5_xs,L5_ys)
distj = np.sqrt((odd_x - jup_x)**2 + (odd_y - jup_y)**2)
disth = np.sqrt((ast_x[hdx] - jup_x)**2 + (ast_y[hdx] - jup_y)**2)
dists = np.sqrt((odd_x - sol_x)**2 + (odd_y - sol_y)**2)
jdist = np.sqrt((jup_x - sol_x)**2 + (jup_y - sol_y)**2)
earlies = []
laties = []
hill_cross = np.zeros(len(hdx))
for i in range(len(odd_a)):
it = 0
while it < 100000:
a_focus = odd_a[i,it]
a_media = ast_meda[it]
if a_focus > a_media + 2:
if it < 33333:
earlies.append(i)
break
elif it > 70000:
laties.append(i)
break
else:
break
elif a_focus < a_media - 2:
if it < 33333:
earlies.append(i)
break
elif it > 70000:
laties.append(i)
break
else:
break
else:
it += 1
for i in range(len(hdx)):
it = 0
while it < 100000:
d = disth[i,it]
h = jhill[it]
if d <= h:
hill_cross[i] = it
break
else:
it += 1
horses = []
for number,n in enumerate(idx):
i = 0
while i < 5000:
val = phdif[n,i]
if 170*radeg <= val:
horses.append(n)
break
elif val <= -170*radeg:
horses.append(n)
break
elif -5*radeg <= val <= 5*radeg:
horses.append(n)
break
i += 1
hrs = np.array(horses)
trs = np.array( list( set(idx) - set(horses) ) )
edx = np.array(earlies)
ldx = np.array(laties)
print("Number of early escapees: ", len(earlies), " (escaped before .67 Myr)")
print("Number of late escapees: ", len(laties), " (escaped after %1.2f Myr)" %(times[70000]/1e6))
pct_e = len(earlies)/len(indices)
pct_l = len(laties)/len(indices)
print('Number early / Total escapees: %0.2f' %pct_e)
print('Number late / Total escapees: %0.2f' %pct_l)
pcT_e = len(earlies)/num_asts
pcT_l = len(laties)/num_asts
print('Number early / Total Asts.: %0.2f' %pcT_e)
print('Number late / Total Asts.: %0.2f' %pcT_l)
x_axis = np.linspace(0,times[33333]/1e6)
x_axi2 = np.linspace(times[70000]/1e6,times[-1]/1e6)
fig, ax = plt.subplots(3,figsize=(14,13),sharex=True,gridspec_kw={'height_ratios': [3, 1, .75]})
plt.subplots_adjust(hspace=0)
ax[0].plot(times/1e6,ast_meda,'k',lw=3)
ax[0].vlines([times[33333]/1e6,times[70000]/1e6],5,9.5,'b',alpha=0.8,zorder=0)
ax[0].fill_between(x_axis,5*np.ones_like(x_axis),9.5*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)
ax[0].fill_between(x_axi2,5*np.ones_like(x_axis),9.5*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)
ax[0].plot(times/1e6,jup_a,'gold',lw=3)
ax[0].legend(['Median Ast.','Planet'],fontsize=16,frameon=False,loc='upper left')
ax[0].set_ylabel('Semimajor Axis / AU',fontsize=16)
ax[0].set_ylim(5,9.5)
ax[0].set_xlim(0,2)
ax[0].text(0.18,7.25,"%1.i escaped" %len(earlies),fontsize=25)
ax[0].text(0.8,7.25,"%2.i escaped" %(len(indices) - len(earlies) - len(laties)),fontsize=25)
ax[0].text(1.48,7.25,"%2.i escaped" %len(laties),fontsize=25)
ax[1].plot(times/1e6,sol_l,'orange',lw=3,zorder=10)
ax[1].plot(times/1e6,sol_m,'g',ls=':',lw=3,zorder=10)
ax[1].vlines([times[33333]/1e6,times[70000]/1e6],0,4,'b',alpha=0.8,zorder=0)
ax[1].legend(["log Stellar Luminosity", "Stellar Mass"],fontsize=16,loc='center left',frameon=False)
ax[1].set_ylabel("Solar Units",fontsize=16)
ax[1].set_ylim(0,4)
ax[1].fill_between(x_axis,0*np.ones_like(x_axis),4*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)
ax[1].fill_between(x_axi2,0*np.ones_like(x_axis),4*np.ones_like(x_axis),facecolor='b',alpha=0.2,zorder=0)
ax[1].set_xlabel('Time / Myr',fontsize=16)
ax[1].set_yticks([0,1,2,3])
ax[2].hist(hill_cross*20/1e6,edgecolor='k',facecolor='k',alpha=0.5,range=[0,2],bins=20)
ax[2].set_ylabel("Escapes",fontsize=16)
ax[2].set_xlabel("Time / Myr",fontsize=16)
ax[2].set_ylim(0,35)
ax[2].set_yticks([0,10,20,30])
fig.savefig('{0}_Timeseries.pdf'.format(target),dpi=300)
############
hist, axh = plt.subplots(1,4,figsize=(20,5))
axh[0].hist(nrm_a[:,0],edgecolor='k',histtype='step',range=[4.95,5.45])
axh[0].hist(odd_a[:,0],facecolor='r',alpha=0.7,range=[4.95,5.45])
axh[0].set_xlabel("SMA (AU)",fontsize=16)
axh[0].set_xlim(4.95,5.45)
axh[1].hist(nrm_e[:,0],edgecolor='k',histtype='step',range=[0,.25])
axh[1].hist(odd_e[:,0],facecolor='r',alpha=0.7,range=[0,.25])
axh[1].set_xlabel("Eccentricity",fontsize=16)
axh[1].set_xlim(0,0.25)
axh[2].hist(abs(nrmla[:,0]),edgecolor='k',histtype='step',range=[0,0.02],bins=20)
axh[2].hist(abs(liba[trs,0]),facecolor='r',alpha=0.7,range=[0,0.02],bins=20)
axh[2].set_xlabel("SMA Libration Amp. (AU)",fontsize=16)
axh[2].set_xlim(0,.02)
axh[2].set_xticks([0,0.005,0.01,0.015,0.02])
radeg = np.pi/180
axh[3].hist(abs(nrmlp[:,0])/radeg,edgecolor='k',histtype='step',range=[0,35])
axh[3].hist(abs(libp[trs,0])/radeg,facecolor='r',alpha=0.7,range=[0,35])
axh[3].set_xlabel(r"$\lambda$ Libration Amplitude (Deg.)",fontsize=16)
axh[3].set_xlim(0,35)
axh[3].legend(labels=['Stable','Escaped'],fontsize=14,frameon=False,loc='upper right')
hist.suptitle('Initial conditions',fontsize=18)
hist.savefig('{0}_Histograms.pdf'.format(target),dpi=300)
#############
orf, ora = plt.subplots(1,2,figsize=(15,5),gridspec_kw={'width_ratios': [2, 1]})
for i in range(len(ndx)):
ora[0].plot(phdif[ndx[i],:500],ast_a[ndx[i],:500]/5.2,'k',alpha=0.01,zorder=5)
for i,tr in enumerate(trs):
ora[0].plot(phdif[tr,:500],ast_a[tr,:500]/5.2,'r',alpha=0.05,zorder=10)
ora[0].set_xlim(-np.pi,np.pi)
ora[0].set_ylim(.9,1.1)
ora[0].set_xlabel(r"$\phi - \phi_{jup}$",fontsize=16)
ora[0].set_ylabel(r"SMA / $a_{jup}$",fontsize=16)
ora[0].vlines([-np.pi/3,np.pi/3],0.9,1.1,ls='--',zorder=0)
ora[0].set_xticks([-np.pi,-np.pi/2,-np.pi/3,0,np.pi/3,np.pi/2,np.pi])
ora[0].set_xticklabels([r"-$\pi$",r"-$\pi$/2",r"$L_5$",'0',r"$L_4$",r"$\pi$/2",r"$\pi$"])
sns.kdeplot(abs(nrmlp[:,0])/radeg,nrmla[:,0],shade=True,shade_lowest=None,cmap='Greys',levels=5,alpha=0.5)
sns.kdeplot(abs(libp[trs,0])/radeg,liba[trs,0],shade=True,shade_lowest=None,cmap='Reds',levels=5,alpha=0.5)
ora[1].set_ylabel("Init. SMA Libration (AU)",fontsize=16)
ora[1].set_xlabel(r"Init. $\lambda$ Libration (Deg.)",fontsize=16)
ora[1].set_xlim(0,35)
orf.tight_layout()
orf.savefig('{0}_Orbits.pdf'.format(target),dpi=300)
#############
norm = mpl.colors.Normalize(vmin = np.min(.005), vmax = np.max(.015), clip = False)
tim, tax = plt.subplots(figsize=(7,6))
scatter = tax.scatter(abs(libp[hdx,0])/radeg,hill_cross*20/1e6,c=abs(liba[hdx,0]),cmap='Reds',norm=norm)
tax.set_xlim(0,35)
tax.set_xlabel(r"Initial $\lambda$ Libration (Deg.)",fontsize=16)
tax.set_ylabel('Time of Encounter (Myr)',fontsize=16)
tim.colorbar(scatter, label='Initial SMA Libration (AU)')
tax.set_ylim(0,2)
tim.savefig('{0}_Eject_Perts.pdf'.format(target),dpi=300)
######################
hill_data = np.array((hdx,hill_cross))
np.save('{0}_Ejects.npy'.format(target), idx)
np.save('{0}_Hillcr.npy'.format(target), hill_data) | [
[
[
57,
68
],
[
172,
174
],
[
1527,
1529
],
[
1629,
1631
],
[
1682,
1684
],
[
1733,
1735
],
[
1949,
1951
],
[
2257,
2259
],
[
2432,
2434
],
[
2557,
2559
],
[
2570,
2572
],
[
2576,
2578
],
[
2640,
2642
],
[
2742,
2744
],
[
2938,
2940
],
[
2987,
2989
],
[
3018,
3020
],
[
3067,
3069
],
[
3238,
3240
],
[
3270,
3272
],
[
3417,
3419
],
[
3460,
3462
],
[
3503,
3505
],
[
3546,
3548
],
[
3589,
3591
],
[
3665,
3667
],
[
3713,
3715
],
[
4452,
4454
],
[
4476,
4478
],
[
4510,
4512
],
[
4560,
4562
],
[
5449,
5451
],
[
5506,
5508
],
[
5573,
5575
],
[
5630,
5632
],
[
5718,
5720
],
[
6985,
6987
],
[
7008,
7010
],
[
7082,
7084
],
[
7106,
7108
],
[
7674,
7676
],
[
7715,
7717
],
[
8036,
8038
],
[
8061,
8063
],
[
8144,
8146
],
[
8169,
8171
],
[
9035,
9037
],
[
9058,
9060
],
[
9141,
9143
],
[
9164,
9166
],
[
10334,
10336
],
[
11109,
11111
],
[
11115,
11117
],
[
11266,
11268
],
[
11274,
11276
],
[
11329,
11331
],
[
11336,
11338
],
[
11345,
11347
],
[
11355,
11357
],
[
11363,
11365
],
[
11371,
11373
],
[
11955,
11957
],
[
11976,
11978
],
[
12459,
12461
],
[
12486,
12488
],
[
12532,
12534
],
[
213,
215
],
[
244,
246
],
[
313,
315
],
[
335,
337
],
[
467,
469
],
[
513,
515
],
[
536,
538
],
[
611,
613
],
[
661,
663
],
[
692,
694
],
[
775,
777
],
[
827,
829
],
[
862,
864
],
[
897,
899
],
[
932,
934
],
[
963,
965
],
[
986,
988
],
[
1047,
1049
],
[
1119,
1121
],
[
1196,
1198
],
[
1223,
1225
],
[
1282,
1284
],
[
1316,
1318
]
],
[
[
76,
93
],
[
11927,
11930
]
],
[
[
101,
125
],
[
7770,
7773
],
[
7857,
7860
],
[
9598,
9601
],
[
10809,
10812
],
[
12016,
12019
]
],
[
[
133,
147
],
[
11470,
11473
],
[
11577,
11580
]
],
[
[
149,
155
],
[
1563,
1569
],
[
1665,
1671
],
[
1716,
1722
],
[
1768,
1774
],
[
9554,
9560
],
[
10765,
10771
],
[
11887,
11893
],
[
12405,
12411
],
[
12518,
12524
],
[
12564,
12570
]
],
[
[
164,
169
],
[
6727,
6732
],
[
6813,
6818
],
[
6883,
6888
],
[
6901,
6906
]
],
[
[
187,
198
],
[
2669,
2680
],
[
2709,
2720
],
[
5212,
5223
],
[
5253,
5264
],
[
5293,
5304
],
[
5334,
5345
],
[
5374,
5385
],
[
5415,
5426
]
],
[
[
283,
294
]
],
[
[
368,
371
]
],
[
[
563,
574
],
[
2320,
2331
]
],
[
[
1013,
1017
],
[
2213,
2217
]
],
[
[
1075,
1080
],
[
2591,
2596
]
],
[
[
1519,
1524
],
[
1587,
1592
],
[
1607,
1612
],
[
1786,
1791
],
[
1804,
1809
],
[
1822,
1827
],
[
1840,
1845
],
[
1858,
1863
],
[
1876,
1881
],
[
1893,
1898
],
[
1911,
1916
],
[
1929,
1934
]
],
[
[
1572,
1580
],
[
2868,
2876
],
[
3219,
3227
],
[
3248,
3256
],
[
3280,
3288
],
[
3310,
3318
],
[
3938,
3946
],
[
4584,
4592
],
[
4742,
4750
],
[
7515,
7523
],
[
7544,
7552
]
],
[
[
1621,
1626
],
[
1982,
1987
],
[
2000,
2005
],
[
2018,
2023
],
[
2036,
2041
],
[
2053,
2058
],
[
2071,
2076
],
[
2089,
2094
],
[
2107,
2112
]
],
[
[
1674,
1679
],
[
2124,
2129
],
[
2142,
2147
],
[
2160,
2165
],
[
2178,
2183
],
[
2196,
2201
]
],
[
[
1725,
1730
],
[
7298,
7303
],
[
7688,
7693
],
[
7727,
7732
],
[
7744,
7749
],
[
7899,
7904
],
[
7942,
7947
],
[
7959,
7964
],
[
8235,
8240
],
[
8670,
8675
],
[
8722,
8727
],
[
8779,
8784
],
[
8796,
8801
]
],
[
[
1778,
1783
],
[
1959,
1964
],
[
2491,
2496
],
[
3511,
3516
],
[
3554,
3559
],
[
4008,
4013
],
[
4810,
4815
],
[
5008,
5013
],
[
10940,
10945
],
[
11047,
11052
]
],
[
[
1796,
1801
],
[
4830,
4835
],
[
5028,
5033
]
],
[
[
1814,
1819
],
[
2446,
2451
],
[
2461,
2466
],
[
2522,
2527
],
[
4850,
4855
],
[
5048,
5053
]
],
[
[
1832,
1837
]
],
[
[
1850,
1855
],
[
4908,
4913
],
[
5106,
5111
]
],
[
[
1868,
1873
],
[
4928,
4933
],
[
5126,
5131
]
],
[
[
1885,
1890
],
[
2266,
2271
],
[
2681,
2686
],
[
4948,
4953
],
[
5146,
5151
],
[
5515,
5520
]
],
[
[
1903,
1908
],
[
2287,
2292
],
[
2687,
2692
],
[
4968,
4973
],
[
5166,
5171
],
[
5541,
5546
]
],
[
[
1921,
1926
]
],
[
[
1938,
1946
],
[
3979,
3987
],
[
4038,
4046
],
[
5848,
5856
],
[
7909,
7917
]
],
[
[
1974,
1979
],
[
2218,
2223
],
[
2597,
2602
],
[
2627,
2632
],
[
3599,
3604
],
[
8245,
8250
]
],
[
[
1992,
1997
],
[
2224,
2229
]
],
[
[
2010,
2015
]
],
[
[
2028,
2033
]
],
[
[
2045,
2050
]
],
[
[
2063,
2068
],
[
2274,
2279
],
[
2332,
2337
],
[
2721,
2726
],
[
5305,
5310
],
[
5466,
5471
],
[
5528,
5533
],
[
5639,
5644
]
],
[
[
2081,
2086
],
[
2295,
2300
],
[
2338,
2343
],
[
2727,
2732
],
[
5311,
5316
],
[
5487,
5492
],
[
5554,
5559
],
[
5660,
5665
]
],
[
[
2099,
2104
]
],
[
[
2116,
2121
],
[
2239,
2244
],
[
2612,
2617
],
[
8732,
8737
]
],
[
[
2134,
2139
],
[
8680,
8685
]
],
[
[
2152,
2157
],
[
2344,
2349
],
[
5346,
5351
],
[
5590,
5595
],
[
5647,
5652
]
],
[
[
2170,
2175
],
[
2350,
2355
],
[
5352,
5357
],
[
5611,
5616
],
[
5668,
5673
]
],
[
[
2188,
2193
]
],
[
[
2205,
2210
],
[
4281,
4286
],
[
4328,
4333
],
[
6488,
6493
]
],
[
[
2246,
2254
],
[
4301,
4309
]
],
[
[
2307,
2311
],
[
2365,
2369
],
[
2398,
2402
]
],
[
[
2313,
2317
],
[
2382,
2386
],
[
2415,
2419
]
],
[
[
2357,
2362
],
[
5386,
5391
]
],
[
[
2374,
2379
],
[
5392,
5397
]
],
[
[
2390,
2395
],
[
5427,
5432
]
],
[
[
2407,
2412
],
[
5433,
5438
]
],
[
[
2424,
2429
],
[
2509,
2514
],
[
4870,
4875
],
[
5068,
5073
]
],
[
[
2453,
2458
],
[
2535,
2540
]
],
[
[
2476,
2477
],
[
2530,
2531
],
[
2517,
2518
]
],
[
[
2546,
2554
],
[
2603,
2611
]
],
[
[
2587,
2588
],
[
2648,
2649
]
],
[
[
2634,
2637
]
],
[
[
2654,
2659
]
],
[
[
2661,
2666
],
[
2756,
2761
],
[
2808,
2813
]
],
[
[
2694,
2699
]
],
[
[
2701,
2706
],
[
2782,
2787
],
[
2821,
2826
],
[
2903,
2908
]
],
[
[
2734,
2739
],
[
2795,
2800
],
[
2923,
2928
],
[
2971,
2976
],
[
2957,
2962
],
[
3004,
3009
],
[
3051,
3056
],
[
3037,
3042
],
[
3080,
3085
],
[
3125,
3130
],
[
3425,
3430
],
[
3468,
3473
],
[
6701,
6706
],
[
10921,
10926
],
[
11032,
11037
]
],
[
[
2767,
2768
],
[
2816,
2817
],
[
2827,
2828
],
[
2803,
2804
]
],
[
[
2835,
2838
],
[
3104,
3107
],
[
3210,
3213
]
],
[
[
2844,
2847
],
[
3149,
3152
]
],
[
[
2857,
2858
],
[
2929,
2930
],
[
2977,
2978
],
[
2963,
2964
],
[
3010,
3011
],
[
3057,
3058
],
[
3043,
3044
],
[
3086,
3087
],
[
3115,
3116
],
[
3131,
3132
],
[
3160,
3161
]
],
[
[
2887,
2889
],
[
2931,
2933
],
[
2979,
2981
],
[
2965,
2967
],
[
3012,
3014
],
[
3059,
3061
],
[
3045,
3047
]
],
[
[
3231,
3235
],
[
3867,
3871
],
[
4890,
4894
],
[
5088,
5092
],
[
10139,
10143
],
[
11612,
11616
],
[
12113,
12117
]
],
[
[
3263,
3267
],
[
3838,
3842
],
[
4988,
4992
],
[
5186,
5190
],
[
10438,
10442
],
[
11593,
11597
],
[
12070,
12074
]
],
[
[
3299,
3300
],
[
3431,
3432
],
[
3474,
3475
],
[
3517,
3518
],
[
3560,
3561
],
[
3843,
3844
],
[
3872,
3873
]
],
[
[
3329,
3330
],
[
3370,
3371
],
[
3399,
3400
],
[
3845,
3846
],
[
3874,
3875
]
],
[
[
3354,
3358
],
[
3438,
3442
],
[
3481,
3485
],
[
3524,
3528
],
[
3567,
3571
],
[
3610,
3614
]
],
[
[
3384,
3388
],
[
3433,
3437
],
[
3476,
3480
],
[
3519,
3523
],
[
3562,
3566
],
[
3605,
3609
]
],
[
[
3410,
3414
],
[
3637,
3641
],
[
3684,
3688
],
[
3750,
3754
]
],
[
[
3453,
3457
],
[
3765,
3769
]
],
[
[
3496,
3500
],
[
3798,
3802
]
],
[
[
3539,
3543
],
[
3812,
3816
]
],
[
[
3582,
3586
],
[
3805,
3809
],
[
3819,
3823
]
],
[
[
3659,
3662
],
[
3757,
3760
],
[
3772,
3775
]
],
[
[
3706,
3709
],
[
3757,
3760
],
[
3772,
3775
]
],
[
[
3742,
3745
],
[
3854,
3857
]
],
[
[
3790,
3793
],
[
3883,
3886
]
],
[
[
3897,
3904
],
[
4097,
4104
],
[
4181,
4188
],
[
4461,
4468
],
[
4543,
4550
],
[
4601,
4608
],
[
4658,
4665
],
[
4733,
4740
],
[
7342,
7349
],
[
7375,
7382
],
[
8544,
8551
]
],
[
[
3910,
3917
],
[
4375,
4382
],
[
4485,
4492
],
[
4528,
4535
],
[
4713,
4720
]
],
[
[
3927,
3928
],
[
4014,
4015
],
[
4112,
4113
],
[
4196,
4197
],
[
4310,
4311
],
[
4390,
4391
]
],
[
[
3953,
3955
],
[
3970,
3972
],
[
4016,
4018
],
[
4047,
4049
],
[
4243,
4245
]
],
[
[
3998,
4005
],
[
4062,
4069
],
[
4146,
4153
]
],
[
[
4028,
4035
],
[
4072,
4079
],
[
4156,
4163
]
],
[
[
4255,
4257
],
[
4272,
4274
],
[
4312,
4314
],
[
4334,
4336
],
[
4437,
4439
]
],
[
[
4297,
4298
],
[
4349,
4350
]
],
[
[
4324,
4325
],
[
4354,
4355
]
],
[
[
4446,
4449
],
[
5014,
5017
],
[
5034,
5037
],
[
5054,
5057
],
[
5074,
5077
],
[
5093,
5096
],
[
5112,
5115
],
[
5132,
5135
],
[
5152,
5155
],
[
5172,
5175
],
[
5191,
5194
],
[
6651,
6654
],
[
7028,
7031
],
[
12527,
12530
]
],
[
[
4470,
4473
],
[
5521,
5524
],
[
5547,
5550
],
[
5731,
5734
],
[
6411,
6414
],
[
12075,
12078
],
[
12118,
12121
],
[
12469,
12472
]
],
[
[
4495,
4507
]
],
[
[
4554,
4557
],
[
4816,
4819
],
[
4836,
4839
],
[
4856,
4859
],
[
4876,
4879
],
[
4895,
4898
],
[
4914,
4917
],
[
4934,
4937
],
[
4954,
4957
],
[
4974,
4977
],
[
4993,
4996
],
[
10898,
10901
],
[
10927,
10930
],
[
10946,
10949
]
],
[
[
4723,
4726
],
[
4796,
4799
]
],
[
[
4802,
4807
],
[
9644,
9649
]
],
[
[
4822,
4827
],
[
9852,
9857
]
],
[
[
4842,
4847
]
],
[
[
4862,
4867
]
],
[
[
4882,
4887
],
[
10057,
10062
],
[
11504,
11509
]
],
[
[
4900,
4905
]
],
[
[
4920,
4925
]
],
[
[
4940,
4945
],
[
5224,
5229
]
],
[
[
4960,
4965
],
[
5230,
5235
]
],
[
[
4980,
4985
],
[
10360,
10365
],
[
11486,
11491
]
],
[
[
5000,
5005
],
[
5757,
5762
],
[
5818,
5823
],
[
9716,
9721
]
],
[
[
5020,
5025
],
[
9920,
9925
]
],
[
[
5040,
5045
]
],
[
[
5060,
5065
]
],
[
[
5080,
5085
]
],
[
[
5098,
5103
]
],
[
[
5118,
5123
]
],
[
[
5138,
5143
],
[
5265,
5270
],
[
5458,
5463
],
[
5582,
5587
]
],
[
[
5158,
5163
],
[
5271,
5276
],
[
5479,
5484
],
[
5603,
5608
]
],
[
[
5178,
5183
]
],
[
[
5197,
5202
]
],
[
[
5204,
5209
]
],
[
[
5238,
5243
]
],
[
[
5245,
5250
]
],
[
[
5278,
5283
]
],
[
[
5285,
5290
]
],
[
[
5319,
5324
]
],
[
[
5326,
5331
]
],
[
[
5359,
5364
]
],
[
[
5366,
5371
]
],
[
[
5400,
5405
]
],
[
[
5407,
5412
]
],
[
[
5441,
5446
]
],
[
[
5498,
5503
],
[
6464,
6469
]
],
[
[
5565,
5570
]
],
[
[
5622,
5627
]
],
[
[
5680,
5687
],
[
5938,
5945
],
[
6181,
6188
],
[
7091,
7098
],
[
7170,
7177
],
[
7329,
7336
],
[
7506,
7513
],
[
8481,
8488
],
[
8559,
8566
]
],
[
[
5693,
5699
],
[
6023,
6029
],
[
6266,
6272
],
[
7115,
7121
],
[
7256,
7262
],
[
7363,
7369
],
[
7536,
7542
],
[
8574,
8580
],
[
8637,
8643
]
],
[
[
5705,
5715
],
[
6529,
6539
],
[
9302,
9312
],
[
12089,
12099
],
[
12473,
12483
]
],
[
[
5742,
5743
],
[
5824,
5825
],
[
5953,
5954
],
[
6037,
6038
],
[
6196,
6197
],
[
6280,
6281
]
],
[
[
5770,
5772
],
[
5787,
5789
],
[
5826,
5828
],
[
5857,
5859
],
[
5910,
5912
],
[
5995,
5997
],
[
6153,
6155
],
[
6238,
6240
],
[
6371,
6373
]
],
[
[
5808,
5815
],
[
5872,
5879
],
[
6115,
6122
]
],
[
[
5838,
5845
],
[
5882,
5889
],
[
6125,
6132
]
],
[
[
6396,
6397
],
[
6470,
6471
],
[
6540,
6541
]
],
[
[
6422,
6424
],
[
6439,
6441
],
[
6472,
6474
],
[
6494,
6496
],
[
6545,
6547
],
[
6592,
6594
]
],
[
[
6460,
6461
],
[
6509,
6510
]
],
[
[
6484,
6485
],
[
6514,
6515
]
],
[
[
6613,
6619
],
[
6753,
6759
],
[
6832,
6838
],
[
6920,
6926
],
[
6994,
7000
],
[
7039,
7045
]
],
[
[
6629,
6635
]
],
[
[
6636,
6637
],
[
6707,
6708
],
[
6767,
6768
],
[
6846,
6847
],
[
6934,
6935
]
],
[
[
6661,
6662
],
[
6677,
6678
],
[
6709,
6710
],
[
6963,
6964
]
],
[
[
6695,
6698
],
[
6736,
6739
],
[
6801,
6804
],
[
6892,
6895
]
],
[
[
6979,
6982
]
],
[
[
7002,
7005
],
[
10144,
10147
],
[
10443,
10446
],
[
11010,
11013
],
[
11598,
11601
],
[
11617,
11620
]
],
[
[
7076,
7079
]
],
[
[
7100,
7103
]
],
[
[
7317,
7322
],
[
7432,
7437
]
],
[
[
7351,
7356
],
[
7487,
7492
]
],
[
[
7494,
7499
],
[
7601,
7606
]
],
[
[
7524,
7529
],
[
7656,
7661
]
],
[
[
7665,
7671
],
[
8027,
8033
],
[
8049,
8055
],
[
8074,
8080
],
[
8157,
8163
],
[
8182,
8188
],
[
9026,
9032
],
[
9048,
9054
],
[
9071,
9077
],
[
9154,
9160
],
[
9177,
9183
]
],
[
[
7706,
7712
],
[
8135,
8141
],
[
9132,
9138
]
],
[
[
7760,
7763
],
[
9514,
9517
]
],
[
[
7765,
7767
],
[
7888,
7890
],
[
7928,
7930
],
[
8008,
8010
],
[
8116,
8118
],
[
8224,
8226
],
[
8264,
8266
],
[
8346,
8348
],
[
8398,
8400
],
[
8420,
8422
],
[
8440,
8442
],
[
8503,
8505
],
[
8596,
8598
],
[
8659,
8661
],
[
8711,
8713
],
[
8765,
8767
],
[
8842,
8844
],
[
8943,
8945
],
[
8987,
8989
],
[
9007,
9009
],
[
9113,
9115
],
[
9219,
9221
],
[
9262,
9264
],
[
9291,
9293
],
[
9379,
9381
],
[
9419,
9421
],
[
9462,
9464
],
[
9483,
9485
]
],
[
[
9586,
9590
],
[
10676,
10680
],
[
10724,
10728
]
],
[
[
9592,
9595
],
[
9632,
9635
],
[
9704,
9707
],
[
9770,
9773
],
[
9812,
9815
],
[
9840,
9843
],
[
9908,
9911
],
[
9970,
9973
],
[
10016,
10019
],
[
10041,
10044
],
[
10123,
10126
],
[
10200,
10203
],
[
10257,
10260
],
[
10280,
10283
],
[
10344,
10347
],
[
10422,
10425
],
[
10495,
10498
],
[
10566,
10569
],
[
10588,
10591
]
],
[
[
10326,
10331
],
[
10372,
10377
],
[
10451,
10456
],
[
11498,
11503
],
[
11606,
11611
],
[
12083,
12088
]
],
[
[
10798,
10801
],
[
11832,
11835
],
[
11851,
11854
]
],
[
[
10803,
10806
],
[
10909,
10912
],
[
11020,
11023
],
[
11092,
11095
],
[
11122,
11125
],
[
11146,
11149
],
[
11200,
11203
],
[
11250,
11253
],
[
11309,
11312
],
[
11379,
11382
],
[
11685,
11688
],
[
11743,
11746
],
[
11810,
11813
]
],
[
[
10883,
10884
],
[
10931,
10932
],
[
10950,
10951
]
],
[
[
10992,
10993
]
],
[
[
10994,
10996
],
[
11038,
11040
],
[
11053,
11055
]
],
[
[
11920,
11924
],
[
12143,
12147
]
],
[
[
12005,
12008
],
[
12288,
12291
],
[
12364,
12367
]
],
[
[
12010,
12013
],
[
12054,
12057
],
[
12149,
12152
],
[
12168,
12171
],
[
12234,
12237
],
[
12346,
12349
]
],
[
[
12044,
12051
],
[
12301,
12308
]
],
[
[
12447,
12456
],
[
12573,
12582
]
]
] |
import codecs
import os
import re
from setuptools import find_packages, setup
###############################################################################
# Using setup.py from Attrs as a template for finding components, awesome config.
# Original reference: https://github.com/python-attrs/attrs/blob/master/setup.py
NAME = "mutatest"
PACKAGES = find_packages()
META_PATH = os.path.join("mutatest", "__init__.py")
KEYWORDS = ["mutatest", "mutation", "testing", "test", "mutant", "mutate", "pytest"]
PROJECT_URLS = {
"Documentation": "https://mutatest.readthedocs.io/",
"Bug Tracker": "https://github.com/EvanKepner/mutatest/issues",
"Source Code": "https://github.com/EvanKepner/mutatest",
}
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Natural Language :: English",
"Environment :: Console",
"Framework :: Pytest",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Quality Assurance",
"Topic :: Software Development :: Testing",
"Topic :: Software Development :: Testing :: Unit",
]
# Built to run with pytest, but not an installation requirement for the API
INSTALL_REQUIRES = ["coverage>=4.4"]
EXTRAS_REQUIRE = {
"docs": ["coverage", "ipython", "sphinx"], # kept in docs/requirements.txt for RTD
"tests": [
"pytest >= 4.0.0",
"freezegun",
"coverage",
"pytest-cov",
"pytest-xdist",
"tox",
"virtualenv",
"hypothesis",
],
"qa": ["mypy", "black", "pre-commit", "isort"],
}
EXTRAS_REQUIRE["dev"] = EXTRAS_REQUIRE["tests"] + EXTRAS_REQUIRE["docs"] + EXTRAS_REQUIRE["qa"]
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
Build an absolute path from *parts* and and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""
Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta), META_FILE, re.M)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
VERSION = find_meta("version")
URL = find_meta("url")
LONG = "\n\n".join([read("README.rst"), read("CHANGELOG.rst"), read("AUTHORS.rst")])
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=URL,
project_urls=PROJECT_URLS,
version=VERSION,
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
keywords=KEYWORDS,
long_description=LONG,
packages=PACKAGES,
python_requires=">=3.7.0",
zip_safe=False,
entry_points={"console_scripts": ["mutatest=mutatest.cli:cli_main"]},
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
include_package_data=True,
)
| [
[
[
7,
13
],
[
2096,
2102
]
],
[
[
21,
23
],
[
381,
383
],
[
1889,
1891
],
[
1905,
1907
],
[
2108,
2110
]
],
[
[
31,
33
],
[
2306,
2308
],
[
2383,
2385
]
],
[
[
58,
71
],
[
353,
366
]
],
[
[
73,
78
],
[
2695,
2700
]
],
[
[
324,
328
],
[
2715,
2719
]
],
[
[
342,
350
],
[
3074,
3082
]
],
[
[
369,
378
],
[
2200,
2209
]
],
[
[
421,
429
],
[
3016,
3024
]
],
[
[
506,
518
],
[
2843,
2855
]
],
[
[
712,
723
],
[
3241,
3252
]
],
[
[
1390,
1406
],
[
3279,
3295
]
],
[
[
1427,
1441
],
[
1808,
1822
],
[
1834,
1848
],
[
1859,
1873
],
[
1784,
1798
],
[
3320,
3334
]
],
[
[
1882,
1886
],
[
2121,
2125
]
],
[
[
1938,
1942
],
[
2195,
2199
],
[
2597,
2601
],
[
2617,
2621
],
[
2640,
2644
]
],
[
[
2183,
2192
],
[
2372,
2381
]
],
[
[
2217,
2226
],
[
2533,
2542
],
[
2560,
2569
],
[
2741,
2750
],
[
2783,
2792
],
[
2897,
2906
],
[
2939,
2948
],
[
2978,
2987
]
],
[
[
2523,
2530
],
[
2873,
2880
]
],
[
[
2554,
2557
],
[
2817,
2820
]
],
[
[
2577,
2581
],
[
3051,
3055
]
]
] |
from random import randint
from os import system
c = 0
#Limpa tela
system('cls')
print('=-'*20)
print('VAMOS JOGAR PAR OU IMPAR')
print('=-'*20)
#Loop do programa
while True:
n = int(input('Diga um valor: '))
computador = randint (0, 10)
while True:
decisao = str(input('Par ou impar [P/I] ')).upper()
if decisao in 'PI':
break
else:
print('Por favor escolha par ou impar')
if (n + computador) % 2 == 0:
print('-'*40)
print(f'Voce jogou {n} e o computador {computador}. Total {n+computador} deu PAR')
if decisao == 'P':
print('=-'*20)
print('Voce venceu')
c += 1
elif decisao == 'I':
print('Voce perdeu')
print('=-'*20)
print(f'GAME OVER! Voce venceu {c} vezes')
break
else:
print('-'*40)
print(f'Voce jogou {n} e o computador {computador}. Total {n+computador} deu IMPAR')
print('-'*40)
if decisao == 'I':
print('=-'*20)
print('Voce venceu')
c += 1
elif decisao == 'P':
print('Voce perdeu')
print('=-'*20)
print(f'GAME OVER! Voce venceu {c} vezes')
break
print('='*20)
#SOLUCAO ORIGINAL COM 28 LINHAS | [
[
[
19,
26
],
[
230,
237
]
],
[
[
42,
48
],
[
67,
73
]
],
[
[
49,
50
],
[
692,
693
],
[
832,
833
],
[
1109,
1110
],
[
1249,
1250
]
],
[
[
179,
180
],
[
452,
453
],
[
528,
529
],
[
567,
568
],
[
921,
922
],
[
960,
961
]
],
[
[
217,
227
],
[
456,
466
],
[
547,
557
],
[
569,
579
],
[
940,
950
],
[
962,
972
]
],
[
[
275,
282
],
[
338,
345
],
[
602,
609
],
[
712,
719
],
[
1019,
1026
],
[
1129,
1136
]
]
] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test hccl allreduce performance with 8p"""
import os
from multiprocessing import Process, Queue
import pytest
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import dtype as mstype
from mindspore.ops import operations as P
import mindspore.communication.management as D
from mindspore import context
from mindspore.context import ParallelMode
MINDSPORE_HCCL_CONFIG_PATH = "/home/workspace/mindspore_config/hccl/rank_table_8p.json"
np.random.seed(1)
os.environ['GLOG_v'] = str(2)
class AllReduceNet(nn.Cell):
def __init__(self):
super(AllReduceNet, self).__init__()
self.mul = P.Mul()
self.all_reduce = P.AllReduce()
self.add = P.Add()
def construct(self, x):
x = self.mul(x, 2)
y1 = Tensor(np.array([[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]])).astype(np.float32)
z = self.add(x, y1)
z = self.all_reduce(z)
y2 = Tensor(np.array([[-16, -16, -16, -16], [-16, -16, -16, -16], [-16, -16, -16, -16]])).astype(np.float32)
out = self.add(z, y2)
out = self.all_reduce(out)
out = self.mul(out, 2)
return out
def train_allreduce_8p(q, device_id, device_num):
os.system("mkdir " + str(device_id))
os.chdir(str(device_id))
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend", device_id=device_id)
os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH
os.environ['RANK_ID'] = str(device_id)
os.environ['RANK_SIZE'] = str(device_num)
D.init()
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=False,
device_num=device_num)
net = AllReduceNet()
input_x = np.ones([3, 4]).astype(np.float32)
output = net(Tensor(input_x, mstype.float32))
q.put(output)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
def test_pynative_hccl_allreduce_8p():
device_num = 8
process = []
q = Queue()
for i in range(device_num):
device_id = i
process.append(Process(target=train_allreduce_8p, args=(q, device_id, device_num)))
for i in range(device_num):
process[i].start()
print("Waiting for all subprocesses done...")
for i in range(device_num):
process[i].join()
# check result
for i in range(device_num):
expect_output = [[256, 256, 256, 256], [256, 256, 256, 256], [256, 256, 256, 256]]
assert not q.empty()
output = Tensor(q.get())
assert np.allclose(output.asnumpy(), expect_output)
for i in range(device_num):
os.system("rm -rf " + str(i))
print("End training...")
| [
[
[
722,
724
],
[
1164,
1166
],
[
1882,
1884
],
[
1923,
1925
],
[
2049,
2051
],
[
2123,
2125
],
[
2166,
2168
],
[
3411,
3413
]
],
[
[
753,
760
],
[
2866,
2873
]
],
[
[
762,
767
],
[
2781,
2786
]
],
[
[
775,
781
],
[
2571,
2577
],
[
2591,
2597
],
[
2633,
2639
],
[
2675,
2681
]
],
[
[
789,
800
],
[
1146,
1148
],
[
1463,
1465
],
[
1524,
1526
],
[
1615,
1617
],
[
1700,
1702
],
[
2466,
2468
],
[
2489,
2491
],
[
3325,
3327
]
],
[
[
808,
826
],
[
1214,
1216
]
],
[
[
849,
855
],
[
1456,
1462
],
[
1608,
1614
],
[
2518,
2524
],
[
3294,
3300
]
],
[
[
878,
893
],
[
2534,
2540
]
],
[
[
920,
935
],
[
1312,
1313
],
[
1346,
1347
],
[
1379,
1380
]
],
[
[
943,
982
],
[
2212,
2213
]
],
[
[
1005,
1012
],
[
1952,
1959
],
[
1977,
1984
],
[
2225,
2232
],
[
2267,
2274
]
],
[
[
1043,
1055
],
[
2315,
2327
]
],
[
[
1057,
1083
],
[
2092,
2118
]
],
[
[
1201,
1213
],
[
1262,
1274
],
[
2437,
2449
]
],
[
[
1832,
1850
],
[
2881,
2899
]
],
[
[
2702,
2733
]
]
] |
# Directly download tasks when nlp format is different than original dataset
SQUAD_TASKS = {"squad_v1", "squad_v2"}
DIRECT_DOWNLOAD_TASKS_TO_DATA_URLS = {
"wsc": f"https://dl.fbaipublicfiles.com/glue/superglue/data/v2/WSC.zip",
"multirc": f"https://dl.fbaipublicfiles.com/glue/superglue/data/v2/MultiRC.zip",
"record": f"https://dl.fbaipublicfiles.com/glue/superglue/data/v2/ReCoRD.zip",
}
DIRECT_DOWNLOAD_TASKS = DIRECT_DOWNLOAD_TASKS_TO_DATA_URLS.keys()
| [
[
[
77,
88
]
],
[
[
116,
150
],
[
426,
460
]
],
[
[
402,
423
]
]
] |
import json
from types import SimpleNamespace
with open('./config/config.json') as json_file:
data = json.load(json_file, object_hook=lambda d: SimpleNamespace(**d)) | [
[
[
7,
11
],
[
106,
110
]
],
[
[
30,
45
],
[
149,
164
]
],
[
[
84,
93
],
[
116,
125
]
],
[
[
99,
103
]
]
] |
# Robot to enter weekly sales data into the RobotSpareBin Industries Intranet.
import os
from Browser import Browser
from Browser.utils.data_types import SelectAttribute
from RPA.Excel.Files import Files
from RPA.HTTP import HTTP
from RPA.PDF import PDF
browser = Browser()
def open_the_intranet_website():
browser.new_page("https://robotsparebinindustries.com/")
def log_in():
browser.type_text("css=#username", "maria")
browser.type_secret("css=#password", "thoushallnotpass")
browser.click("text=Log in")
def download_the_excel_file():
http = HTTP()
http.download(
url="https://robotsparebinindustries.com/SalesData.xlsx",
overwrite=True)
def fill_and_submit_the_form_for_one_person(sales_rep):
browser.type_text("css=#firstname", sales_rep["First Name"])
browser.type_text("css=#lastname", sales_rep["Last Name"])
browser.type_text("css=#salesresult", str(sales_rep["Sales"]))
browser.select_options_by(
"css=#salestarget",
SelectAttribute["value"],
str(sales_rep["Sales Target"]))
browser.click("text=Submit")
def fill_the_form_using_the_data_from_the_excel_file():
excel = Files()
excel.open_workbook("SalesData.xlsx")
sales_reps = excel.read_worksheet_as_table(header=True)
excel.close_workbook()
for sales_rep in sales_reps:
fill_and_submit_the_form_for_one_person(sales_rep)
def collect_the_results():
browser.take_screenshot(
filename=f"{os.getcwd()}/output/sales_summary.png",
selector="css=div.sales-summary")
def export_the_table_as_a_pdf():
sales_results_html = browser.get_property(
selector="css=#sales-results", property="outerHTML")
pdf = PDF()
pdf.html_to_pdf(sales_results_html, "output/sales_results.pdf")
def log_out():
browser.click("text=Log out")
def main():
try:
open_the_intranet_website()
log_in()
download_the_excel_file()
fill_the_form_using_the_data_from_the_excel_file()
collect_the_results()
export_the_table_as_a_pdf()
finally:
log_out()
browser.playwright.close()
if __name__ == "__main__":
main()
| [
[
[
87,
89
],
[
1488,
1490
]
],
[
[
111,
118
],
[
268,
275
]
],
[
[
156,
171
],
[
1012,
1027
]
],
[
[
200,
205
],
[
1181,
1186
]
],
[
[
227,
231
],
[
576,
580
]
],
[
[
252,
255
],
[
1723,
1726
]
],
[
[
258,
265
],
[
317,
324
],
[
394,
401
],
[
442,
449
],
[
503,
510
],
[
754,
761
],
[
819,
826
],
[
882,
889
],
[
949,
956
],
[
1082,
1089
],
[
1443,
1450
],
[
1630,
1637
],
[
1818,
1825
],
[
2122,
2129
]
],
[
[
284,
309
],
[
1879,
1904
]
],
[
[
380,
386
],
[
1915,
1921
]
],
[
[
538,
561
],
[
1932,
1955
]
],
[
[
698,
737
],
[
1359,
1398
]
],
[
[
1117,
1165
],
[
1966,
2014
]
],
[
[
1416,
1435
],
[
2025,
2044
]
],
[
[
1576,
1601
],
[
2055,
2080
]
],
[
[
1803,
1810
],
[
2104,
2111
]
],
[
[
1854,
1858
],
[
2182,
2186
]
]
] |
import os
import re
import sys
import time
from subprocess import PIPE, run
from types import ModuleType
from typing import Union
import docker
import requests
import storm.__main__ as storm
from lazycluster import Runtime, RuntimeGroup, RuntimeManager, RuntimeTask
from .config import RUNTIME_DOCKER_IMAGE, RUNTIME_NAMES, WORKSPACE_PORT
def setup_module(module: ModuleType) -> None:
""" setup any state specific to the execution of the given module."""
docker_client = docker.from_env()
for runtime_name in RUNTIME_NAMES:
_start_runtime_container(runtime_name, docker_client)
# Sleep a moment to give all processes time to start within the Workspace containers
time.sleep(15)
for runtime_name in RUNTIME_NAMES:
_setup_ssh_connection_to_runtime(runtime_name)
def teardown_module(module: ModuleType) -> None:
"""teardown any state that was previously setup with a setup_module
method.
"""
_remove_runtimes()
class TestRuntime:
def test_setup(self) -> None:
for runtime_name in RUNTIME_NAMES:
completed_process = run(
f"ssh {runtime_name} 'echo $WORKSPACE_NAME'",
shell=True,
stdout=PIPE,
stderr=PIPE,
)
assert completed_process.stderr == b"", "The stderr is not emtpy"
stdout = completed_process.stdout.decode("UTF-8").replace("\n", "")
assert stdout == runtime_name, "Stdout is not equal to the runtime_name"
if not RUNTIME_NAMES:
raise RuntimeError("No runtime names in integration/config.py configured")
Runtime(RUNTIME_NAMES[0])
def test_echo(self) -> None:
runtime_name = RUNTIME_NAMES[len(RUNTIME_NAMES) - 1]
rt = Runtime(runtime_name)
msg = f"Hello Runtime {runtime_name}"
assert rt.echo(msg).rstrip("\n") == msg
def test_working(self) -> None:
runtime_name = RUNTIME_NAMES[0]
exp_working_dir = "/etc"
rt = Runtime(runtime_name, working_dir=exp_working_dir)
act_working_dir = rt.echo("${PWD}").rstrip("\n")
assert exp_working_dir == act_working_dir
task = RuntimeTask("get-working-dir").run_command("echo ${PWD}")
rt.execute_task(task, execute_async=False)
assert exp_working_dir == rt.execution_log(task.name)[0].rstrip("\n").rstrip(
"\r"
)
class TestRuntimeGroup:
def test_creation(self) -> None:
runtime_group = RuntimeGroup(hosts=RUNTIME_NAMES)
for runtime_name in RUNTIME_NAMES:
assert runtime_name in runtime_group._runtimes
assert isinstance(runtime_group._runtimes[runtime_name], Runtime)
class TestRuntimeManager:
def test_create_group(self) -> None:
runtime_group = RuntimeManager().create_group()
for runtime_name in RUNTIME_NAMES:
assert runtime_name in runtime_group._runtimes
assert isinstance(runtime_group._runtimes[runtime_name], Runtime)
# -------------------------------------------------------------------------
def _remove_runtimes() -> None:
docker_client = docker.from_env()
for runtime_name in RUNTIME_NAMES:
try:
runtime_container = docker_client.containers.get(runtime_name)
runtime_container.remove(force=True)
except docker.errors.NotFound:
# TODO: handle create a docker container if not running as containerized test
print(f"Conatiner {runtime_name} not found")
# Delete ssh config as well, because the ssh setup fails
# when testing against multiple python versions
storm.delete(runtime_name)
def _get_current_container_id() -> str:
return run(
"awk -F/ '{ print $NF }' /proc/1/cpuset",
shell=True,
stdout=PIPE,
stderr=PIPE,
encoding="UTF-8",
).stdout.rstrip("\n")
def _start_runtime_container(name: str, client: docker.DockerClient) -> None:
try:
container = client.containers.run(
RUNTIME_DOCKER_IMAGE,
name=name,
environment={"WORKSPACE_NAME": name},
detach=True,
)
except docker.errors.APIError:
_remove_runtimes()
raise
container.reload()
ip_address = container.attrs["NetworkSettings"]["Networks"]["bridge"]["IPAddress"]
os.environ[name] = ip_address
_wait_until_started(ip_address, WORKSPACE_PORT)
def _setup_ssh_connection_to_runtime(runtime_name: str) -> None:
runtime_host = os.getenv(runtime_name, "localhost")
response = requests.get(
f"http://{runtime_host}:{WORKSPACE_PORT}/tooling/ssh/setup-command?origin=http://{runtime_host}:{WORKSPACE_PORT}"
)
ssh_script_runner_regex = rf'^\/bin\/bash <\(curl -s --insecure "(http:\/\/{runtime_host}:{WORKSPACE_PORT}\/shared\/ssh\/setup\?token=[a-z0-9]+&host={runtime_host}&port={WORKSPACE_PORT})"\)$'
pattern = re.compile(ssh_script_runner_regex)
match = pattern.match(response.text)
assert match, "SSH setup script url not found"
# Execute the ssh setup script and automatically pass an ssh connection name to the script
script_url = match.groups()[0]
r = requests.get(script_url)
setup_script_path = "./setup-ssh.sh"
_remove_file_if_exists(setup_script_path)
with open(setup_script_path, "w") as file:
file.write(r.text)
# make the file executable for the user
os.chmod(setup_script_path, 0o744)
completed_process = run(
[f'/bin/bash -c "{setup_script_path}"'],
input=runtime_name,
encoding="ascii",
shell=True,
stdout=PIPE,
stderr=PIPE,
)
# child = pexpect.spawn(f"/bin/bash {setup_script_path}", encoding="UTF-8")
# child.expect("Provide a name .*")
# child.sendline(runtime_name)
# child.expect("remote_ikernel was detected .*")
# child.sendline("no")
# child.expect("Do you want to add this connection as mountable SFTP storage .*")
# child.sendline("no")
# child.close()
_remove_file_if_exists(setup_script_path)
assert completed_process.stderr == ""
assert "Connection successful!" in completed_process.stdout
def _wait_until_started(ip_address: str, workspace_port: Union[str, int]) -> None:
index = 0
health_url = f"http://{ip_address}:{str(workspace_port)}/healthy"
response = None
while response is None or (response.status_code != 200 and index < 15):
index += 1
time.sleep(1)
try:
response = requests.get(health_url, allow_redirects=False, timeout=2)
except requests.ConnectionError:
# Catch error that is raised when the workspace container is not reachable yet
pass
if index == 15:
print("The workspace did not start")
sys.exit(-1)
def _remove_file_if_exists(path: str) -> None:
try:
os.remove(path)
except OSError:
pass
| [
[
[
7,
9
],
[
4370,
4372
],
[
4539,
4541
],
[
5451,
5453
],
[
6913,
6915
]
],
[
[
17,
19
],
[
4945,
4947
]
],
[
[
27,
30
],
[
6834,
6837
]
],
[
[
38,
42
],
[
697,
701
],
[
6502,
6506
]
],
[
[
66,
70
],
[
1221,
1225
],
[
1250,
1254
],
[
3826,
3830
],
[
3847,
3851
],
[
5654,
5658
],
[
5675,
5679
]
],
[
[
72,
75
],
[
1103,
1106
],
[
3736,
3739
],
[
5511,
5514
]
],
[
[
94,
104
],
[
368,
378
],
[
837,
847
]
],
[
[
124,
129
],
[
6269,
6274
]
],
[
[
138,
144
],
[
483,
489
],
[
3147,
3153
],
[
3356,
3362
],
[
3955,
3961
],
[
4190,
4196
]
],
[
[
152,
160
],
[
4592,
4600
],
[
5214,
5222
],
[
6552,
6560
],
[
6626,
6634
]
],
[
[
168,
191
],
[
3656,
3661
]
],
[
[
217,
224
],
[
1640,
1647
],
[
1774,
1781
],
[
2013,
2020
],
[
2701,
2708
],
[
3006,
3013
]
],
[
[
226,
238
],
[
2496,
2508
]
],
[
[
240,
254
],
[
2803,
2817
]
],
[
[
256,
267
],
[
2187,
2198
]
],
[
[
289,
309
],
[
4049,
4069
]
],
[
[
311,
324
],
[
526,
539
],
[
737,
750
],
[
1056,
1069
],
[
1529,
1542
],
[
1648,
1661
],
[
1723,
1736
],
[
1741,
1754
],
[
1950,
1963
],
[
2515,
2528
],
[
2558,
2571
],
[
2863,
2876
],
[
3189,
3202
]
],
[
[
326,
340
],
[
4436,
4450
],
[
4639,
4653
],
[
4711,
4725
],
[
4830,
4844
],
[
4909,
4923
]
],
[
[
347,
359
]
],
[
[
813,
828
]
],
[
[
981,
992
]
],
[
[
2417,
2433
]
],
[
[
2718,
2736
]
],
[
[
3099,
3115
],
[
954,
970
],
[
4222,
4238
]
],
[
[
3689,
3714
]
],
[
[
3911,
3935
],
[
549,
573
]
],
[
[
4458,
4490
],
[
760,
792
]
],
[
[
6216,
6235
],
[
4404,
4423
]
],
[
[
6853,
6875
],
[
5285,
5307
],
[
6061,
6083
]
]
] |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 by University of Kassel, Tu Dortmund, RWTH Aachen University and Fraunhofer
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
# contributors (see AUTHORS file for details). All rights reserved.
import numpy as np
from pandas import Series
from pandapower import element_bus_tuples
__author__ = "smeinecke"
def convert_voltlvl_to_int(voltage_level):
""" Returns voltage level names as int. """
if voltage_level in ["EHV", "ehv", "UHV", "uhv"]:
return 1
elif voltage_level in ["EHV-HV", "ehv-hv", "UHV-HV", "uhv-hv", "EHVHV", "ehvhv", "UHVHV",
"uhvhv"]:
return 2
elif voltage_level in ["HV", "hv"]:
return 3
elif voltage_level in ["HV-MV", "hv-mv", "HVMV", "hvmv"]:
return 4
elif voltage_level in ["MV", "mv"]:
return 5
elif voltage_level in ["MV-LV", "mv-lv", "MVLV", "mvlv"]:
return 6
elif voltage_level in ["LV", "lv"]:
return 7
else:
return int(voltage_level)
def convert_voltlvl_to_str(voltage_level):
""" Returns voltage level names as string. """
return ["EHV", "EHV-HV", "HV", "HV-MV", "MV", "MV-LV", "LV"][convert_voltlvl_to_int(
voltage_level)-1]
def convert_voltlvl_names(voltage_levels, desired_format):
""" Returns voltage level names in desired format.
EXAMPLE:
voltlvl_names = convert_voltlvl_names([1, 2, "hv", 4, 5, "ehv", 7], str)
"""
if desired_format == str:
if isinstance(voltage_levels, str) | (not hasattr(voltage_levels, "__iter__")):
return convert_voltlvl_to_str(voltage_levels)
else:
names = []
for voltage_level in voltage_levels:
for voltage_level in voltage_levels:
names += [convert_voltlvl_to_str(voltage_level)]
return names
elif desired_format == int:
if isinstance(voltage_levels, str) | (not hasattr(voltage_levels, "__iter__")):
return convert_voltlvl_to_int(voltage_levels)
else:
names = []
for voltage_level in voltage_levels:
for voltage_level in voltage_levels:
names += [convert_voltlvl_to_int(voltage_level)]
return names
else:
raise ValueError("desired_format must be str or int")
def _voltlvl_idx(net, element, voltage_level, branch_bus=None, vn_kv_limits=[145, 60, 1]):
""" similar to voltlvl_idx, but for only one voltage_level """
vn_kv_limits = [np.inf] + vn_kv_limits + [-np.inf]
voltage_level = convert_voltlvl_names(voltage_level, int)
lim_max = [0, 0, 1, 1, 2, 2, 3][voltage_level-1]
lim_min = [1, 2, 2, 3, 3, 4, 4][voltage_level-1]
Idx_bus = net.bus.index[(net.bus.vn_kv <= vn_kv_limits[lim_max]) &
(net.bus.vn_kv > vn_kv_limits[lim_min])]
if element == "bus":
return list(Idx_bus)
if branch_bus is None and element not in ["trafo", "trafo3w"]:
# for all other elements than trafos, take the first possibility
for elm, bus_name in element_bus_tuples():
if elm == element:
branch_bus = bus_name
break
if element == "measurement":
measurement_buses = Series(index=net.measurement.index)
# bus
bool_ = net.measurement.element_type == "bus"
measurement_buses.loc[bool_] = net.measurement.element.loc[bool_]
# line and trafo
for branch, side in zip(["line", "line", "trafo", "trafo"], ["from", "to", "hv", "lv"]):
bus = side + "_bus"
bool1 = net.measurement.element_type == branch
bool2 = net.measurement.side == side
measurement_buses.loc[bool1 & bool2] = net[branch][bus].loc[net.measurement.element.loc[
bool1 & bool2]].values
measurement_buses = measurement_buses.astype(int)
isin_Idx_bus = measurement_buses.isin(Idx_bus)
elif branch_bus in net[element].columns: # all other elements than measurement and bus
isin_Idx_bus = net[element][branch_bus].isin(Idx_bus)
else:
raise KeyError("For net[%s] there is no column '%s'. Please" % (element, str(branch_bus)) +
" give 'branch_bus' an valid bus column name, e.g. 'hv_bus' or 'lv_bus'.")
return list(net[element].index[isin_Idx_bus])
def voltlvl_idx(net, element, voltage_levels, branch_bus=None, vn_kv_limits=[145, 60, 1]):
"""
Returns indices of elements with special voltage level.
Even voltage_level numbers behave equally to both neighboring numbers, i.e. 4 == [3, 5] and
"EHV-HV" == ["EHV", "HV"].
EXAMPLE:
hv_and_mv_buses = voltlvl_idx(net, "bus", 4) # 4 == [3, 5]
hv_and_mv_buses = voltlvl_idx(net, "bus", [3, 5])
mv_loads = voltlvl_idx(net, "load", "MV")
hvmv_trafos = voltlvl_idx(net, "trafo", "HV", branch_bus="hv_bus")
hvmv_trafos = voltlvl_idx(net, "trafo", "MV", branch_bus="lv_bus")
ehvhv_and_hvmv_trafos = voltlvl_idx(net, "trafo", 2, branch_bus="hv_bus")
ehvhv_and_hvmv_trafos = voltlvl_idx(net, "trafo", [1, 3], branch_bus="hv_bus")
ehvhv_and_hvmv_trafos = voltlvl_idx(net, "trafo", 4, branch_bus="lv_bus")
ehvhv_and_hvmv_trafos = voltlvl_idx(net, "trafo", [3, 5], branch_bus="lv_bus")
ehvhv_trafos = voltlvl_idx(net, "trafo", 2, branch_bus="lv_bus")
ehv_measurements = voltlvl_idx(net, "measurement", "EHV")
"""
if not net[element].shape[0]:
return []
if isinstance(voltage_levels, str) | (not hasattr(voltage_levels, "__iter__")):
return _voltlvl_idx(net, element, voltage_levels, branch_bus=branch_bus,
vn_kv_limits=vn_kv_limits)
else:
Idx = []
for voltage_level in voltage_levels:
Idx += _voltlvl_idx(net, element, voltage_level, branch_bus=branch_bus,
vn_kv_limits=vn_kv_limits)
return Idx
def get_voltlvl(voltage_values, vn_kv_limits=[145, 60, 1]):
""" Returns an array of voltage levels as integer. """
iter_ = hasattr(voltage_values, "__iter__")
voltage_values = voltage_values if iter_ else [voltage_values]
voltage_values = np.array(voltage_values)
voltage_levels = np.ones(voltage_values.shape)
for lim in vn_kv_limits:
voltage_levels[voltage_values <= lim] += 2
if iter_:
return voltage_levels.astype(int)
else:
return int(voltage_levels[0])
| [
[
[
288,
299
],
[
2592,
2594
],
[
2619,
2621
],
[
6319,
6321
],
[
6365,
6367
]
],
[
[
319,
325
],
[
3334,
3340
]
],
[
[
349,
367
],
[
3159,
3177
]
],
[
[
369,
379
]
],
[
[
400,
422
],
[
1240,
1262
],
[
2064,
2086
],
[
2272,
2294
]
],
[
[
1085,
1107
],
[
1649,
1671
],
[
1857,
1879
]
],
[
[
1300,
1321
],
[
2647,
2668
]
],
[
[
2418,
2430
],
[
5707,
5719
],
[
5919,
5931
]
],
[
[
4448,
4459
]
],
[
[
6068,
6079
]
]
] |
"""Example systems created in Python
"""
import numpy as np
from pysim.cythonsystem import Sys
class VanDerPol(Sys):
"""Simple example of a class representing a VanDerPol oscillator.
"""
def __init__(self):
self.add_state_scalar("x", "dx")
self.add_state_scalar("y", "dy")
self.add_input_scalar("a")
self.add_input_scalar("b")
self.inputs.a = 1.0
self.inputs.b = 1.0
self.states.x = 1.0
self.states.y = 0.0
def do_step(self,dummy):
"""Perform a timestep by implmenting the VanDerPol equations"""
a = self.inputs.a
b = self.inputs.b
x = self.states.x
y = self.states.y
self.ders.dx = a*x*(b-y*y)-y
self.ders.dy = x
class MassSpringDamper(Sys):
"""Simple class for testing the mass-spring-damper simulations with
a cython system"""
def __init__(self):
"""Setup two states (one dimensional vectors for now). Initial
conditions are simular to those in the build in c++ system"""
self.add_state_scalar("x1", "dx1")
self.add_state_scalar("x2", "dx2")
self.states.x1 = 1
self.states.x2 = 0
def do_step(self,dummy):
"""Perform a step using default constants, same as those in the
cpp system"""
m = 100.0
b = 1.0
k = 50.0
f = 0.0
x1 = self.states.x1
x2 = self.states.x2
self.ders.dx1 = x2
self.ders.dx2 =-k/m*x1-b/m*x2+1/m*f
class InOutTestSystem(Sys):
"""Python representation of the cpp InOutTestSystem
Used for testing that the cpp system behaves as the python system
with regards to the input output handling
"""
def __init__(self):
self.add_input_scalar("input_scalar")
self.add_input_vector("input_vector",3)
self.add_input_matrix("input_matrix",3,3)
self.add_state_scalar("state_scalar","der_scalar")
self.add_state_vector("state_vector","der_vector", 3)
self.add_state_matrix("state_matrix","der_matrix", 3, 3)
self.add_output_scalar("input_output_scalar")
self.add_output_vector("input_output_vector",3)
self.add_output_matrix("input_output_matrix",3,3)
self.add_output_scalar("state_output_scalar")
self.add_output_vector("state_output_vector",3)
self.add_output_matrix("state_output_matrix",3,3)
self.inputs.input_scalar = 0.0
self.inputs.input_vector = [0.0, 0.0, 0.0]
self.inputs.input_matrix = np.zeros((3,3))
self.outputs.input_output_scalar = 0.0
self.outputs.input_output_vector = [0.0, 0.0, 0.0]
self.outputs.input_output_matrix = np.zeros((3,3))
self.outputs.state_output_scalar = 0.0
self.outputs.state_output_vector = [0.0, 0.0, 0.0]
self.outputs.state_output_matrix = np.zeros((3,3))
self.states.state_scalar = 1.23
self.states.state_vector = np.ones(3)*4.56
self.states.state_matrix = np.ones((3,3))*7.89
self.ders.der_scalar = 0
self.ders.der_vector = np.zeros(3)
self.ders.der_matrix = np.zeros((3,3))
def do_step(self,dummy):
"""During a timestep we set the outputs to their respective inputs"""
self.outputs.input_output_scalar = self.inputs.input_scalar
self.outputs.input_output_vector = self.inputs.input_vector
self.outputs.input_output_matrix = self.inputs.input_matrix
self.outputs.state_output_scalar = self.states.state_scalar
self.outputs.state_output_vector = self.states.state_vector
self.outputs.state_output_matrix = self.states.state_matrix
| [
[
[
48,
59
],
[
2545,
2547
],
[
2711,
2713
],
[
2876,
2878
],
[
2968,
2970
],
[
3019,
3021
],
[
3103,
3105
],
[
3146,
3148
]
],
[
[
92,
95
],
[
113,
116
],
[
788,
791
],
[
1540,
1543
]
],
[
[
103,
112
]
],
[
[
771,
787
]
],
[
[
1524,
1539
]
]
] |
from enum import Enum
from .durations import MINUTE, HOUR
class OpenshiftVersion(Enum):
VERSION_4_6 = "4.6"
VERSION_4_7 = "4.7"
VERSION_4_8 = "4.8"
VERSION_4_9 = "4.9"
class NetworkType:
OpenShiftSDN = "OpenShiftSDN"
OVNKubernetes = "OVNKubernetes"
WORKING_DIR = "build"
TF_FOLDER = f"{WORKING_DIR}/terraform"
TFVARS_JSON_NAME = "terraform.tfvars.json"
IMAGE_FOLDER = "/tmp/test_images"
TF_MAIN_JSON_NAME = "main.tf"
BASE_IMAGE_FOLDER = "/tmp/images"
IMAGE_NAME = "installer-image.iso"
STORAGE_PATH = "/var/lib/libvirt/openshift-images"
HOST_PASSTHROUGH_CPU_MODE = "host-passthrough"
MASTER_TF_CPU_MODE = HOST_PASSTHROUGH_CPU_MODE
WORKER_TF_CPU_MODE = HOST_PASSTHROUGH_CPU_MODE
NODES_REGISTERED_TIMEOUT = 20 * MINUTE
DEFAULT_CHECK_STATUSES_INTERVAL = 5
CLUSTER_INSTALLATION_TIMEOUT = HOUR
CLUSTER_INSTALLATION_TIMEOUT_OCS = 95 * MINUTE
START_CLUSTER_INSTALLATION_TIMEOUT = 6 * MINUTE
INSTALLING_IN_PROGRESS_TIMEOUT = 15 * MINUTE
VALIDATION_TIMEOUT = 6 * MINUTE
NTP_VALIDATION_TIMEOUT = 10 * MINUTE
OCS_VALIDATION_TIMEOUT = 10 * MINUTE
CNV_VALIDATION_TIMEOUT = 10 * MINUTE
READY_TIMEOUT = 15 * MINUTE
DISCONNECTED_TIMEOUT = 10 * MINUTE
PENDING_USER_ACTION_TIMEOUT = 30 * MINUTE
ERROR_TIMEOUT = 10 * MINUTE
TF_TEMPLATES_ROOT = "terraform_files"
TF_TEMPLATE_BARE_METAL_FLOW = f"{TF_TEMPLATES_ROOT}/baremetal"
TF_TEMPLATE_NONE_PLATFORM_FLOW = f"{TF_TEMPLATES_ROOT}/none"
TF_TEMPLATE_BARE_METAL_INFRA_ENV_FLOW = f"{TF_TEMPLATES_ROOT}/baremetal_infra_env"
TF_NETWORK_POOL_PATH = "/tmp/tf_network_pool.json"
NUMBER_OF_MASTERS = 3
TEST_INFRA = "test-infra"
CLUSTER = CLUSTER_PREFIX = "%s-cluster" % TEST_INFRA
INFRA_ENV_PREFIX = "%s-infra-env" % TEST_INFRA
TEST_NETWORK = "test-infra-net-"
TEST_SECONDARY_NETWORK = "test-infra-secondary-network-"
DEFAULT_CLUSTER_KUBECONFIG_DIR_PATH = "build/kubeconfig"
WAIT_FOR_BM_API = 15 * MINUTE
NAMESPACE_POOL_SIZE = 15
PODMAN_FLAGS = "--cgroup-manager=cgroupfs --storage-driver=vfs --events-backend=file"
DEFAULT_ADDITIONAL_NTP_SOURCE = "clock.redhat.com"
DEFAULT_BASE_DNS_DOMAIN = "redhat.com"
DEFAULT_NAMESPACE = 'assisted-installer'
DEFAULT_TEST_INFRA_DOMAIN = f".{CLUSTER_PREFIX}-{DEFAULT_NAMESPACE}.{DEFAULT_BASE_DNS_DOMAIN}"
TEST_TARGET_INTERFACE = "vnet3"
SUFFIX_LENGTH = 8
OCP_VERSIONS_JSON_PATH = "assisted-service/data/default_ocp_versions.json"
DEFAULT_IPV6_SERVICE_CIDR = "2003:db8::/112"
DEFAULT_IPV6_CLUSTER_CIDR = "2002:db8::/53"
DEFAULT_IPV6_HOST_PREFIX = 64
DEFAULT_PROXY_SERVER_PORT = 3129
DEFAULT_LOAD_BALANCER_PORT = 6443
IP_NETWORK_ASSET_FIELDS = (
"machine_cidr",
"machine_cidr6",
"provisioning_cidr",
"provisioning_cidr6",
)
REQUIRED_ASSET_FIELDS = (
"libvirt_network_if",
"libvirt_secondary_network_if",
*IP_NETWORK_ASSET_FIELDS,
)
class ImageType:
FULL_ISO = "full-iso"
MINIMAL_ISO = "minimal-iso"
class NodeRoles:
WORKER = "worker"
MASTER = "master"
AUTO_ASSIGN = "auto-assign"
class NodesStatus:
INSUFFICIENT = "insufficient"
KNOWN = "known"
INSTALLING = "installing"
INSTALLING_IN_PROGRESS = "installing-in-progress"
INSTALLING_PENDING_USER_ACTION = "installing-pending-user-action"
INSTALLED = "installed"
ERROR = "error"
PENDING_FOR_INPUT = "pending-for-input"
DAY2_INSTALLED = "added-to-existing-cluster"
RESETING_PENDING_USER_ACTION = "resetting-pending-user-action"
DISCONNECTED = "disconnected"
INSUFFICIENT_UNBOUND = "insufficient-unbound"
KNOWN_UNBOUND = "known-unbound"
class ClusterStatus:
INSUFFICIENT = "insufficient"
INSTALLED = "installed"
READY = "ready"
PREPARING_FOR_INSTALLATION = "preparing-for-installation"
INSTALLING = "installing"
FINALIZING = "finalizing"
ERROR = "error"
PENDING_FOR_INPUT = "pending-for-input"
CANCELLED = "cancelled"
INSTALLING_PENDING_USER_ACTION = "installing-pending-user-action"
class HostsProgressStages:
START_INSTALLATION = "Starting installation"
INSTALLING = "Installing"
WRITE_IMAGE_TO_DISK = "Writing image to disk"
WAIT_FOR_CONTROL_PLANE = "Waiting for control plane"
REBOOTING = "Rebooting"
WAIT_FOR_IGNITION = "Waiting for ignition"
JOINED = "Joined"
CONFIGURING = "Configuring"
DONE = "Done"
class AgentStatus:
VALIDATED = "Validated"
INSTALLED = "Installed"
REQUIREMENTS_MET = "RequirementsMet"
all_host_stages = [HostsProgressStages.START_INSTALLATION, HostsProgressStages.INSTALLING,
HostsProgressStages.WRITE_IMAGE_TO_DISK, HostsProgressStages.WAIT_FOR_CONTROL_PLANE,
HostsProgressStages.REBOOTING, HostsProgressStages.WAIT_FOR_IGNITION,
HostsProgressStages.CONFIGURING, HostsProgressStages.JOINED, HostsProgressStages.DONE]
class Events:
REGISTERED_CLUSTER = "Registered cluster"
SUCCESSFULLY_REGISTERED_CLUSTER = "Successfully registered cluster"
PENDING_FOR_INPUT = "to pending-for-input"
GENERATED_IMAGE = "Generated image (SSH public key is set)"
GENERATED_IMAGE_FULL = "Generated image (Image type is \"full-iso\", SSH public key is set)"
GENERATED_IMAGE_MINIMAL = "Generated image (Image type is \"minimal-iso\", SSH public key is set)"
DOWNLOAD_IMAGE = "Started image download"
STARTED_DOWNLOAD_IMAGE = "Started image download (image type is \"full-iso\")"
HOST_REGISTERED_TO_CLUSTER = ": registered to cluster"
INSUFFICIENT = "insufficient"
KNOWN = "to \"known\""
READY = "to ready"
CLUSTER_VALIDATION = "Cluster validation \'all-hosts-are-ready-to-install\' is now fixed"
PREPARING_FOR_INSTALLATION = "updated status from \"known\" to \"preparing-for-installation\""
PREPARING_SUCCESSFUL = "updated status from \"preparing-for-installation\" to \"preparing-successful\""
SET_BOOTSTRAP = "set as bootstrap"
INSTALLING = "updated status from \"preparing-successful\" to \"installing\""
CLUSTER_PREPARED = "Cluster was prepared successfully for installation"
CLUSTER_INSTALLING = "to installing"
INSTALLING_IN_PROGRESS = "updated status from \"installing\" to \"installing-in-progress\""
INSTALLATION_STAGE = "reached installation stage Starting installation"
INSTALLING_PENDING_USER_ACTION = "installing-pending-user-action"
WRITING_IMAGE_TO_DISK = "reached installation stage Writing image to disk"
REBOOTING = "reached installation stage Rebooting"
CONTROL_PLANE = "reached installation stage Waiting for control plane"
IGNITION = "reached installation stage Waiting for ignition"
CONFIGURING = "reached installation stage Configuring"
JOINED = "reached installation stage Joined"
DONE = "reached installation stage Done"
CANCELLED_CLUSTER_INSTALLATION = "Cancelled cluster installation"
CANCELLED_FOR_HOST = "Installation cancelled for host"
CLUSTER_VERSION_DONE = "Operator cvo status: available message: Done"
CANCELLED_STATUS = "to \"cancelled\""
RESET_CLUSTER_INSTALLATION = "Reset cluster installation"
RESET_FOR_HOST = "Installation reset for host"
RESETTING_PENDING_USER_ACTION = "updated status from \"cancelled\" to \"resetting-pending-user-action\""
INSTALLED = "updated status from \"installing-in-progress\" to \"installed\""
FINALIZING = "to finalizing"
SUCCESSFULLY_INSTALLED = "Successfully finished installing cluster"
ERROR = "error"
DAY2_INSTALLED = "added-to-existing-cluster"
PROXY_SETTINGS_CHANGED = "Proxy settings changed"
class HostStatusInfo:
WRONG_BOOT_ORDER = "Expected the host to boot from disk, but it booted the installation image"
REBOOT_TIMEOUT = "Host failed to reboot within timeout"
class Platforms:
BARE_METAL = 'baremetal'
NONE = 'none'
VSPHERE = 'vsphere'
class HighAvailabilityMode:
FULL = 'Full'
NONE = 'None'
class BaseAsset:
MACHINE_CIDR = "192.168.127.0/24"
MACHINE_CIDR6 = "1001:db9::/120"
PROVISIONING_CIDR = "192.168.145.0/24"
PROVISIONING_CIDR6 = "3001:db9::/120"
NETWORK_IF = "tt1"
SECONDARY_NETWORK_IF = "stt1"
| [
[
[
17,
21
],
[
84,
88
]
],
[
[
46,
52
],
[
741,
747
],
[
860,
866
],
[
908,
914
],
[
953,
959
],
[
985,
991
],
[
1022,
1028
],
[
1059,
1065
],
[
1096,
1102
],
[
1124,
1130
],
[
1159,
1165
],
[
1201,
1207
],
[
1229,
1235
],
[
1850,
1856
]
],
[
[
54,
58
],
[
815,
819
]
],
[
[
67,
83
]
],
[
[
195,
206
]
],
[
[
280,
291
],
[
317,
328
]
],
[
[
302,
311
]
],
[
[
341,
357
]
],
[
[
384,
396
]
],
[
[
418,
435
]
],
[
[
448,
465
]
],
[
[
482,
492
]
],
[
[
517,
529
]
],
[
[
568,
593
],
[
636,
661
],
[
683,
708
]
],
[
[
615,
633
]
],
[
[
662,
680
]
],
[
[
709,
733
]
],
[
[
748,
779
]
],
[
[
784,
812
]
],
[
[
820,
852
]
],
[
[
867,
901
]
],
[
[
915,
945
]
],
[
[
960,
978
]
],
[
[
992,
1014
]
],
[
[
1029,
1051
]
],
[
[
1066,
1088
]
],
[
[
1103,
1116
]
],
[
[
1131,
1151
]
],
[
[
1166,
1193
]
],
[
[
1208,
1221
]
],
[
[
1236,
1253
],
[
1307,
1324
],
[
1373,
1390
],
[
1441,
1458
]
],
[
[
1274,
1301
]
],
[
[
1337,
1367
]
],
[
[
1398,
1435
]
],
[
[
1481,
1501
]
],
[
[
1532,
1549
]
],
[
[
1554,
1564
],
[
1622,
1632
],
[
1669,
1679
]
],
[
[
1580,
1587
]
],
[
[
1590,
1604
],
[
2131,
2145
]
],
[
[
1633,
1649
]
],
[
[
1680,
1692
]
],
[
[
1713,
1735
]
],
[
[
1770,
1805
]
],
[
[
1827,
1842
]
],
[
[
1857,
1876
]
],
[
[
1882,
1894
]
],
[
[
1968,
1997
]
],
[
[
2019,
2042
],
[
2168,
2191
]
],
[
[
2058,
2075
],
[
2148,
2165
]
],
[
[
2099,
2124
]
],
[
[
2194,
2215
]
],
[
[
2226,
2239
]
],
[
[
2244,
2266
]
],
[
[
2320,
2345
]
],
[
[
2365,
2390
]
],
[
[
2409,
2433
]
],
[
[
2439,
2464
]
],
[
[
2472,
2498
]
],
[
[
2507,
2530
],
[
2722,
2745
]
],
[
[
2629,
2650
]
],
[
[
2757,
2766
]
],
[
[
2834,
2843
]
],
[
[
2929,
2940
]
],
[
[
3486,
3499
]
],
[
[
3875,
3894
],
[
4368,
4387
],
[
4408,
4427
],
[
4459,
4478
],
[
4500,
4519
],
[
4563,
4582
],
[
4594,
4613
],
[
4652,
4671
],
[
4685,
4704
],
[
4713,
4732
]
],
[
[
4237,
4248
]
],
[
[
4349,
4364
]
],
[
[
4747,
4753
]
],
[
[
7449,
7463
]
],
[
[
7632,
7641
]
],
[
[
7722,
7742
]
],
[
[
7788,
7797
]
]
] |
from gym.envs.registration import register
register(
id='SimpleFlappy-v0',
entry_point='gym_simpleflappy.envs:FlappyEnv',
)
register(
id='SimpleFlappyDistance-v0',
entry_point='gym_simpleflappy.envs:FlappyEnvDistance',
)
| [
[
[
34,
42
],
[
44,
52
],
[
134,
142
]
]
] |
from haystack import indexes
from peeldb.models import (
JobPost,
Skill,
City,
FunctionalArea,
Industry,
Qualification,
State,
)
from datetime import datetime
from django.core import serializers
from mpcomp.views import get_absolute_url
class jobIndex(indexes.SearchIndex, indexes.Indexable):
"""
Indexing for job model
"""
text = indexes.CharField(
document=True, use_template=True, template_name="index/job_text.txt"
)
title = indexes.CharField(model_attr="title")
designation = indexes.CharField(model_attr="job_role")
job_type = indexes.CharField(model_attr="job_type", faceted=True)
skills = indexes.MultiValueField()
location = indexes.MultiValueField()
slug = indexes.CharField(model_attr="slug")
min_year = indexes.IntegerField(model_attr="min_year")
max_year = indexes.IntegerField(model_attr="max_year")
min_month = indexes.IntegerField(model_attr="min_month")
max_month = indexes.IntegerField(model_attr="max_month")
min_salary = indexes.FloatField()
max_salary = indexes.FloatField()
industry = indexes.MultiValueField()
edu_qualification = indexes.MultiValueField()
functional_area = indexes.MultiValueField()
walkin_from_date = indexes.DateField(null=True, model_attr="walkin_from_date")
walkin_to_date = indexes.DateField(null=True, model_attr="walkin_to_date")
status = indexes.CharField(model_attr="status")
# posted_on = indexes.DateField(model_attr='posted_on')
created_on = indexes.DateField(model_attr="created_on")
description = indexes.CharField(model_attr="description")
post_url = indexes.CharField()
company_name = indexes.CharField(model_attr="company_name")
company = indexes.CharField(model_attr="company", null=True)
published_on = indexes.DateField(model_attr="published_on", null=True)
def get_model(self):
return JobPost
def prepare_post_url(self, obj):
return get_absolute_url(obj)
def prepare_skills(self, obj):
return [str(s.name) for s in obj.skills.filter(status="Active")]
def prepare_location(self, obj):
locations = serializers.serialize("json", obj.location.all())
return locations
def prepare_industry(self, obj):
return [str(s.name) for s in obj.industry.all()]
def prepare_functional_area(self, obj):
return [str(l.name) for l in obj.functional_area.all()]
def prepare_min_salary(self, obj):
if int(obj.min_salary) > 0:
return float(obj.min_salary) / 100000
else:
return 0.0
def prepare_max_salary(self, obj):
if int(obj.max_salary) > 0:
return float(obj.max_salary) / 100000
else:
return 0.0
def prepare_created_on(self, obj):
if obj.created_on:
current_date = datetime.strptime(str(obj.created_on), "%Y-%m-%d").strftime(
"%Y-%m-%d"
)
return current_date
return None
def prepare_published_on(self, obj):
if obj.published_on:
current_date = datetime.strptime(
str(obj.published_on.date()), "%Y-%m-%d"
).strftime("%Y-%m-%d")
return current_date
return None
def prepare_edu_qualification(self, obj):
return [str(s.name) for s in obj.edu_qualification.filter(status="Active")]
# def prepare_walkin_from_date(self, obj):
# if obj.walkin_from_date:
# current_date = datetime.strptime(str(obj.walkin_from_date), "%Y-%m-%d").strftime("%Y-%m-%d 00:00:00")
# return current_date
# return None
# def prepare_walkin_to_date(self, obj):
# if obj.walkin_to_date:
# current_date = datetime.strptime(str(obj.walkin_to_date), "%Y-%m-%d").strftime("%Y-%m-%d 00:00:00")
# return current_date
# return None
def index_queryset(self, using=None):
# from datetime import datetime
# current_date = datetime.strptime(str(datetime.now().date()), "%Y-%m-%d").strftime("%Y-%m-%d")
return (
self.get_model()
.objects.filter(status="Live")
.select_related("company", "user")
.prefetch_related(
"location", "edu_qualification", "industry", "skills", "functional_area"
)
)
class skillautoIndex(indexes.SearchIndex, indexes.Indexable):
"""
Index for autocompleate for designation and skills
"""
text = indexes.CharField(
document=True, use_template=True, template_name="index/skill_text.txt"
)
skill_name = indexes.CharField(model_attr="name")
skill_slug = indexes.CharField(model_attr="slug")
no_of_jobposts = indexes.CharField()
status = indexes.CharField(model_attr="status")
def get_model(self):
return Skill
def index_queryset(self, using=None):
return self.get_model().objects.filter(status="Active")
def prepare_no_of_jobposts(self, obj):
return obj.get_no_of_jobposts().count()
class locationIndex(indexes.SearchIndex, indexes.Indexable):
""" index for loacation"""
text = indexes.CharField(
document=True, use_template=True, template_name="index/city_text.txt"
)
city_name = indexes.CharField(model_attr="name")
no_of_jobposts = indexes.CharField()
status = indexes.CharField(model_attr="status")
def get_model(self):
return City
def index_queryset(self, using=None):
return self.get_model().objects.filter(status="Enabled")
def prepare_no_of_jobposts(self, obj):
return obj.get_no_of_jobposts().count()
class industryIndex(indexes.SearchIndex, indexes.Indexable):
""" index for loacation"""
text = indexes.CharField(
document=True, use_template=True, template_name="index/industry_text.txt"
)
industry_name = indexes.CharField(model_attr="name")
no_of_jobposts = indexes.CharField()
industry_slug = indexes.CharField(model_attr="slug")
def get_model(self):
return Industry
def index_queryset(self, using=None):
return self.get_model().objects.all()
def prepare_no_of_jobposts(self, obj):
return obj.get_no_of_jobposts().count()
class functionalareaIndex(indexes.SearchIndex, indexes.Indexable):
""" index for loacation"""
text = indexes.CharField(
document=True, use_template=True, template_name="index/functionalarea_text.txt"
)
functionalarea_name = indexes.CharField(model_attr="name")
no_of_jobposts = indexes.CharField()
def get_model(self):
return FunctionalArea
def index_queryset(self, using=None):
return self.get_model().objects.all()
def prepare_no_of_jobposts(self, obj):
return obj.get_no_of_jobposts().count()
class qualificationIndex(indexes.SearchIndex, indexes.Indexable):
""" index for loacation"""
text = indexes.CharField(
document=True, use_template=True, template_name="index/qualification_text.txt"
)
edu_name = indexes.CharField(model_attr="name")
edu_slug = indexes.CharField(model_attr="slug")
no_of_jobposts = indexes.CharField()
def get_model(self):
return Qualification
def prepare_no_of_jobposts(self, obj):
return obj.get_no_of_jobposts().count()
def index_queryset(self, using=None):
return self.get_model().objects.filter(status="Active")
class stateIndex(indexes.SearchIndex, indexes.Indexable):
""" index for State"""
text = indexes.CharField(
document=True, use_template=True, template_name="index/state_text.txt"
)
state_name = indexes.CharField(model_attr="name")
no_of_cities = indexes.CharField()
status = indexes.CharField(model_attr="status")
no_of_jobposts = indexes.CharField()
is_duplicate = indexes.BooleanField(default=False)
def get_model(self):
return State
def index_queryset(self, using=None):
return self.get_model().objects.filter(status="Enabled")
def prepare_no_of_cities(self, obj):
return obj.state.all().count()
def prepare_no_of_jobposts(self, obj):
return obj.get_no_of_jobposts().count()
def prepare_is_duplicate(self, obj):
return obj.state.filter(name=obj.name).exists()
| [
[
[
21,
28
],
[
282,
289
],
[
303,
310
],
[
378,
385
],
[
492,
499
],
[
548,
555
],
[
604,
611
],
[
672,
679
],
[
713,
720
],
[
750,
757
],
[
802,
809
],
[
861,
868
],
[
921,
928
],
[
982,
989
],
[
1044,
1051
],
[
1082,
1089
],
[
1118,
1125
],
[
1168,
1175
],
[
1216,
1223
],
[
1265,
1272
],
[
1346,
1353
],
[
1417,
1424
],
[
1533,
1540
],
[
1594,
1601
],
[
1653,
1660
],
[
1692,
1699
],
[
1751,
1758
],
[
1821,
1828
],
[
4407,
4414
],
[
4428,
4435
],
[
4531,
4538
],
[
4652,
4659
],
[
4706,
4713
],
[
4764,
4771
],
[
4797,
4804
],
[
5104,
5111
],
[
5125,
5132
],
[
5188,
5195
],
[
5307,
5314
],
[
5365,
5372
],
[
5398,
5405
],
[
5705,
5712
],
[
5726,
5733
],
[
5789,
5796
],
[
5916,
5923
],
[
5974,
5981
],
[
6014,
6021
],
[
6310,
6317
],
[
6331,
6338
],
[
6394,
6401
],
[
6533,
6540
],
[
6591,
6598
],
[
6875,
6882
],
[
6896,
6903
],
[
6959,
6966
],
[
7086,
7093
],
[
7138,
7145
],
[
7196,
7203
],
[
7489,
7496
],
[
7510,
7517
],
[
7569,
7576
],
[
7690,
7697
],
[
7746,
7753
],
[
7779,
7786
],
[
7839,
7846
],
[
7878,
7885
]
],
[
[
61,
68
],
[
1918,
1925
]
],
[
[
74,
79
],
[
4877,
4882
]
],
[
[
85,
89
],
[
5478,
5482
]
],
[
[
95,
109
],
[
6652,
6666
]
],
[
[
115,
123
],
[
6092,
6100
]
],
[
[
129,
142
],
[
7257,
7270
]
],
[
[
148,
153
],
[
7955,
7960
]
],
[
[
178,
186
],
[
2867,
2875
],
[
3119,
3127
]
],
[
[
211,
222
],
[
2168,
2179
]
],
[
[
248,
264
],
[
1979,
1995
]
],
[
[
273,
281
]
],
[
[
4392,
4406
]
],
[
[
5090,
5103
]
],
[
[
5691,
5704
]
],
[
[
6290,
6309
]
],
[
[
6856,
6874
]
],
[
[
7478,
7488
]
]
] |
#coding:utf-8
#
# id: bugs.core_5097
# title: COMPUTED-BY expressions are not converted to their field type inside the engine
# decription:
#
# tracker_id: CORE-5097
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = [('^((?!sqltype|T2_CHECK|C1_CHECK).)*$', '')]
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
recreate table test1(
t0 timestamp default 'now'
,t1 timestamp computed by( 'now' )
,t2 computed by( extract(weekday from t1) )
);
recreate table test2 (n1 integer, c1 integer computed by (1.2));
commit;
insert into test1 default values;
insert into test2 values (0);
commit;
set list on;
set sqlda_display on;
select * from test1 rows 0;
select * from test2 rows 0;
set sqlda_display off;
select iif( t2 between 0 and 6, 1, 0 ) t2_check from test1;
select c1 || '' as c1_check from test2;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
01: sqltype: 510 TIMESTAMP Nullable scale: 0 subtype: 0 len: 8
02: sqltype: 510 TIMESTAMP Nullable scale: 0 subtype: 0 len: 8
03: sqltype: 500 SHORT Nullable scale: 0 subtype: 0 len: 2
01: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
02: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
T2_CHECK 1
C1_CHECK 1
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
| [
[
[
277,
283
],
[
1620,
1626
]
],
[
[
308,
318
],
[
468,
478
]
],
[
[
320,
328
],
[
1127,
1135
]
],
[
[
330,
336
],
[
1667,
1673
]
],
[
[
372,
387
],
[
1173,
1188
]
],
[
[
437,
450
],
[
499,
512
]
],
[
[
461,
465
]
],
[
[
515,
528
],
[
1144,
1157
]
],
[
[
1119,
1124
]
],
[
[
1191,
1208
],
[
1704,
1721
]
],
[
[
1653,
1659
]
]
] |
from enum import Enum
from typing import Any
from importlib import import_module
class ValidationError(Exception):
"""
Error class for validation failed
"""
def __init__(self, payload: dict):
"""
:param message: error message
"""
self.payload = payload
def generate_err_msg(self, payload: dict, indent: int = 0) -> str:
"""
Generate human-friendly error message
example output:
key1: Error message
key2:
inner_key: error message
inner_key2:
key3: error message
"""
make_indent = ''.join([' ' for i in range(0, indent)])
previous_text = ''
for (key, errors) in payload.items():
for err in errors:
if isinstance(err, dict):
previous_text += '{}{}:\n'.format(make_indent, key)
previous_text += self.generate_err_msg(err, indent+1)
pass
else:
previous_text += '{}{}: {}\n'.format(make_indent, key, err)
pass
return previous_text
@property
def message(self):
return self.generate_err_msg(self.payload)
class CatConfig:
def __init__(self, format: str = 'json', validator_schema: dict = None, data: dict = None):
"""
:param format: Format of data used for read (json/toml/yaml)
:param validator_schema: Schema for validator (see https://docs.python-cerberus.org/en/stable/usage.html)
:param data: Config data
"""
self._parser = None
self._data = {}
if not data == None:
self._data = data
self._validator_schema = validator_schema
if format:
self._import_parser(format)
self._config = {}
def _import_parser(self, parser_name: str):
if parser_name == 'json':
self._parser = import_module('json')
elif parser_name == 'toml':
try:
self._parser = import_module('toml')
except ImportError:
raise Exception(
"CatConfig needs toml parser to work, "
"please add `toml` module to your project")
elif parser_name == 'yaml':
try:
self._parser = import_module('yaml')
# it works! I love Python!
self._parser.loads = self._parser.load
except ImportError:
raise Exception(
"CatConfig needs yaml parser to work, "
"please add `pyyaml` module to your project\n")
else:
raise Exception('Unsupported parser type')
def load_from_file(self, file_path: str, format: 'str' = None) -> None:
"""
Update config from file
:param file_path: config file path
:param format: format of config file (default: json)
"""
with open(file_path, 'r') as f:
self.load_from_string(f.read(), format)
def load_from_string(self, data: str, format: 'str' = None) -> None:
"""
Update config from string and validate
:param data: target data
:param format: format of config file (default: json)
"""
if format:
self._import_parser(format)
return self.load(self._parser.loads(data))
def load(self, data: dict) -> None:
"""
Update config from param `data`
:param data: data
"""
if self._validator_schema:
self.validate(data)
self._data.update(data)
def validate(self, data: str) -> None:
"""
Validate data
:param data: config data
"""
try:
cerberus = import_module('cerberus')
except ImportError:
raise Exception('CatConfig need `cerberus` module to make validation work normally, '
'please add `cerberus` module to your project.')
v = cerberus.Validator(self._validator_schema)
v.validate(data)
if v != True:
raise ValidationError(v.errors)
def update(self, data: dict) -> None:
"""
Update config item
:param data: data to be updated
"""
self._data.update(data)
def set(self, key: str, value: str) -> None:
"""
Set config value
:param key: key of config item
:param value: value of config item
"""
return self.update({key: value})
def get(self, key: str=None) -> Any:
"""
Get item by key
It will return self contained object if param `key` == None
:param key: key
"""
if key == None:
return self._data
if key in self._data:
data = self._data.get(key)
if isinstance(data, dict):
return CatConfig(data=data)
elif isinstance(data, list):
return [CatConfig(data=x) for x in data]
else:
return data
return CatConfig()
def __getitem__(self, key: str) -> Any:
return self.get(key)
def __bool__(self):
"""
Return False if `self._data` has no item
"""
return len(self._data) != 0
def __getattr__(self, name: str) -> Any:
return self.__getitem__(name)
def __eq__(self, b):
"""
Make sure CatConfig object without any data equal False
"""
if b == None:
if len(self._data.keys()) == 0:
return True
return self._data == b
def __str__(self):
if self._data == {}:
return 'None'
return str(self._data) | [
[
[
17,
21
]
],
[
[
41,
44
],
[
4622,
4625
],
[
5188,
5191
],
[
5397,
5400
]
],
[
[
67,
80
],
[
1972,
1985
],
[
2078,
2091
],
[
2373,
2386
],
[
3822,
3835
]
],
[
[
88,
103
],
[
4171,
4186
]
],
[
[
1266,
1275
],
[
4952,
4961
],
[
5038,
5047
],
[
5132,
5141
]
]
] |
from engine.utils import RF_sq64, sq64_to_sq120, print_board
def react_chess_board_to_IZII_board(board):
if board is None:
exit()
izii_board = ["x"] * 120
pieces = board.split(',')
for i in range(len(izii_board)):
if i >= 20 and i < 100:
if i % 10 != 0 and i % 10 != 9:
izii_board[i] = 'o'
for p in pieces:
# print("pp", p)
piece_with_RF = p.split('@')
# print("look: ", piece_with_RF)
piece = piece_with_RF[0]
RF = piece_with_RF[1]
sq64 = RF_sq64(RF[0], RF[1])
sq120 = sq64_to_sq120(sq64)
izii_board[sq120] = piece
return ''.join(izii_board)
| [
[
[
25,
32
],
[
557,
564
]
],
[
[
34,
47
],
[
595,
608
]
],
[
[
49,
60
]
],
[
[
67,
98
]
]
] |
import numpy as np
import pandas as pd
import pytest
import ibis
import ibis.expr.datatypes as dt
from ibis.backends.pandas.udf import udf
def make_t():
return ibis.table(
[
('_timestamp', 'int32'),
('dim1', 'int32'),
('dim2', 'int32'),
('valid_seconds', 'int32'),
('meas1', 'int32'),
('meas2', 'int32'),
('year', 'int32'),
('month', 'int32'),
('day', 'int32'),
('hour', 'int32'),
('minute', 'int32'),
],
name="t",
)
@pytest.fixture
def t():
return make_t()
def make_base(t):
return (
(t.year > 2016)
| ((t.year == 2016) & (t.month > 6))
| ((t.year == 2016) & (t.month == 6) & (t.day > 6))
| ((t.year == 2016) & (t.month == 6) & (t.day == 6) & (t.hour > 6))
| (
(t.year == 2016)
& (t.month == 6)
& (t.day == 6)
& (t.hour == 6)
& (t.minute >= 5)
)
) & (
(t.year < 2016)
| ((t.year == 2016) & (t.month < 6))
| ((t.year == 2016) & (t.month == 6) & (t.day < 6))
| ((t.year == 2016) & (t.month == 6) & (t.day == 6) & (t.hour < 6))
| (
(t.year == 2016)
& (t.month == 6)
& (t.day == 6)
& (t.hour == 6)
& (t.minute <= 5)
)
)
@pytest.fixture
def base(t):
return make_base(t)
def make_large_expr(t, base):
src_table = t[base]
src_table = src_table.mutate(
_timestamp=(src_table['_timestamp'] - src_table['_timestamp'] % 3600)
.cast('int32')
.name('_timestamp'),
valid_seconds=300,
)
aggs = []
for meas in ['meas1', 'meas2']:
aggs.append(src_table[meas].sum().cast('float').name(meas))
src_table = src_table.aggregate(
aggs, by=['_timestamp', 'dim1', 'dim2', 'valid_seconds']
)
part_keys = ['year', 'month', 'day', 'hour', 'minute']
ts_col = src_table['_timestamp'].cast('timestamp')
new_cols = {}
for part_key in part_keys:
part_col = getattr(ts_col, part_key)()
new_cols[part_key] = part_col
src_table = src_table.mutate(**new_cols)
return src_table[
[
'_timestamp',
'dim1',
'dim2',
'meas1',
'meas2',
'year',
'month',
'day',
'hour',
'minute',
]
]
@pytest.fixture
def large_expr(t, base):
return make_large_expr(t, base)
@pytest.mark.benchmark(group="construction")
@pytest.mark.parametrize(
"construction_fn",
[
pytest.param(lambda *_: make_t(), id="small"),
pytest.param(lambda t, *_: make_base(t), id="medium"),
pytest.param(lambda t, base: make_large_expr(t, base), id="large"),
],
)
def test_construction(benchmark, construction_fn, t, base):
benchmark(construction_fn, t, base)
@pytest.mark.benchmark(group="builtins")
@pytest.mark.parametrize(
"expr_fn",
[
pytest.param(lambda t, _base, _large_expr: t, id="small"),
pytest.param(lambda _t, base, _large_expr: base, id="medium"),
pytest.param(lambda _t, _base, large_expr: large_expr, id="large"),
],
)
@pytest.mark.parametrize("builtin", [hash, str])
def test_builtins(benchmark, expr_fn, builtin, t, base, large_expr):
expr = expr_fn(t, base, large_expr)
benchmark(builtin, expr)
@pytest.mark.benchmark(group="compilation")
@pytest.mark.parametrize("module", ["impala", "sqlite"])
@pytest.mark.parametrize(
"expr_fn",
[
pytest.param(lambda t, _base, _large_expr: t, id="small"),
pytest.param(lambda _t, base, _large_expr: base, id="medium"),
pytest.param(lambda _t, _base, large_expr: large_expr, id="large"),
],
)
def test_compile(benchmark, module, expr_fn, t, base, large_expr):
try:
mod = getattr(ibis, module)
except AttributeError as e:
pytest.skip(str(e))
else:
expr = expr_fn(t, base, large_expr)
benchmark(mod.compile, expr)
@pytest.fixture
def pt():
n = 60_000
data = pd.DataFrame(
{
'key': np.random.choice(16000, size=n),
'low_card_key': np.random.choice(30, size=n),
'value': np.random.rand(n),
'timestamps': pd.date_range(
start='now', periods=n, freq='s'
).values,
'timestamp_strings': pd.date_range(
start='now', periods=n, freq='s'
).values.astype(str),
'repeated_timestamps': pd.date_range(
start='2018-09-01', periods=30
).repeat(int(n / 30)),
}
)
return ibis.pandas.connect(dict(df=data)).table('df')
def high_card_group_by(t):
return t.groupby(t.key).aggregate(avg_value=t.value.mean())
def cast_to_dates(t):
return t.timestamps.cast(dt.date)
def cast_to_dates_from_strings(t):
return t.timestamp_strings.cast(dt.date)
def multikey_group_by_with_mutate(t):
return (
t.mutate(dates=t.timestamps.cast('date'))
.groupby(['low_card_key', 'dates'])
.aggregate(avg_value=lambda t: t.value.mean())
)
def simple_sort(t):
return t.sort_by([t.key])
def simple_sort_projection(t):
return t[['key', 'value']].sort_by(['key'])
def multikey_sort(t):
return t.sort_by(['low_card_key', 'key'])
def multikey_sort_projection(t):
return t[['low_card_key', 'key', 'value']].sort_by(['low_card_key', 'key'])
def low_card_rolling_window(t):
return ibis.trailing_range_window(
ibis.interval(days=2),
order_by=t.repeated_timestamps,
group_by=t.low_card_key,
)
def low_card_grouped_rolling(t):
return t.value.mean().over(low_card_rolling_window(t))
def high_card_rolling_window(t):
return ibis.trailing_range_window(
ibis.interval(days=2),
order_by=t.repeated_timestamps,
group_by=t.key,
)
def high_card_grouped_rolling(t):
return t.value.mean().over(high_card_rolling_window(t))
@udf.reduction(['double'], 'double')
def my_mean(series):
return series.mean()
def low_card_grouped_rolling_udf_mean(t):
return my_mean(t.value).over(low_card_rolling_window(t))
def high_card_grouped_rolling_udf_mean(t):
return my_mean(t.value).over(high_card_rolling_window(t))
@udf.analytic(['double'], 'double')
def my_zscore(series):
return (series - series.mean()) / series.std()
def low_card_window(t):
return ibis.window(group_by=t.low_card_key)
def high_card_window(t):
return ibis.window(group_by=t.key)
def low_card_window_analytics_udf(t):
return my_zscore(t.value).over(low_card_window(t))
def high_card_window_analytics_udf(t):
return my_zscore(t.value).over(high_card_window(t))
@udf.reduction(['double', 'double'], 'double')
def my_wm(v, w):
return np.average(v, weights=w)
def low_card_grouped_rolling_udf_wm(t):
return my_wm(t.value, t.value).over(low_card_rolling_window(t))
def high_card_grouped_rolling_udf_wm(t):
return my_wm(t.value, t.value).over(low_card_rolling_window(t))
@pytest.mark.benchmark(group="execution")
@pytest.mark.parametrize(
"expression_fn",
[
pytest.param(high_card_group_by, id="high_card_group_by"),
pytest.param(cast_to_dates, id="cast_to_dates"),
pytest.param(
cast_to_dates_from_strings, id="cast_to_dates_from_strings"
),
pytest.param(
multikey_group_by_with_mutate, id="multikey_group_by_with_mutate"
),
pytest.param(simple_sort, id="simple_sort"),
pytest.param(simple_sort_projection, id="simple_sort_projection"),
pytest.param(multikey_sort, id="multikey_sort"),
pytest.param(multikey_sort_projection, id="multikey_sort_projection"),
pytest.param(low_card_grouped_rolling, id="low_card_grouped_rolling"),
pytest.param(
high_card_grouped_rolling, id="high_card_grouped_rolling"
),
pytest.param(
low_card_grouped_rolling_udf_mean,
id="low_card_grouped_rolling_udf_mean",
),
pytest.param(
high_card_grouped_rolling_udf_mean,
id="high_card_grouped_rolling_udf_mean",
),
pytest.param(
low_card_window_analytics_udf, id="low_card_window_analytics_udf"
),
pytest.param(
high_card_window_analytics_udf, id="high_card_window_analytics_udf"
),
pytest.param(
low_card_grouped_rolling_udf_wm,
id="low_card_grouped_rolling_udf_wm",
),
pytest.param(
high_card_grouped_rolling_udf_wm,
id="high_card_grouped_rolling_udf_wm",
),
],
)
def test_execute(benchmark, expression_fn, pt):
expr = expression_fn(pt)
benchmark(expr.execute)
| [
[
[
7,
18
],
[
4224,
4226
],
[
4285,
4287
],
[
4336,
4338
],
[
6934,
6936
]
],
[
[
26,
38
],
[
4181,
4183
],
[
4381,
4383
],
[
4500,
4502
],
[
4633,
4635
]
],
[
[
46,
52
],
[
587,
593
],
[
1423,
1429
],
[
2510,
2516
],
[
2589,
2595
],
[
2634,
2640
],
[
2696,
2702
],
[
2751,
2757
],
[
2814,
2820
],
[
2994,
3000
],
[
3035,
3041
],
[
3089,
3095
],
[
3156,
3162
],
[
3227,
3233
],
[
3305,
3311
],
[
3494,
3500
],
[
3538,
3544
],
[
3595,
3601
],
[
3649,
3655
],
[
3716,
3722
],
[
3787,
3793
],
[
4130,
4136
],
[
7183,
7189
],
[
7225,
7231
],
[
7285,
7291
],
[
7352,
7358
],
[
7409,
7415
],
[
7514,
7520
],
[
7625,
7631
],
[
7678,
7684
],
[
7753,
7759
],
[
7810,
7816
],
[
7889,
7895
],
[
7968,
7974
],
[
8071,
8077
],
[
8203,
8209
],
[
8337,
8343
],
[
8448,
8454
],
[
8561,
8567
],
[
8689,
8695
],
[
4016,
4022
]
],
[
[
61,
65
],
[
167,
171
],
[
3962,
3966
],
[
4758,
4762
],
[
5613,
5617
],
[
5649,
5653
],
[
5891,
5895
],
[
5927,
5931
],
[
6562,
6566
],
[
6637,
6641
]
],
[
[
73,
98
],
[
4951,
4953
],
[
5033,
5035
]
],
[
[
136,
139
],
[
6119,
6122
],
[
6416,
6419
],
[
6860,
6863
]
],
[
[
146,
152
],
[
622,
628
],
[
2720,
2726
]
],
[
[
606,
607
]
],
[
[
637,
646
],
[
1462,
1471
],
[
2778,
2787
]
],
[
[
1442,
1446
]
],
[
[
1481,
1496
],
[
2561,
2576
],
[
2843,
2858
]
],
[
[
2529,
2539
]
],
[
[
2895,
2912
]
],
[
[
3357,
3370
]
],
[
[
3868,
3880
]
],
[
[
4149,
4151
]
],
[
[
4811,
4829
],
[
7298,
7316
]
],
[
[
4904,
4917
],
[
7365,
7378
]
],
[
[
4966,
4992
],
[
7435,
7461
]
],
[
[
5048,
5077
],
[
7540,
7569
]
],
[
[
5256,
5267
],
[
7638,
7649
]
],
[
[
5308,
5330
],
[
7691,
7713
]
],
[
[
5389,
5402
],
[
7766,
7779
]
],
[
[
5459,
5483
],
[
7823,
7847
]
],
[
[
5574,
5597
],
[
5817,
5840
],
[
6278,
6301
],
[
7041,
7064
],
[
7152,
7175
]
],
[
[
5757,
5781
],
[
7902,
7926
]
],
[
[
5851,
5875
],
[
6087,
6111
],
[
6384,
6408
]
],
[
[
6026,
6051
],
[
7994,
8019
]
],
[
[
6159,
6166
],
[
6256,
6263
],
[
6362,
6369
]
],
[
[
6207,
6240
],
[
8097,
8130
]
],
[
[
6312,
6346
],
[
8229,
8263
]
],
[
[
6455,
6464
],
[
6716,
6725
],
[
6812,
6821
]
],
[
[
6531,
6546
],
[
6740,
6755
]
],
[
[
6605,
6621
],
[
6836,
6852
]
],
[
[
6671,
6700
],
[
8363,
8392
]
],
[
[
6766,
6796
],
[
8474,
8504
]
],
[
[
6910,
6915
],
[
7012,
7017
],
[
7123,
7128
]
],
[
[
6965,
6996
],
[
8587,
8618
]
],
[
[
7075,
7107
],
[
8715,
8747
]
],
[
[
8824,
8836
]
]
] |
import time
from threading import Thread
def timestamp_datetime(value):
format = '%Y-%m-%d %H:%M:%S'
value = time.localtime(value)
dt = time.strftime(format, value)
return dt
def log(s):
print("[",timestamp_datetime(time.time()),"]",s)
| [
[
[
7,
11
],
[
118,
122
],
[
149,
153
],
[
239,
243
]
],
[
[
34,
40
]
],
[
[
46,
64
],
[
220,
238
]
],
[
[
198,
201
]
]
] |
from time import sleep
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
from celery.schedules import crontab
from celery.task import periodic_task
from crypto.models import Cryptocurrency
# @shared_task
@periodic_task(
run_every=(crontab(minute="*/15")),
name="create_cryptocurrency",
# ignore_result=True
)
def create_cryptocurrency():
print("Crawling data and creating objects in database ..")
req = Request("https://coinranking.com", headers={"User-Agent": "Mozilla/5.0"})
html = urlopen(req).read()
bs = BeautifulSoup(html, "html.parser")
# Find first 5 table rows
rows = bs.find("tbody", class_="table__body").find_all("tr", class_="table__row")[
0:5
]
for row in rows:
cryptocurrency = (
row.find("span", class_="profile__name")
.get_text()
.strip()
.replace("\n", "")
)
values = row.find_all("div", class_="valuta")
price = values[0].get_text().strip().replace("\n", "")
market_cap = values[1].get_text().strip().replace("\n", "")
change = (
row.find("div", class_="change")
.find("span")
.get_text()
.strip()
.replace("\n", "")
)
print(
{
"cryptocurrency": cryptocurrency,
"price": price,
"market_cap": market_cap,
"change": change,
}
)
# Create object in database from crawled data
Cryptocurrency.objects.create(
cryptocurrency=cryptocurrency,
price=price,
market_cap=market_cap,
change=change,
)
# Sleep 3 seconds to avoid any errors
sleep(3)
# @shared_task
@periodic_task(
run_every=(crontab(minute="*/15")),
name="update_cryptocurrency",
)
def update_cryptocurrency():
print("Updating data ..")
req = Request("https://coinranking.com", headers={"User-Agent": "Mozilla/5.0"})
html = urlopen(req).read()
bs = BeautifulSoup(html, "html.parser")
rows = bs.find("tbody", class_="table__body").find_all("tr", class_="table__row")[
0:5
]
for row in rows:
cryptocurrency = (
row.find("span", class_="profile__name")
.get_text()
.strip()
.replace("\n", "")
)
values = row.find_all("div", class_="valuta")
price = values[0].get_text().strip().replace("\n", "")
market_cap = values[1].get_text().strip().replace("\n", "")
change = (
row.find("div", class_="change")
.find("span")
.get_text()
.strip()
.replace("\n", "")
)
print(
{
"cryptocurrency": cryptocurrency,
"price": price,
"market_cap": market_cap,
"change": change,
}
)
data = {
"cryptocurrency": cryptocurrency,
"price": price,
"market_cap": market_cap,
"change": change,
}
Cryptocurrency.objects.filter(cryptocurrency=cryptocurrency).update(**data)
sleep(3)
| [
[
[
17,
22
],
[
1781,
1786
],
[
3244,
3249
]
],
[
[
50,
57
],
[
536,
543
],
[
2053,
2060
]
],
[
[
59,
66
],
[
451,
458
],
[
1968,
1975
]
],
[
[
84,
97
],
[
565,
578
],
[
2082,
2095
]
],
[
[
127,
134
],
[
263,
270
],
[
1838,
1845
]
],
[
[
159,
172
],
[
233,
246
],
[
1808,
1821
]
],
[
[
200,
214
],
[
1556,
1570
],
[
3159,
3173
]
],
[
[
353,
374
]
],
[
[
1903,
1924
]
]
] |
# generated by datamodel-codegen:
# filename: schema/entity/data/database.json
# timestamp: 2021-09-27T15:46:37+00:00
from __future__ import annotations
from typing import Optional
from pydantic import BaseModel, Field, constr
from ...type import basic, entityReference, usageDetails
class DatabaseName(BaseModel):
__root__: constr(regex=r'^[^.]*$', min_length=1, max_length=64) = Field(
..., description='Name that identifies the database.'
)
class Database(BaseModel):
id: Optional[basic.Uuid] = Field(
None, description='Unique identifier that identifies this database instance.'
)
name: DatabaseName = Field(..., description='Name that identifies the database.')
fullyQualifiedName: Optional[str] = Field(
None,
description="Name that uniquely identifies a database in the format 'ServiceName.DatabaseName'.",
)
description: Optional[str] = Field(
None, description='Description of the database instance.'
)
href: Optional[basic.Href] = Field(
None, description='Link to the resource corresponding to this entity.'
)
owner: Optional[entityReference.EntityReference] = Field(
None, description='Owner of this database.'
)
service: entityReference.EntityReference = Field(
...,
description='Link to the database cluster/service where this database is hosted in.',
)
usageSummary: Optional[usageDetails.TypeUsedToReturnUsageDetailsOfAnEntity] = Field(
None, description='Latest usage information for this database.'
)
tables: Optional[entityReference.EntityReferenceList] = Field(
None, description='References to tables in the database.'
)
| [
[
[
147,
158
]
],
[
[
179,
187
],
[
508,
516
],
[
740,
748
],
[
906,
914
],
[
1011,
1019
],
[
1137,
1145
],
[
1431,
1439
],
[
1592,
1600
]
],
[
[
210,
219
],
[
314,
323
],
[
488,
497
]
],
[
[
221,
226
],
[
396,
401
],
[
531,
536
],
[
655,
660
],
[
756,
761
],
[
922,
927
],
[
1034,
1039
],
[
1181,
1186
],
[
1293,
1298
],
[
1495,
1500
],
[
1640,
1645
]
],
[
[
228,
234
],
[
340,
346
]
],
[
[
256,
261
],
[
517,
522
],
[
1020,
1025
]
],
[
[
263,
278
],
[
1146,
1161
],
[
1259,
1274
],
[
1601,
1616
]
],
[
[
280,
292
],
[
1440,
1452
]
],
[
[
301,
313
],
[
640,
652
]
],
[
[
479,
487
]
]
] |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and Saves a New User"""
if not email:
raise ValueError('Users must have a Valid Email Address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and Saves a new superuser"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom User Model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=True)
objects = UserManager()
USERNAME_FIELD = 'email'
class Tag(models.Model):
"""Tag to be used for a recipe """
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
class Ingredient(models.Model):
"""Ingredient to be used for a recipe """
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self):
return self.name
class Recipe(models.Model):
"""Recipe Object"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
title = models.CharField(max_length=255)
time_minutes = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
link = models.CharField(max_length=255, blank=True)
ingredients = models.ManyToManyField('Ingredient')
tags = models.ManyToManyField('Tag')
def __str__(self):
return self.title
| [
[
[
22,
28
],
[
1001,
1007
],
[
1059,
1065
],
[
1108,
1114
],
[
1157,
1163
],
[
1262,
1268
],
[
1327,
1333
],
[
1371,
1377
],
[
1442,
1448
],
[
1532,
1538
],
[
1604,
1610
],
[
1648,
1654
],
[
1719,
1725
],
[
1805,
1811
],
[
1855,
1861
],
[
1926,
1932
],
[
1960,
1966
],
[
2012,
2018
],
[
2046,
2052
],
[
2109,
2115
],
[
2172,
2178
],
[
2220,
2226
]
],
[
[
68,
84
],
[
878,
894
]
],
[
[
86,
101
],
[
214,
229
]
],
[
[
144,
160
],
[
896,
912
]
],
[
[
185,
193
],
[
1398,
1406
],
[
1675,
1683
],
[
1882,
1890
]
],
[
[
202,
213
],
[
1206,
1217
]
],
[
[
873,
877
]
],
[
[
1258,
1261
]
],
[
[
1521,
1531
]
],
[
[
1798,
1804
]
]
] |
import re
import pickle
import tempfile
import pytest
from _pytest.config import Config
from _pytest._io.terminalwriter import TerminalWriter
from _pytest.reports import TestReport
from pytest_fold.tui_pytermtk import main as tuitk
from pytest_fold.tui_textual1 import main as tuitxt1
from pytest_fold.tui_textual2 import main as tuitxt2
from pytest_fold.utils import (
test_session_starts_matcher,
errors_section_matcher,
failures_section_matcher,
warnings_summary_matcher,
passes_section_matcher,
short_test_summary_matcher,
lastline_matcher,
MARKERS,
REPORTFILE,
MARKEDTERMINALOUTPUTFILE,
UNMARKEDTERMINALOUTPUTFILE,
)
# Don't collect tests from any of these files
collect_ignore = [
"setup.py",
"plugin.py",
]
# A list of TestReport objects generated by Pytest during test run.
# Each TestReport represents a single test's operation during one of
# Pytest's three phases: setup | call | teardown
reports = []
def pytest_addoption(parser):
"""Define the plugin's option flags as presented by Pytest"""
group = parser.getgroup("fold")
group.addoption(
"--fold",
action="store_true",
help="fold failed test output sections",
)
group.addoption(
"--fold-tui",
"--ft",
action="store",
default="pytermtk",
help="specify user interface ('pytermtk' ' k' | 'textual1' 't1' | 'textual2' 't2' | 'none' 'n')",
choices=["pytermtk", "k", "textual1", "t1", "textual2", "t2", "none", "n"],
)
def pytest_report_teststatus(report: TestReport, config: Config):
"""Construct list(s) of individial TestReport instances"""
reports.append(report)
@pytest.hookimpl(trylast=True)
def pytest_configure(config: Config) -> None:
"""
Write console output to a file for use by TUI
This code works by looking at every line sent by Pytest to the terminal,
and based on its category, marking or not marking it
"""
config.option.verbose = (
1 # force verbose mode for easier parsing of final test results
)
config.option.reportchars = (
"A" # force "display all" mode so all results can be shown
)
if config.option.fold:
tr = config.pluginmanager.getplugin("terminalreporter")
if tr is not None:
# identify and mark the very first line of terminal output
try:
config._pyfoldfirsttime
except AttributeError:
config._pyfoldfirsttime = True
config._pyfold_unmarked_outputfile = tempfile.TemporaryFile("wb+")
config._pyfold_marked_outputfile = tempfile.TemporaryFile("wb+")
oldwrite = tr._tw.write
# identify and mark each results section
def tee_write(s, **kwargs):
if re.search(test_session_starts_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_test_session_starts"] + "\n").encode(
"utf-8"
)
)
if re.search(errors_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_errors_section"] + "\n").encode("utf-8")
)
if re.search(failures_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_failures_section"] + "\n").encode("utf-8")
)
if re.search(warnings_summary_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_warnings_summary"] + "\n").encode("utf-8")
)
if re.search(passes_section_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_passes_section"] + "\n").encode("utf-8")
)
if re.search(short_test_summary_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_short_test_summary"] + "\n").encode(
"utf-8"
)
)
if re.search(lastline_matcher, s):
config._pyfold_marked_outputfile.write(
(MARKERS["pytest_fold_last_line"] + "\n").encode("utf-8")
)
# Write this line's text along with its markup info to console
oldwrite(s, **kwargs)
# Mark up this line's text by passing it to an instance of TerminalWriter's
# 'markup' method. Do not pass "flush" to the method or it will throw an error.
s1 = s
kwargs.pop("flush") if "flush" in kwargs.keys() else None
s1 = TerminalWriter().markup(s, **kwargs)
# Encode the marked up line so it can be written to the config object.
# The Pytest config object can be used by plugins for conveying staeful
# info across an entire test run session.
if isinstance(s1, str):
marked_up = s1.encode("utf-8")
config._pyfold_marked_outputfile.write(marked_up)
# Write this line's original (unmarked) text to unmarked file
s_orig = s
kwargs.pop("flush") if "flush" in kwargs.keys() else None
s_orig = TerminalWriter().markup(s, **kwargs)
if isinstance(s_orig, str):
unmarked_up = s_orig.encode("utf-8")
config._pyfold_unmarked_outputfile.write(unmarked_up)
# Write to both terminal/console and tempfiles:
# _pyfold_marked_outputfile, _pyfold_unmarked_outputfile
tr._tw.write = tee_write
def pytest_unconfigure(config: Config):
"""
Write terminal and test results info to files for use by TUI
"""
# Write terminal output to file
if hasattr(config, "_pyfold_marked_outputfile"):
# get terminal contents, then write file
config._pyfold_marked_outputfile.seek(0)
markedsessionlog = config._pyfold_marked_outputfile.read()
config._pyfold_marked_outputfile.close()
if hasattr(config, "_pyfold_unmarked_outputfile"):
# get terminal contents, then write file
config._pyfold_unmarked_outputfile.seek(0)
unmarkedsessionlog = config._pyfold_unmarked_outputfile.read()
config._pyfold_unmarked_outputfile.close()
# Undo our patching in the terminal reporter
config.pluginmanager.getplugin("terminalreporter")
# Write marked-up results to file
with open(MARKEDTERMINALOUTPUTFILE, "wb") as marked_file:
marked_file.write(markedsessionlog)
# Write un-marked-up results to file
with open(UNMARKEDTERMINALOUTPUTFILE, "wb") as unmarked_file:
unmarked_file.write(unmarkedsessionlog)
# Write the reports list to file
with open(REPORTFILE, "wb") as report_file:
pickle.dump(reports, report_file)
# Launch the TUI
if config.getoption("--fold") == True:
pyfold_tui(config)
def pyfold_tui(config: Config) -> None:
"""
Final code invocation after Pytest run has completed.
This method calls the Pyfold TUI to display final results.
"""
# disable capturing while TUI runs to avoid error `redirected stdin is pseudofile, has
# no fileno()`; adapted from https://githubmemory.com/repo/jsbueno/terminedia/issues/25
if not config.getoption("--fold"):
return
capmanager = config.pluginmanager.getplugin("capturemanager")
try:
capmanager.suspend_global_capture(in_=True)
finally:
if config.getoption("--ft") in ["k", "pytermtk"]:
tuitk()
elif config.getoption("--ft") in ["t1", "textual1"]:
tuitxt1()
elif config.getoption("--ft") in ["t2", "textual2"]:
tuitxt2()
elif config.getoption("--ft") not in ["n", "none"]:
print(f"Incorrect choice for fold-tui: {config.getoption('--ft')}")
capmanager.resume_global_capture()
| [
[
[
7,
9
],
[
2822,
2824
],
[
3113,
3115
],
[
3340,
3342
],
[
3571,
3573
],
[
3802,
3804
],
[
4029,
4031
],
[
4318,
4320
]
],
[
[
17,
23
],
[
7195,
7201
]
],
[
[
31,
39
],
[
2566,
2574
],
[
2643,
2651
]
],
[
[
47,
53
],
[
1694,
1700
]
],
[
[
82,
88
],
[
1592,
1598
],
[
1753,
1759
],
[
5980,
5986
],
[
7346,
7352
]
],
[
[
128,
142
],
[
4939,
4953
],
[
5572,
5586
]
],
[
[
171,
181
],
[
1572,
1582
]
],
[
[
219,
232
],
[
7947,
7952
]
],
[
[
270,
285
],
[
8028,
8035
]
],
[
[
323,
338
],
[
8111,
8118
]
],
[
[
375,
402
],
[
2832,
2859
]
],
[
[
408,
430
],
[
3123,
3145
]
],
[
[
436,
460
],
[
3350,
3374
]
],
[
[
466,
490
],
[
3581,
3605
]
],
[
[
496,
518
],
[
3812,
3834
]
],
[
[
524,
550
],
[
4039,
4065
]
],
[
[
556,
572
],
[
4328,
4344
]
],
[
[
578,
585
],
[
2950,
2957
],
[
3236,
3243
],
[
3465,
3472
],
[
3696,
3703
],
[
3925,
3932
],
[
4156,
4163
],
[
4435,
4442
]
],
[
[
591,
601
],
[
7149,
7159
]
],
[
[
607,
631
],
[
6825,
6849
]
],
[
[
637,
663
],
[
6985,
7011
]
],
[
[
715,
729
]
],
[
[
956,
963
],
[
1668,
1675
],
[
7207,
7214
]
],
[
[
975,
991
]
],
[
[
1539,
1563
]
],
[
[
1728,
1744
]
],
[
[
5953,
5971
]
],
[
[
7327,
7337
],
[
7302,
7312
]
]
] |
import argparse
import os
import sys
import time
import numpy as np
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, \
UnParametrizedHyperparameter, Constant
from sklearn.datasets import load_iris
from sklearn.metrics import balanced_accuracy_score
from sklearn.model_selection import train_test_split
sys.path.append(os.getcwd())
from mindware.utils.data_manager import DataManager
from mindware.estimators import Classifier
from mindware.components.models.base_model import BaseClassificationModel
from mindware.components.models.classification import add_classifier
from mindware.components.utils.configspace_utils import check_none
from mindware.components.utils.constants import DENSE, SPARSE, UNSIGNED_DATA, PREDICTIONS
parser = argparse.ArgumentParser()
parser.add_argument('--time_limit', type=int, default=1200)
args = parser.parse_args()
time_limit = args.time_limit
class UserDefinedDecisionTree(BaseClassificationModel):
def __init__(self, criterion, max_features, max_depth_factor,
min_samples_split, min_samples_leaf, min_weight_fraction_leaf,
max_leaf_nodes, min_impurity_decrease, class_weight=None,
random_state=None):
self.criterion = criterion
self.max_features = max_features
self.max_depth_factor = max_depth_factor
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_leaf_nodes = max_leaf_nodes
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.min_impurity_decrease = min_impurity_decrease
self.random_state = random_state
self.class_weight = class_weight
self.estimator = None
self.time_limit = None
def fit(self, X, y, sample_weight=None):
from sklearn.tree import DecisionTreeClassifier
self.max_features = float(self.max_features)
# Heuristic to set the tree depth
if check_none(self.max_depth_factor):
max_depth_factor = self.max_depth_factor = None
else:
num_features = X.shape[1]
self.max_depth_factor = int(self.max_depth_factor)
max_depth_factor = max(
1,
int(np.round(self.max_depth_factor * num_features, 0)))
self.min_samples_split = int(self.min_samples_split)
self.min_samples_leaf = int(self.min_samples_leaf)
if check_none(self.max_leaf_nodes):
self.max_leaf_nodes = None
else:
self.max_leaf_nodes = int(self.max_leaf_nodes)
self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)
self.min_impurity_decrease = float(self.min_impurity_decrease)
self.estimator = DecisionTreeClassifier(
criterion=self.criterion,
max_depth=max_depth_factor,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
max_leaf_nodes=self.max_leaf_nodes,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
min_impurity_decrease=self.min_impurity_decrease,
class_weight=self.class_weight,
random_state=self.random_state)
self.estimator.fit(X, y, sample_weight=sample_weight)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
probas = self.estimator.predict_proba(X)
return probas
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'DT',
'name': 'Decision Tree Classifier',
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
criterion = CategoricalHyperparameter(
"criterion", ["gini", "entropy"], default_value="gini")
max_depth_factor = UniformFloatHyperparameter(
'max_depth_factor', 0., 2., default_value=0.5)
min_samples_split = UniformIntegerHyperparameter(
"min_samples_split", 2, 20, default_value=2)
min_samples_leaf = UniformIntegerHyperparameter(
"min_samples_leaf", 1, 20, default_value=1)
min_weight_fraction_leaf = Constant("min_weight_fraction_leaf", 0.0)
max_features = UnParametrizedHyperparameter('max_features', 1.0)
max_leaf_nodes = UnParametrizedHyperparameter("max_leaf_nodes", "None")
min_impurity_decrease = UnParametrizedHyperparameter('min_impurity_decrease', 0.0)
cs.add_hyperparameters([criterion, max_features, max_depth_factor,
min_samples_split, min_samples_leaf,
min_weight_fraction_leaf, max_leaf_nodes,
min_impurity_decrease])
return cs
print('==> Start to evaluate with Budget %d' % time_limit)
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
dm = DataManager(X_train, y_train)
train_data = dm.get_data_node(X_train, y_train)
test_data = dm.get_data_node(X_test, y_test)
save_dir = './data/eval_exps/soln-ml'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
add_classifier(UserDefinedDecisionTree)
clf = Classifier(time_limit=time_limit,
output_dir=save_dir,
include_algorithms=['UserDefinedDecisionTree'],
random_state=1,
metric='acc',
n_jobs=1)
_start_time = time.time()
_iter_id = 0
clf.fit(train_data)
pred = clf.predict(test_data)
print(balanced_accuracy_score(test_data.data[1], pred))
| [
[
[
7,
15
],
[
887,
895
]
],
[
[
23,
25
],
[
469,
471
],
[
5919,
5921
],
[
5949,
5951
]
],
[
[
33,
36
],
[
453,
456
]
],
[
[
44,
48
],
[
6260,
6264
]
],
[
[
56,
67
],
[
2377,
2379
]
],
[
[
113,
131
],
[
4384,
4402
]
],
[
[
172,
198
],
[
4559,
4585
]
],
[
[
206,
234
],
[
4682,
4710
],
[
4804,
4832
]
],
[
[
236,
261
],
[
4429,
4454
]
],
[
[
269,
297
],
[
5002,
5030
],
[
5081,
5109
],
[
5172,
5200
]
],
[
[
299,
307
],
[
4933,
4941
]
],
[
[
337,
346
],
[
5613,
5622
]
],
[
[
375,
398
],
[
6343,
6366
]
],
[
[
435,
451
],
[
5690,
5706
]
],
[
[
522,
533
],
[
5750,
5761
]
],
[
[
566,
576
],
[
6018,
6028
]
],
[
[
627,
650
],
[
1062,
1085
]
],
[
[
705,
719
],
[
5972,
5986
]
],
[
[
776,
786
],
[
2092,
2102
],
[
2560,
2570
]
],
[
[
835,
840
],
[
4159,
4164
]
],
[
[
842,
848
],
[
4166,
4172
]
],
[
[
850,
863
],
[
4174,
4187
]
],
[
[
865,
876
],
[
4217,
4228
]
],
[
[
878,
884
],
[
913,
919
],
[
980,
986
]
],
[
[
973,
977
],
[
1014,
1018
]
],
[
[
1001,
1011
],
[
5593,
5603
],
[
6040,
6050
]
],
[
[
1038,
1061
],
[
5987,
6010
]
],
[
[
5606,
5610
],
[
5632,
5636
],
[
5643,
5647
]
],
[
[
5625,
5626
],
[
5707,
5708
]
],
[
[
5628,
5629
],
[
5710,
5711
]
],
[
[
5655,
5662
],
[
5762,
5769
],
[
5810,
5817
]
],
[
[
5664,
5670
],
[
5857,
5863
]
],
[
[
5672,
5679
],
[
5771,
5778
],
[
5819,
5826
]
],
[
[
5681,
5687
],
[
5865,
5871
]
],
[
[
5745,
5747
],
[
5793,
5795
],
[
5840,
5842
]
],
[
[
5780,
5790
],
[
6294,
6304
]
],
[
[
5828,
5837
],
[
6325,
6334
],
[
6367,
6376
]
],
[
[
5874,
5882
],
[
5934,
5942
],
[
5961,
5969
],
[
6080,
6088
]
],
[
[
6012,
6015
],
[
6286,
6289
],
[
6313,
6316
]
],
[
[
6246,
6257
]
],
[
[
6272,
6280
]
],
[
[
6306,
6310
],
[
6386,
6390
]
]
] |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=g-import-not-at-top
# pylint: disable=g-classes-have-attributes
# pylint: disable=g-direct-tensorflow-import
"""Utilies for image preprocessing and augmentation.
Warning: `tf.keras.preprocessing.image` APIs do not operate on tensors and are
not recommended for new code. Prefer loading data with
`tf.keras.utils.image_dataset_from_directory`, and then transforming the output
`tf.data.Dataset` with preprocessing layers. For more information, see the
tutorials for [loading images](
https://www.tensorflow.org/tutorials/load_data/images) and [augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as the
[preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
"""
import collections
import io
import multiprocessing
import os
import pathlib
import threading
import warnings
from keras import backend
from keras.utils import data_utils
import numpy as np
from tensorflow.python.util.tf_export import keras_export
try:
import scipy
from scipy import linalg # pylint: disable=unused-import
from scipy import ndimage # pylint: disable=unused-import
except ImportError:
pass
try:
from PIL import Image as pil_image
from PIL import ImageEnhance
except ImportError:
pil_image = None
ImageEnhance = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
'hamming': pil_image.HAMMING,
'box': pil_image.BOX,
'lanczos': pil_image.LANCZOS,
}
@keras_export('keras.utils.array_to_img',
'keras.preprocessing.image.array_to_img')
def array_to_img(x, data_format=None, scale=True, dtype=None):
"""Converts a 3D Numpy array to a PIL Image instance.
Usage:
```python
from PIL import Image
img = np.random.random(size=(100, 100, 3))
pil_img = tf.keras.preprocessing.image.array_to_img(img)
```
Args:
x: Input data, in any form that can be converted to a Numpy array.
data_format: Image data format, can be either "channels_first" or
"channels_last". Defaults to `None`, in which case the global setting
`tf.keras.backend.image_data_format()` is used (unless you changed it,
it defaults to "channels_last").
scale: Whether to rescale the image such that minimum and maximum values
are 0 and 255 respectively. Defaults to `True`.
dtype: Dtype to use. Default to `None`, in which case the global setting
`tf.keras.backend.floatx()` is used (unless you changed it, it defaults
to "float32")
Returns:
A PIL Image instance.
Raises:
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=dtype)
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
f'Got array with shape: {x.shape}')
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError(f'Invalid data_format: {data_format}')
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x - np.min(x)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 4:
# RGBA
return pil_image.fromarray(x.astype('uint8'), 'RGBA')
elif x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
if np.max(x) > 255:
# 32-bit signed integer grayscale image. PIL mode "I"
return pil_image.fromarray(x[:, :, 0].astype('int32'), 'I')
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError(f'Unsupported channel number: {x.shape[2]}')
@keras_export('keras.utils.img_to_array',
'keras.preprocessing.image.img_to_array')
def img_to_array(img, data_format=None, dtype=None):
"""Converts a PIL Image instance to a Numpy array.
Usage:
```python
from PIL import Image
img_data = np.random.random(size=(100, 100, 3))
img = tf.keras.preprocessing.image.array_to_img(img_data)
array = tf.keras.preprocessing.image.img_to_array(img)
```
Args:
img: Input PIL Image instance.
data_format: Image data format, can be either "channels_first" or
"channels_last". Defaults to `None`, in which case the global setting
`tf.keras.backend.image_data_format()` is used (unless you changed it,
it defaults to "channels_last").
dtype: Dtype to use. Default to `None`, in which case the global setting
`tf.keras.backend.floatx()` is used (unless you changed it, it defaults
to "float32")
Returns:
A 3D Numpy array.
Raises:
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError(f'Unknown data_format: {data_format}')
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=dtype)
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError(f'Unsupported image shape: {x.shape}')
return x
@keras_export('keras.utils.save_img', 'keras.preprocessing.image.save_img')
def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
Args:
path: Path or file object.
x: Numpy array.
data_format: Image data format, either "channels_first" or
"channels_last".
file_format: Optional file format override. If omitted, the format to use
is determined from the filename extension. If a file object was used
instead of a filename, this parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
if data_format is None:
data_format = backend.image_data_format()
img = array_to_img(x, data_format=data_format, scale=scale)
if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'):
warnings.warn('The JPG format does not support '
'RGBA images, converting to RGB.')
img = img.convert('RGB')
img.save(path, format=file_format, **kwargs)
@keras_export('keras.utils.load_img', 'keras.preprocessing.image.load_img')
def load_img(path,
grayscale=False,
color_mode='rgb',
target_size=None,
interpolation='nearest',
keep_aspect_ratio=False):
"""Loads an image into PIL format.
Usage:
```
image = tf.keras.preprocessing.image.load_img(image_path)
input_arr = tf.keras.preprocessing.image.img_to_array(image)
input_arr = np.array([input_arr]) # Convert single image to a batch.
predictions = model.predict(input_arr)
```
Args:
path: Path to image file.
grayscale: DEPRECATED use `color_mode="grayscale"`.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb". The desired
image format.
target_size: Either `None` (default to original size) or tuple of ints
`(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image. Supported
methods are "nearest", "bilinear", and "bicubic". If PIL version 1.1.3
or newer is installed, "lanczos" is also supported. If PIL version 3.4.0
or newer is installed, "box" and "hamming" are also supported. By
default, "nearest" is used.
keep_aspect_ratio: Boolean, whether to resize images to a target
size without aspect ratio distortion. The image is cropped in
the center with target aspect ratio before resizing.
Returns:
A PIL Image instance.
Raises:
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if grayscale:
warnings.warn('grayscale is deprecated. Please use '
'color_mode = "grayscale"')
color_mode = 'grayscale'
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `load_img` requires PIL.')
if isinstance(path, io.BytesIO):
img = pil_image.open(path)
elif isinstance(path, (pathlib.Path, bytes, str)):
if isinstance(path, pathlib.Path):
path = str(path.resolve())
with open(path, 'rb') as f:
img = pil_image.open(io.BytesIO(f.read()))
else:
raise TypeError('path should be path-like or io.BytesIO'
', not {}'.format(type(path)))
if color_mode == 'grayscale':
# if image is not already an 8-bit, 16-bit or 32-bit grayscale image
# convert it to an 8-bit grayscale image.
if img.mode not in ('L', 'I;16', 'I'):
img = img.convert('L')
elif color_mode == 'rgba':
if img.mode != 'RGBA':
img = img.convert('RGBA')
elif color_mode == 'rgb':
if img.mode != 'RGB':
img = img.convert('RGB')
else:
raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError('Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
', '.join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
if keep_aspect_ratio:
width, height = img.size
target_width, target_height = width_height_tuple
crop_height = (width * target_height) // target_width
crop_width = (height * target_width) // target_height
# Set back to input height / width
# if crop_height / crop_width is not smaller.
crop_height = min(height, crop_height)
crop_width = min(width, crop_width)
crop_box_hstart = (height - crop_height) // 2
crop_box_wstart = (width - crop_width) // 2
crop_box_wend = crop_box_wstart + crop_width
crop_box_hend = crop_box_hstart + crop_height
crop_box = [
crop_box_wstart, crop_box_hstart, crop_box_wend, crop_box_hend
]
img = img.resize(width_height_tuple, resample, box=crop_box)
else:
img = img.resize(width_height_tuple, resample)
return img
@keras_export('keras.preprocessing.image.Iterator')
class Iterator(data_utils.Sequence):
"""Base class for image data iterators.
Warning: `tf.keras.preprocessing.image.Iterator` is not recommended for
new code. Prefer loading images with
`tf.keras.utils.image_dataset_from_directory` and transforming the output
`tf.data.Dataset` with preprocessing layers. For more information, see the
tutorials for [loading images](
https://www.tensorflow.org/tutorials/load_data/images) and
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
Args:
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
white_list_formats = ('png', 'jpg', 'jpeg', 'bmp', 'ppm', 'tif', 'tiff')
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx, length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:self.batch_size *
(idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
if self.n == 0:
# Avoiding modulo by zero error
current_index = 0
else:
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def next(self):
"""For python 2.x.
Returns:
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
Args:
index_array: Array of sample indices to include in batch.
Returns:
A batch of transformed samples.
"""
raise NotImplementedError
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension.
Args:
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean, follow symbolic links to subdirectories.
Yields:
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(
os.walk(subpath, followlinks=follow_links), key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
if fname.lower().endswith('.tiff'):
warnings.warn('Using ".tiff" files with multiple bands '
'will cause distortion. Please verify your output.')
if fname.lower().endswith(white_list_formats):
yield root, fname
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
Args:
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
E.g.: `segment=(0.6, 1.0)` would only account for last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean, follow symbolic links to subdirectories.
Returns:
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
all_files = list(
_iter_valid_files(directory, white_list_formats, follow_links))
num_files = len(all_files)
start, stop = int(split[0] * num_files), int(split[1] * num_files)
valid_files = all_files[start:stop]
else:
valid_files = _iter_valid_files(directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(dirname,
os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
class BatchFromFilesMixin():
"""Adds methods related to getting batches from filenames.
It includes the logic to transform image files to batches.
"""
def set_processing_attrs(self, image_data_generator, target_size, color_mode,
data_format, save_to_dir, save_prefix, save_format,
subset, interpolation, keep_aspect_ratio):
"""Sets attributes to use later for processing files into a batch.
Args:
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images
to.
color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`.
Color mode to read images.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
keep_aspect_ratio: Boolean, whether to resize images to a target size
without aspect ratio distortion. The image is cropped in the center
with target aspect ratio before resizing.
"""
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
self.keep_aspect_ratio = keep_aspect_ratio
if color_mode not in {'rgb', 'rgba', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb", "rgba", or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgba':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (4,)
else:
self.image_shape = (4,) + self.target_size
elif self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split # pylint: disable=protected-access
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError('Invalid subset name: %s;'
'expected "training" or "validation"' % (subset,))
else:
split = None
self.split = split
self.subset = subset
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
Args:
index_array: Array of sample indices to include in batch.
Returns:
A batch of transformed samples.
"""
batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=self.dtype)
# build batch of image data
# self.filepaths is dynamic, is better to call it once outside the loop
filepaths = self.filepaths
for i, j in enumerate(index_array):
img = load_img(
filepaths[j],
color_mode=self.color_mode,
target_size=self.target_size,
interpolation=self.interpolation,
keep_aspect_ratio=self.keep_aspect_ratio)
x = img_to_array(img, data_format=self.data_format)
# Pillow images should be closed after `load_img`,
# but not PIL images.
if hasattr(img, 'close'):
img.close()
if self.image_data_generator:
params = self.image_data_generator.get_random_transform(x.shape)
x = self.image_data_generator.apply_transform(x, params)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode in {'binary', 'sparse'}:
batch_y = np.empty(len(batch_x), dtype=self.dtype)
for i, n_observation in enumerate(index_array):
batch_y[i] = self.classes[n_observation]
elif self.class_mode == 'categorical':
batch_y = np.zeros((len(batch_x), len(self.class_indices)),
dtype=self.dtype)
for i, n_observation in enumerate(index_array):
batch_y[i, self.classes[n_observation]] = 1.
elif self.class_mode == 'multi_output':
batch_y = [output[index_array] for output in self.labels]
elif self.class_mode == 'raw':
batch_y = self.labels[index_array]
else:
return batch_x
if self.sample_weight is None:
return batch_x, batch_y
else:
return batch_x, batch_y, self.sample_weight[index_array]
@property
def filepaths(self):
"""List of absolute paths to image files."""
raise NotImplementedError(
'`filepaths` property method has not been implemented in {}.'.format(
type(self).__name__))
@property
def labels(self):
"""Class labels of every observation."""
raise NotImplementedError(
'`labels` property method has not been implemented in {}.'.format(
type(self).__name__))
@property
def sample_weight(self):
raise NotImplementedError(
'`sample_weight` property method has not been implemented in {}.'
.format(type(self).__name__))
@keras_export('keras.preprocessing.image.DirectoryIterator')
class DirectoryIterator(BatchFromFilesMixin, Iterator):
"""Iterator capable of reading images from a directory on disk.
Warning: `tf.keras.preprocessing.image.DirectoryIterator` is not recommended
for new code. Prefer loading images with
`tf.keras.utils.image_dataset_from_directory` and transforming the output
`tf.data.Dataset` with preprocessing layers. For more information, see the
tutorials for [loading images](
https://www.tensorflow.org/tutorials/load_data/images) and
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
directory: Path to the directory to read images from. Each subdirectory in
this directory will be considered to contain images from one class, or
alternatively you could specify class subdirectories via the `classes`
argument.
image_data_generator: Instance of `ImageDataGenerator` to use for random
transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`. Color mode to read
images.
classes: Optional list of strings, names of subdirectories containing
images from each class (e.g. `["dogs", "cats"]`). It will be computed
automatically if not set.
class_mode: Mode for yielding the targets:
- `"binary"`: binary targets (if there are only two classes),
- `"categorical"`: categorical targets,
- `"sparse"`: integer targets,
- `"input"`: targets are images identical to input images (mainly used
to work with autoencoders),
- `None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures being yielded,
in a viewable format. This is useful for visualizing the random
transformations being applied, for debugging purposes.
save_prefix: String prefix to use for saving sample images (if
`save_to_dir` is set).
save_format: Format to use for saving sample images (if `save_to_dir` is
set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image. Supported
methods are "nearest", "bilinear", and "bicubic". If PIL version 1.1.3
or newer is installed, "lanczos" is also supported. If PIL version 3.4.0
or newer is installed, "box" and "hamming" are also supported. By
default, "nearest" is used.
keep_aspect_ratio: Boolean, whether to resize images to a target size
without aspect ratio distortion. The image is cropped in the center
with target aspect ratio before resizing.
dtype: Dtype to use for generated arrays.
"""
allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None}
def __init__(self,
directory,
image_data_generator,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
data_format=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest',
keep_aspect_ratio=False,
dtype=None):
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
super().set_processing_attrs(image_data_generator, target_size, color_mode,
data_format, save_to_dir, save_prefix,
save_format, subset, interpolation,
keep_aspect_ratio)
self.directory = directory
self.classes = classes
if class_mode not in self.allowed_class_modes:
raise ValueError('Invalid class_mode: {}; expected one of: {}'
.format(class_mode, self.allowed_class_modes))
self.class_mode = class_mode
self.dtype = dtype
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory,
(dirpath, self.white_list_formats, self.split,
self.class_indices, follow_links)))
classes_list = []
for res in results:
classes, filenames = res.get()
classes_list.append(classes)
self.filenames += filenames
self.samples = len(self.filenames)
self.classes = np.zeros((self.samples,), dtype='int32')
for classes in classes_list:
self.classes[i:i + len(classes)] = classes
i += len(classes)
print('Found %d images belonging to %d classes.' %
(self.samples, self.num_classes))
pool.close()
pool.join()
self._filepaths = [
os.path.join(self.directory, fname) for fname in self.filenames
]
super().__init__(self.samples, batch_size, shuffle, seed)
@property
def filepaths(self):
return self._filepaths
@property
def labels(self):
return self.classes
@property # mixin needs this property to work
def sample_weight(self):
# no sample weights will be returned
return None
@keras_export('keras.preprocessing.image.NumpyArrayIterator')
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
Warning: `tf.keras.preprocessing.image.NumpyArrayIterator` is not recommended
for new code. Prefer loading images with
`tf.keras.utils.image_dataset_from_directory` and transforming the output
`tf.data.Dataset` with preprocessing layers. For more information, see the
tutorials for [loading images](
https://www.tensorflow.org/tutorials/load_data/images) and
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
x: Numpy array of input data or tuple. If tuple, the second elements is
either another numpy array or a list of numpy arrays, each of which gets
passed through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator` to use for random
transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures being yielded,
in a viewable format. This is useful for visualizing the random
transformations being applied, for debugging purposes.
save_prefix: String prefix to use for saving sample images (if
`save_to_dir` is set).
save_format: Format to use for saving sample images (if `save_to_dir` is
set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
ignore_class_split: Boolean (default: False), ignore difference
in number of classes in labels across train and validation
split (useful for non-classification tasks)
dtype: Dtype to use for the generated arrays.
"""
def __init__(self,
x,
y,
image_data_generator,
batch_size=32,
shuffle=False,
sample_weight=None,
seed=None,
data_format=None,
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
ignore_class_split=False,
dtype=None):
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
self.dtype = dtype
if isinstance(x, tuple) or isinstance(x, list):
if not isinstance(x[1], list):
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError('All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if (y is not None and not ignore_class_split and not np.array_equal(
np.unique(y[:split_idx]), np.unique(y[split_idx:]))):
raise ValueError('Training and validation subsets '
'have different number of classes after '
'the split. If your numpy arrays are '
'sorted by the label, you might want '
'to shuffle them.')
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
self.x = np.asarray(x, dtype=self.dtype)
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError(
'Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3, or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super().__init__(x.shape[0], batch_size, shuffle, seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
tuple([len(index_array)] + list(self.x.shape)[1:]), dtype=self.dtype)
for i, j in enumerate(index_array):
x = self.x[j]
params = self.image_data_generator.get_random_transform(x.shape)
x = self.image_data_generator.apply_transform(
x.astype(self.dtype), params)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if not batch_x_miscs else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def validate_filename(filename, white_list_formats):
"""Check if a filename refers to a valid file.
Args:
filename: String, absolute path to a file
white_list_formats: Set, allowed file extensions
Returns:
A boolean value indicating if the filename is valid or not
"""
return (filename.lower().endswith(white_list_formats) and
os.path.isfile(filename))
class DataFrameIterator(BatchFromFilesMixin, Iterator):
"""Iterator capable of reading images from a directory on disk as a dataframe.
Args:
dataframe: Pandas dataframe containing the filepaths relative to
`directory` (or absolute paths if `directory` is None) of the images in
a string column. It should include other column/s depending on the
`class_mode`: - if `class_mode` is `"categorical"` (default value) it
must include the `y_col` column with the class/es of each image.
Values in column can be string/list/tuple if a single class or
list/tuple if multiple classes. - if `class_mode` is `"binary"` or
`"sparse"` it must include the given `y_col` column with class values
as strings. - if `class_mode` is `"raw"` or `"multi_output"` it should
contain the columns specified in `y_col`. - if `class_mode` is
`"input"` or `None` no extra column is needed.
directory: string, path to the directory to read images from. If `None`,
data in `x_col` column should be absolute paths.
image_data_generator: Instance of `ImageDataGenerator` to use for random
transformations and normalization. If None, no transformations and
normalizations are made.
x_col: string, column in `dataframe` that contains the filenames (or
absolute paths if `directory` is `None`).
y_col: string or list, column/s in `dataframe` that has the target data.
weight_col: string, column in `dataframe` that contains the sample
weights. Default: `None`.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`. Color mode to read
images.
classes: Optional list of strings, classes to use (e.g. `["dogs",
"cats"]`). If None, all classes in `y_col` will be used.
class_mode: one of "binary", "categorical", "input", "multi_output",
"raw", "sparse" or None. Default: "categorical".
Mode for yielding the targets:
- `"binary"`: 1D numpy array of binary labels,
- `"categorical"`: 2D numpy array of one-hot encoded labels. Supports
multi-label output.
- `"input"`: images identical to input images (mainly used to work
with autoencoders),
- `"multi_output"`: list with the values of the different columns,
- `"raw"`: numpy array of values in `y_col` column(s),
- `"sparse"`: 1D numpy array of integer labels, - `None`, no targets
are returned (the generator will only yield batches of image data,
which is useful to use in `model.predict()`).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures being yielded,
in a viewable format. This is useful for visualizing the random
transformations being applied, for debugging purposes.
save_prefix: String prefix to use for saving sample images (if
`save_to_dir` is set).
save_format: Format to use for saving sample images (if `save_to_dir` is
set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image. Supported
methods are "nearest", "bilinear", and "bicubic". If PIL version 1.1.3
or newer is installed, "lanczos" is also supported. If PIL version 3.4.0
or newer is installed, "box" and "hamming" are also supported. By
default, "nearest" is used.
keep_aspect_ratio: Boolean, whether to resize images to a target size
without aspect ratio distortion. The image is cropped in the center
with target aspect ratio before resizing.
dtype: Dtype to use for the generated arrays.
validate_filenames: Boolean, whether to validate image filenames in
`x_col`. If `True`, invalid images will be ignored. Disabling this
option can lead to speed-up in the instantiation of this class. Default:
`True`.
"""
allowed_class_modes = {
'binary', 'categorical', 'input', 'multi_output', 'raw', 'sparse', None
}
def __init__(self,
dataframe,
directory=None,
image_data_generator=None,
x_col='filename',
y_col='class',
weight_col=None,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
data_format='channels_last',
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
interpolation='nearest',
keep_aspect_ratio=False,
dtype='float32',
validate_filenames=True):
super().set_processing_attrs(image_data_generator, target_size, color_mode,
data_format, save_to_dir, save_prefix,
save_format, subset, interpolation,
keep_aspect_ratio)
df = dataframe.copy()
self.directory = directory or ''
self.class_mode = class_mode
self.dtype = dtype
# check that inputs match the required class_mode
self._check_params(df, x_col, y_col, weight_col, classes)
if validate_filenames: # check which image files are valid and keep them
df = self._filter_valid_filepaths(df, x_col)
if class_mode not in ['input', 'multi_output', 'raw', None]:
df, classes = self._filter_classes(df, y_col, classes)
num_classes = len(classes)
# build an index of all the unique classes
self.class_indices = dict(zip(classes, range(len(classes))))
# retrieve only training or validation set
if self.split:
num_files = len(df)
start = int(self.split[0] * num_files)
stop = int(self.split[1] * num_files)
df = df.iloc[start:stop, :]
# get labels for each observation
if class_mode not in ['input', 'multi_output', 'raw', None]:
self.classes = self.get_classes(df, y_col)
self.filenames = df[x_col].tolist()
self._sample_weight = df[weight_col].values if weight_col else None
if class_mode == 'multi_output':
self._targets = [np.array(df[col].tolist()) for col in y_col]
if class_mode == 'raw':
self._targets = df[y_col].values
self.samples = len(self.filenames)
validated_string = 'validated' if validate_filenames else 'non-validated'
if class_mode in ['input', 'multi_output', 'raw', None]:
print(f'Found {self.samples} {validated_string} image filenames.')
else:
print(f'Found {self.samples} {validated_string} image filenames '
f'belonging to {num_classes} classes.')
self._filepaths = [
os.path.join(self.directory, fname) for fname in self.filenames
]
super().__init__(self.samples, batch_size, shuffle, seed)
def _check_params(self, df, x_col, y_col, weight_col, classes):
# check class mode is one of the currently supported
if self.class_mode not in self.allowed_class_modes:
raise ValueError('Invalid class_mode: {}; expected one of: {}'.format(
self.class_mode, self.allowed_class_modes))
# check that y_col has several column names if class_mode is multi_output
if (self.class_mode == 'multi_output') and not isinstance(y_col, list):
raise TypeError(
'If class_mode="{}", y_col must be a list. Received {}.'.format(
self.class_mode,
type(y_col).__name__))
# check that filenames/filepaths column values are all strings
if not all(df[x_col].apply(lambda x: isinstance(x, str))):
raise TypeError(
'All values in column x_col={} must be strings.'.format(x_col))
# check labels are string if class_mode is binary or sparse
if self.class_mode in {'binary', 'sparse'}:
if not all(df[y_col].apply(lambda x: isinstance(x, str))):
raise TypeError('If class_mode="{}", y_col="{}" column '
'values must be strings.'.format(
self.class_mode, y_col))
# check that if binary there are only 2 different classes
if self.class_mode == 'binary':
if classes:
classes = set(classes)
if len(classes) != 2:
raise ValueError('If class_mode="binary" there must be 2 '
'classes. {} class/es were given.'.format(
len(classes)))
elif df[y_col].nunique() != 2:
raise ValueError('If class_mode="binary" there must be 2 classes. '
'Found {} classes.'.format(df[y_col].nunique()))
# check values are string, list or tuple if class_mode is categorical
if self.class_mode == 'categorical':
types = (str, list, tuple)
if not all(df[y_col].apply(lambda x: isinstance(x, types))):
raise TypeError('If class_mode="{}", y_col="{}" column '
'values must be type string, list or tuple.'.format(
self.class_mode, y_col))
# raise warning if classes are given but will be unused
if classes and self.class_mode in {'input', 'multi_output', 'raw', None}:
warnings.warn(
'`classes` will be ignored given the class_mode="{}"'.format(
self.class_mode))
# check that if weight column that the values are numerical
if weight_col and not issubclass(df[weight_col].dtype.type, np.number):
raise TypeError(
'Column weight_col={} must be numeric.'.format(weight_col))
def get_classes(self, df, y_col):
labels = []
for label in df[y_col]:
if isinstance(label, (list, tuple)):
labels.append([self.class_indices[lbl] for lbl in label])
else:
labels.append(self.class_indices[label])
return labels
@staticmethod
def _filter_classes(df, y_col, classes):
df = df.copy()
def remove_classes(labels, classes):
if isinstance(labels, (list, tuple)):
labels = [cls for cls in labels if cls in classes]
return labels or None
elif isinstance(labels, str):
return labels if labels in classes else None
else:
raise TypeError(
'Expect string, list or tuple but found {} in {} column '.format(
type(labels), y_col))
if classes:
# prepare for membership lookup
classes = list(collections.OrderedDict.fromkeys(classes).keys())
df[y_col] = df[y_col].apply(lambda x: remove_classes(x, classes))
else:
classes = set()
for v in df[y_col]:
if isinstance(v, (list, tuple)):
classes.update(v)
else:
classes.add(v)
classes = sorted(classes)
return df.dropna(subset=[y_col]), classes
def _filter_valid_filepaths(self, df, x_col):
"""Keep only dataframe rows with valid filenames.
Args:
df: Pandas dataframe containing filenames in a column
x_col: string, column in `df` that contains the filenames or filepaths
Returns:
absolute paths to image files
"""
filepaths = df[x_col].map(lambda fname: os.path.join(self.directory, fname))
mask = filepaths.apply(validate_filename, args=(self.white_list_formats,))
n_invalid = (~mask).sum()
if n_invalid:
warnings.warn('Found {} invalid image filename(s) in x_col="{}". '
'These filename(s) will be ignored.'.format(
n_invalid, x_col))
return df[mask]
@property
def filepaths(self):
return self._filepaths
@property
def labels(self):
if self.class_mode in {'multi_output', 'raw'}:
return self._targets
else:
return self.classes
@property
def sample_weight(self):
return self._sample_weight
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
@keras_export('keras.preprocessing.image.ImageDataGenerator')
class ImageDataGenerator():
"""Generate batches of tensor image data with real-time data augmentation.
Warning: `tf.keras.preprocessing.image.ImageDataGenerator` is not recommended
for new code. Prefer loading images with
`tf.keras.utils.image_dataset_from_directory` and transforming the output
`tf.data.Dataset` with preprocessing layers. For more information, see the
tutorials for [loading images](
https://www.tensorflow.org/tutorials/load_data/images) and
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
The data will be looped over (in batches).
Args:
featurewise_center: Boolean. Set input mean to 0 over the dataset,
feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean. Divide inputs by std of the
dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval `(-width_shift_range,
+width_shift_range)` - With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`, same as with `width_shift_range=[-1, 0,
+1]`, while with `width_shift_range=1.0` possible values are floats
in the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval `(-height_shift_range,
+height_shift_range)` - With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`, same as with `height_shift_range=[-1, 0,
+1]`, while with `height_shift_range=1.0` possible values are floats
in the interval [-1.0, +1.0).
brightness_range: Tuple or list of two floats. Range for picking a
brightness shift value from.
shear_range: Float. Shear Intensity (Shear angle in counter-clockwise
direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom. If a float,
`[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}. Default is
'nearest'. Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int. Value used for points outside the boundaries when
`fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None. If None or 0, no rescaling is
applied, otherwise we multiply the data by the value provided (after
applying all other transformations).
preprocessing_function: function that will be applied on each input. The
function will run after the image is resized and augmented.
The function should take one argument: one image (Numpy tensor with
rank 3), and should output a Numpy tensor with the same shape.
data_format: Image data format, either "channels_first" or
"channels_last". "channels_last" mode means that the images should have
shape `(samples, height, width, channels)`, "channels_first" mode means
that the images should have shape `(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your Keras config
file at `~/.keras/keras.json`. If you never set it, then it will be
"channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
dtype: Dtype to use for the generated arrays.
Raises:
ValueError: If the value of the argument, `data_format` is other than
`"channels_last"` or `"channels_first"`.
ValueError: If the value of the argument, `validation_split` > 1
or `validation_split` < 0.
Examples:
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = utils.to_categorical(y_train, num_classes)
y_test = utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
validation_split=0.2)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit(datagen.flow(x_train, y_train, batch_size=32,
subset='training'),
validation_data=datagen.flow(x_train, y_train,
batch_size=8, subset='validation'),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0,
interpolation_order=1,
dtype=None):
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
self.dtype = dtype
self.interpolation_order = interpolation_order
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError('`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.zca_whitening_matrix = None
if isinstance(zoom_range, (float, int)):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif (len(zoom_range) == 2 and
all(isinstance(val, (float, int)) for val in zoom_range)):
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % (zoom_range,))
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
if brightness_range is not None:
if (not isinstance(brightness_range, (tuple, list)) or
len(brightness_range) != 2):
raise ValueError(
'`brightness_range should be tuple or list of two floats. '
'Received: %s' % (brightness_range,))
self.brightness_range = brightness_range
def flow(self,
x,
y=None,
batch_size=32,
shuffle=True,
sample_weight=None,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
ignore_class_split=False,
subset=None):
"""Takes data & label arrays, generates batches of augmented data.
Args:
x: Input data. Numpy array of rank 4 or a tuple. If tuple, the first
element should contain the images and the second element another numpy
array or a list of numpy arrays that gets passed to the output without
any modifications. Can be used to feed the model miscellaneous data
along with the images. In case of grayscale data, the channels axis of
the image array should have value 1, in case of RGB data, it should
have value 3, and in case of RGBA data, it should have value 4.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None). This allows you to optionally
specify a directory to which to save the augmented pictures being
generated (useful for visualizing what you are doing).
save_prefix: Str (default: `''`). Prefix to use for filenames of saved
pictures (only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg", "bmp", "pdf", "ppm", "gif", "tif",
"jpg" (only relevant if `save_to_dir` is set). Default: "png".
ignore_class_split: Boolean (default: False), ignore difference
in number of classes in labels across train and validation
split (useful for non-classification tasks)
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
Returns:
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
Raises:
ValueError: If the Value of the argument, `subset` is other than
"training" or "validation".
"""
return NumpyArrayIterator(
x,
y,
self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
ignore_class_split=ignore_class_split,
subset=subset,
dtype=self.dtype)
def flow_from_directory(self,
directory,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest',
keep_aspect_ratio=False):
"""Takes the path to a directory & generates batches of augmented data.
Args:
directory: string, path to the target directory. It should contain one
subdirectory per class. Any PNG, JPG, BMP, PPM or TIF images inside
each of the subdirectories directory tree will be included in the
generator. See [this script](
https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`, defaults to `(256,
256)`. The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb". Whether
the images will be converted to have 1, 3, or 4 channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None. If not provided, the list
of classes will be automatically inferred from the subdirectory
names/structure under `directory`, where each subdirectory will be
treated as a different class (and the order of the classes, which
will map to the label indices, will be alphanumeric). The
dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True) If set to False,
sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None). This allows you to optionally
specify a directory to which to save the augmented pictures being
generated (useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures (only
relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg", "bmp", "pdf", "ppm", "gif", "tif",
"jpg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image. Supported
methods are `"nearest"`, `"bilinear"`, and `"bicubic"`. If PIL version
1.1.3 or newer is installed, `"lanczos"` is also supported. If PIL
version 3.4.0 or newer is installed, `"box"` and `"hamming"` are also
supported. By default, `"nearest"` is used.
keep_aspect_ratio: Boolean, whether to resize images to a target
size without aspect ratio distortion. The image is cropped in
the center with target aspect ratio before resizing.
Returns:
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory,
self,
target_size=target_size,
color_mode=color_mode,
keep_aspect_ratio=keep_aspect_ratio,
classes=classes,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation,
dtype=self.dtype)
def flow_from_dataframe(self,
dataframe,
directory=None,
x_col='filename',
y_col='class',
weight_col=None,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
interpolation='nearest',
validate_filenames=True,
**kwargs):
"""Takes the dataframe and the path to a directory + generates batches.
The generated batches contain augmented/normalized data.
**A simple tutorial can be found **[here](
http://bit.ly/keras_flow_from_dataframe).
Args:
dataframe: Pandas dataframe containing the filepaths relative to
`directory` (or absolute paths if `directory` is None) of the
images in a string column. It should include other column/s
depending on the `class_mode`:
- if `class_mode` is `"categorical"` (default value) it must
include the `y_col` column with the class/es of each image.
Values in column can be string/list/tuple if a single class
or list/tuple if multiple classes.
- if `class_mode` is `"binary"` or `"sparse"` it must include
the given `y_col` column with class values as strings.
- if `class_mode` is `"raw"` or `"multi_output"` it should contain
the columns specified in `y_col`.
- if `class_mode` is `"input"` or `None` no extra column is needed.
directory: string, path to the directory to read images from. If `None`,
data in `x_col` column should be absolute paths.
x_col: string, column in `dataframe` that contains the filenames (or
absolute paths if `directory` is `None`).
y_col: string or list, column/s in `dataframe` that has the target data.
weight_col: string, column in `dataframe` that contains the sample
weights. Default: `None`.
target_size: tuple of integers `(height, width)`, default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: one of "grayscale", "rgb", "rgba". Default: "rgb". Whether
the images will be converted to have 1 or 3 color channels.
classes: optional list of classes (e.g. `['dogs', 'cats']`). Default is
None. If not provided, the list of classes will be automatically
inferred from the `y_col`, which will map to the label indices, will
be alphanumeric). The dictionary containing the mapping from class
names to class indices can be obtained via the attribute
`class_indices`.
class_mode: one of "binary", "categorical", "input", "multi_output",
"raw", sparse" or None. Default: "categorical".
Mode for yielding the targets:
- `"binary"`: 1D numpy array of binary labels,
- `"categorical"`: 2D numpy array of one-hot encoded labels.
Supports multi-label output.
- `"input"`: images identical to input images (mainly used to work
with autoencoders),
- `"multi_output"`: list with the values of the different columns,
- `"raw"`: numpy array of values in `y_col` column(s),
- `"sparse"`: 1D numpy array of integer labels, - `None`, no targets
are returned (the generator will only yield batches of image data,
which is useful to use in `model.predict()`).
batch_size: size of the batches of data (default: 32).
shuffle: whether to shuffle the data (default: True)
seed: optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None). This allows you to optionally
specify a directory to which to save the augmented pictures being
generated (useful for visualizing what you are doing).
save_prefix: str. Prefix to use for filenames of saved pictures (only
relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg", "bmp", "pdf", "ppm", "gif", "tif",
"jpg" (only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image. Supported
methods are `"nearest"`, `"bilinear"`, and `"bicubic"`. If PIL version
1.1.3 or newer is installed, `"lanczos"` is also supported. If PIL
version 3.4.0 or newer is installed, `"box"` and `"hamming"` are also
supported. By default, `"nearest"` is used.
validate_filenames: Boolean, whether to validate image filenames in
`x_col`. If `True`, invalid images will be ignored. Disabling this
option can lead to speed-up in the execution of this function.
Defaults to `True`.
**kwargs: legacy arguments for raising deprecation warnings.
Returns:
A `DataFrameIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
if 'has_ext' in kwargs:
warnings.warn(
'has_ext is deprecated, filenames in the dataframe have '
'to match the exact filenames in disk.', DeprecationWarning)
if 'sort' in kwargs:
warnings.warn(
'sort is deprecated, batches will be created in the'
'same order than the filenames provided if shuffle'
'is set to False.', DeprecationWarning)
if class_mode == 'other':
warnings.warn(
'`class_mode` "other" is deprecated, please use '
'`class_mode` "raw".', DeprecationWarning)
class_mode = 'raw'
if 'drop_duplicates' in kwargs:
warnings.warn(
'drop_duplicates is deprecated, you can drop duplicates '
'by using the pandas.DataFrame.drop_duplicates method.',
DeprecationWarning)
return DataFrameIterator(
dataframe,
directory,
self,
x_col=x_col,
y_col=y_col,
weight_col=weight_col,
target_size=target_size,
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset,
interpolation=interpolation,
validate_filenames=validate_filenames,
dtype=self.dtype)
def standardize(self, x):
"""Applies the normalization configuration in-place to a batch of inputs.
`x` is changed in-place since the function is mainly used internally
to standardize images and feed them to your network. If a copy of `x`
would be created instead it would have a significant performance cost.
If you want to apply this method without changing the input in-place
you can call the method creating a copy before:
standardize(np.copy(x))
Args:
x: Batch of inputs to be normalized.
Returns:
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, keepdims=True) + 1e-6)
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + 1e-6)
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.zca_whitening_matrix is not None:
flat_x = x.reshape(-1, np.prod(x.shape[-3:]))
white_x = flat_x @ self.zca_whitening_matrix
x = np.reshape(white_x, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def get_random_transform(self, img_shape, seed=None):
"""Generates random parameters for a transformation.
Args:
img_shape: Tuple of integers.
Shape of the image that is transformed.
seed: Random seed.
Returns:
A dictionary containing randomly chosen parameters describing the
transformation.
"""
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
if seed is not None:
np.random.seed(seed)
if self.rotation_range:
theta = np.random.uniform(-self.rotation_range, self.rotation_range)
else:
theta = 0
if self.height_shift_range:
try: # 1-D array-like or int
tx = np.random.choice(self.height_shift_range)
tx *= np.random.choice([-1, 1])
except ValueError: # floating point
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range)
if np.max(self.height_shift_range) < 1:
tx *= img_shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
try: # 1-D array-like or int
ty = np.random.choice(self.width_shift_range)
ty *= np.random.choice([-1, 1])
except ValueError: # floating point
ty = np.random.uniform(-self.width_shift_range, self.width_shift_range)
if np.max(self.width_shift_range) < 1:
ty *= img_shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.random.uniform(-self.shear_range, self.shear_range)
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip
flip_vertical = (np.random.random() < 0.5) * self.vertical_flip
channel_shift_intensity = None
if self.channel_shift_range != 0:
channel_shift_intensity = np.random.uniform(-self.channel_shift_range,
self.channel_shift_range)
brightness = None
if self.brightness_range is not None:
brightness = np.random.uniform(self.brightness_range[0],
self.brightness_range[1])
transform_parameters = {
'theta': theta,
'tx': tx,
'ty': ty,
'shear': shear,
'zx': zx,
'zy': zy,
'flip_horizontal': flip_horizontal,
'flip_vertical': flip_vertical,
'channel_shift_intensity': channel_shift_intensity,
'brightness': brightness
}
return transform_parameters
def apply_transform(self, x, transform_parameters):
"""Applies a transformation to an image according to given parameters.
Args:
x: 3D tensor, single image.
transform_parameters: Dictionary with string - parameter pairs
describing the transformation.
Currently, the following parameters
from the dictionary are used:
- `'theta'`: Float. Rotation angle in degrees.
- `'tx'`: Float. Shift in the x direction.
- `'ty'`: Float. Shift in the y direction.
- `'shear'`: Float. Shear angle in degrees.
- `'zx'`: Float. Zoom in the x direction.
- `'zy'`: Float. Zoom in the y direction.
- `'flip_horizontal'`: Boolean. Horizontal flip.
- `'flip_vertical'`: Boolean. Vertical flip.
- `'channel_shift_intensity'`: Float. Channel shift intensity.
- `'brightness'`: Float. Brightness shift intensity.
Returns:
A transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
x = apply_affine_transform(
x,
transform_parameters.get('theta', 0),
transform_parameters.get('tx', 0),
transform_parameters.get('ty', 0),
transform_parameters.get('shear', 0),
transform_parameters.get('zx', 1),
transform_parameters.get('zy', 1),
row_axis=img_row_axis,
col_axis=img_col_axis,
channel_axis=img_channel_axis,
fill_mode=self.fill_mode,
cval=self.cval,
order=self.interpolation_order)
if transform_parameters.get('channel_shift_intensity') is not None:
x = apply_channel_shift(x,
transform_parameters['channel_shift_intensity'],
img_channel_axis)
if transform_parameters.get('flip_horizontal', False):
x = flip_axis(x, img_col_axis)
if transform_parameters.get('flip_vertical', False):
x = flip_axis(x, img_row_axis)
if transform_parameters.get('brightness') is not None:
x = apply_brightness_shift(x, transform_parameters['brightness'], False)
return x
def random_transform(self, x, seed=None):
"""Applies a random transformation to an image.
Args:
x: 3D tensor, single image.
seed: Random seed.
Returns:
A randomly transformed version of the input (same shape).
"""
params = self.get_random_transform(x.shape, seed)
return self.apply_transform(x, params)
def fit(self, x, augment=False, rounds=1, seed=None):
"""Fits the data generator to some sample data.
This computes the internal data stats related to the
data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
When `rescale` is set to a value, rescaling is applied to
sample data before computing the internal data stats.
Args:
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, in case
of RGB data, it should have value 3, and in case
of RGBA data, it should have value 4.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
"""
x = np.asarray(x, dtype=self.dtype)
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn('Expected input to be images (as Numpy array) '
'following the data format convention "' +
self.data_format + '" (channels on axis ' +
str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' +
str(self.channel_axis) + '. '
'However, it was passed an array with shape ' +
str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if self.rescale:
x *= self.rescale
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=self.dtype)
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + 1e-6)
if self.zca_whitening:
n = len(x)
flat_x = np.reshape(x, (n, -1))
u, s, _ = np.linalg.svd(flat_x.T, full_matrices=False)
s_inv = np.sqrt(n) / (s + self.zca_epsilon)
self.zca_whitening_matrix = (u * s_inv).dot(u.T)
@keras_export('keras.preprocessing.image.random_rotation')
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random rotation of a Numpy image tensor.
Warning: `tf.keras.preprocessing.image.random_rotation` does not operate on
tensors and is not recommended for new code. Prefer
`tf.keras.layers.RandomRotation` which provides equivalent functionality as a
preprocessing layer. For more information, see the tutorial for
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
Returns:
Rotated Numpy image tensor.
"""
theta = np.random.uniform(-rg, rg)
x = apply_affine_transform(x,
theta=theta,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
@keras_export('keras.preprocessing.image.random_shift')
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial shift of a Numpy image tensor.
Warning: `tf.keras.preprocessing.image.random_shift` does not operate on
tensors and is not recommended for new code. Prefer
`tf.keras.layers.RandomTranslation` which provides equivalent functionality as
a preprocessing layer. For more information, see the tutorial for
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
Returns:
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
x = apply_affine_transform(x,
tx=tx,
ty=ty,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
@keras_export('keras.preprocessing.image.random_shear')
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial shear of a Numpy image tensor.
Args:
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
Returns:
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
x = apply_affine_transform(
x,
shear=shear,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
@keras_export('keras.preprocessing.image.random_zoom')
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial zoom of a Numpy image tensor.
Warning: `tf.keras.preprocessing.image.random_zoom` does not operate on
tensors and is not recommended for new code. Prefer
`tf.keras.layers.RandomZoom` which provides equivalent functionality as
a preprocessing layer. For more information, see the tutorial for
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
Returns:
Zoomed Numpy image tensor.
Raises:
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: %s' % (zoom_range,))
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
x = apply_affine_transform(
x,
zx=zx,
zy=zy,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
@keras_export('keras.preprocessing.image.apply_channel_shift')
def apply_channel_shift(x, intensity, channel_axis=0):
"""Performs a channel shift.
Args:
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
Returns:
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + intensity, min_x, max_x) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
@keras_export('keras.preprocessing.image.random_channel_shift')
def random_channel_shift(x, intensity_range, channel_axis=0):
"""Performs a random channel shift.
Args:
x: Input tensor. Must be 3D.
intensity_range: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
Returns:
Numpy image tensor.
"""
intensity = np.random.uniform(-intensity_range, intensity_range)
return apply_channel_shift(x, intensity, channel_axis=channel_axis)
@keras_export('keras.preprocessing.image.apply_brightness_shift')
def apply_brightness_shift(x, brightness, scale=True):
"""Performs a brightness shift.
Args:
x: Input tensor. Must be 3D.
brightness: Float. The new brightness value.
scale: Whether to rescale the image such that minimum and maximum values
are 0 and 255 respectively. Default: True.
Returns:
Numpy image tensor.
Raises:
ImportError: if PIL is not available.
"""
if ImageEnhance is None:
raise ImportError('Using brightness shifts requires PIL. '
'Install PIL or Pillow.')
x_min, x_max = np.min(x), np.max(x)
local_scale = (x_min < 0) or (x_max > 255)
x = array_to_img(x, scale=local_scale or scale)
x = imgenhancer_Brightness = ImageEnhance.Brightness(x)
x = imgenhancer_Brightness.enhance(brightness)
x = img_to_array(x)
if not scale and local_scale:
x = x / 255 * (x_max - x_min) + x_min
return x
@keras_export('keras.preprocessing.image.random_brightness')
def random_brightness(x, brightness_range, scale=True):
"""Performs a random brightness shift.
Warning: `tf.keras.preprocessing.image.random_brightness` does not operate on
tensors and is not recommended for new code. Prefer
`tf.keras.layers.RandomBrightness` which provides equivalent functionality as
a preprocessing layer. For more information, see the tutorial for
[augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as
the [preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
scale: Whether to rescale the image such that minimum and maximum values
are 0 and 255 respectively. Default: True.
Returns:
Numpy image tensor.
Raises:
ValueError if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
'`brightness_range should be tuple or list of two floats. '
'Received: %s' % (brightness_range,))
u = np.random.uniform(brightness_range[0], brightness_range[1])
return apply_brightness_shift(x, u, scale)
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 - 0.5
o_y = float(y) / 2 - 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
@keras_export('keras.preprocessing.image.apply_affine_transform')
def apply_affine_transform(x, theta=0, tx=0, ty=0, shear=0, zx=1, zy=1,
row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., order=1):
"""Applies an affine transformation specified by the parameters given.
Args:
x: 3D numpy array - a 2D image with one or more channels.
theta: Rotation angle in degrees.
tx: Width shift.
ty: Heigh shift.
shear: Shear angle in degrees.
zx: Zoom in x direction.
zy: Zoom in y direction
row_axis: Index of axis for rows (aka Y axis) in the input
image. Direction: left to right.
col_axis: Index of axis for columns (aka X axis) in the input
image. Direction: top to bottom.
channel_axis: Index of axis for channels in the input image.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
order: int, order of interpolation
Returns:
The transformed version of the input.
Raises:
ImportError: if SciPy is not available.
"""
if scipy is None:
raise ImportError('Image transformations require SciPy. '
'Install SciPy.')
# Input sanity checks:
# 1. x must 2D image with one or more channels (i.e., a 3D tensor)
# 2. channels must be either first or last dimension
if np.unique([row_axis, col_axis, channel_axis]).size != 3:
raise ValueError("'row_axis', 'col_axis', and 'channel_axis'"
" must be distinct")
# shall we support negative indices?
valid_indices = set([0, 1, 2])
actual_indices = set([row_axis, col_axis, channel_axis])
if actual_indices != valid_indices:
raise ValueError(
f'Invalid axis\' indices: {actual_indices - valid_indices}')
if x.ndim != 3:
raise ValueError('Input arrays must be multi-channel 2D images.')
if channel_axis not in [0, 2]:
raise ValueError('Channels are allowed and the first and last dimensions.')
transform_matrix = None
if theta != 0:
theta = np.deg2rad(theta)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shift_matrix
else:
transform_matrix = np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear = np.deg2rad(shear)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shear_matrix
else:
transform_matrix = np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = zoom_matrix
else:
transform_matrix = np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = np.rollaxis(x, channel_axis, 0)
# Matrix construction assumes that coordinates are x, y (in that order).
# However, regular numpy arrays use y,x (aka i,j) indexing.
# Possible solution is:
# 1. Swap the x and y axes.
# 2. Apply transform.
# 3. Swap the x and y axes again to restore image-like data ordering.
# Mathematically, it is equivalent to the following transformation:
# M' = PMP, where P is the permutation matrix, M is the original
# transformation matrix.
if col_axis > row_axis:
transform_matrix[:, [0, 1]] = transform_matrix[:, [1, 0]]
transform_matrix[[0, 1]] = transform_matrix[[1, 0]]
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndimage.interpolation.affine_transform( # pylint: disable=g-complex-comprehension
x_channel,
final_affine_matrix,
final_offset,
order=order,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
| [
[
[
1484,
1495
],
[
50871,
50882
]
],
[
[
1503,
1505
],
[
9867,
9869
],
[
10095,
10097
]
],
[
[
1513,
1528
],
[
30735,
30750
]
],
[
[
1536,
1538
],
[
18081,
18083
],
[
18575,
18577
],
[
18621,
18623
],
[
18676,
18678
],
[
24042,
24044
],
[
30506,
30508
],
[
30541,
30543
],
[
30555,
30557
],
[
30925,
30927
],
[
31726,
31728
],
[
39220,
39222
],
[
39954,
39956
],
[
47232,
47234
],
[
16456,
16458
],
[
51595,
51597
]
],
[
[
1546,
1553
],
[
9936,
9943
],
[
9988,
9995
]
],
[
[
1561,
1570
],
[
13435,
13444
]
],
[
[
1578,
1586
],
[
7710,
7718
],
[
9579,
9587
],
[
16655,
16663
],
[
37450,
37458
],
[
49678,
49686
],
[
51765,
51773
],
[
63807,
63815
],
[
64073,
64081
],
[
64392,
64400
],
[
64715,
64723
],
[
79495,
79503
],
[
79680,
79688
],
[
79906,
79914
],
[
80101,
80109
],
[
81881,
81889
],
[
82244,
82252
],
[
82761,
82769
],
[
89631,
89639
]
],
[
[
1606,
1613
],
[
3556,
3563
],
[
3616,
3623
],
[
6005,
6012
],
[
6065,
6072
],
[
7539,
7546
],
[
29706,
29713
],
[
29770,
29777
],
[
34766,
34773
],
[
34830,
34837
],
[
61411,
61418
],
[
61475,
61482
]
],
[
[
1638,
1648
],
[
12181,
12191
]
],
[
[
1656,
1667
],
[
3781,
3783
],
[
4329,
4331
],
[
4351,
4353
],
[
4639,
4641
],
[
6358,
6360
],
[
13580,
13582
],
[
13644,
13646
],
[
13948,
13950
],
[
14609,
14611
],
[
22715,
22717
],
[
23964,
23966
],
[
24241,
24243
],
[
24444,
24446
],
[
31414,
31416
],
[
34977,
34979
],
[
35025,
35027
],
[
35641,
35643
],
[
35662,
35664
],
[
35969,
35971
],
[
35990,
35992
],
[
36357,
36359
],
[
36383,
36385
],
[
36409,
36411
],
[
36815,
36817
],
[
36970,
36972
],
[
37083,
37085
],
[
37984,
37986
],
[
38089,
38091
],
[
38487,
38489
],
[
39142,
39144
],
[
46703,
46705
],
[
49925,
49927
],
[
52273,
52275
],
[
81660,
81662
],
[
81740,
81742
],
[
82624,
82626
],
[
82712,
82714
],
[
83475,
83477
],
[
83539,
83541
],
[
83708,
83710
],
[
83764,
83766
],
[
83846,
83848
],
[
83955,
83957
],
[
84134,
84136
],
[
84189,
84191
],
[
84271,
84273
],
[
84347,
84349
],
[
84484,
84486
],
[
84671,
84673
],
[
84756,
84758
],
[
84826,
84828
],
[
84979,
84981
],
[
85184,
85186
],
[
89393,
89395
],
[
90205,
90207
],
[
90235,
90237
],
[
90319,
90321
],
[
90600,
90602
],
[
90777,
90779
],
[
90898,
90900
],
[
91073,
91075
],
[
91200,
91202
],
[
91240,
91242
],
[
91299,
91301
],
[
92856,
92858
],
[
94869,
94871
],
[
94909,
94911
],
[
96362,
96364
],
[
98415,
98417
],
[
99058,
99060
],
[
99107,
99109
],
[
99118,
99120
],
[
99155,
99157
],
[
99226,
99228
],
[
99265,
99267
],
[
99697,
99699
],
[
100458,
100460
],
[
100469,
100471
],
[
101950,
101952
],
[
102179,
102181
],
[
102244,
102246
],
[
102315,
102317
],
[
102322,
102324
],
[
103996,
103998
],
[
104680,
104682
],
[
104720,
104722
],
[
104731,
104733
],
[
104747,
104749
],
[
104799,
104801
],
[
104814,
104816
],
[
104961,
104963
],
[
105171,
105173
],
[
105240,
105242
],
[
105277,
105279
],
[
105292,
105294
],
[
105344,
105346
],
[
105510,
105512
],
[
105593,
105595
],
[
105800,
105802
],
[
106017,
106019
],
[
107039,
107041
],
[
107080,
107082
]
],
[
[
1713,
1725
],
[
2307,
2319
],
[
4923,
4935
],
[
6738,
6750
],
[
7891,
7903
],
[
12115,
12127
],
[
25629,
25641
],
[
32115,
32127
],
[
52364,
52376
],
[
91393,
91405
],
[
93269,
93281
],
[
95358,
95370
],
[
96635,
96647
],
[
98705,
98717
],
[
99315,
99327
],
[
99823,
99835
],
[
100791,
100803
],
[
102396,
102408
]
],
[
[
1741,
1746
],
[
103724,
103729
]
],
[
[
1767,
1773
]
],
[
[
1827,
1834
],
[
106794,
106801
]
],
[
[
1918,
1936
],
[
2034,
2043
],
[
2107,
2116
],
[
2144,
2153
],
[
2181,
2190
],
[
2217,
2226
],
[
2249,
2258
],
[
2281,
2290
],
[
3638,
3647
],
[
4454,
4463
],
[
4546,
4555
],
[
4729,
4738
],
[
4793,
4802
],
[
9712,
9721
],
[
9890,
9899
],
[
10080,
10089
]
],
[
[
1955,
1967
],
[
100308,
100320
],
[
100605,
100617
]
],
[
[
1990,
1999
],
[
2034,
2043
],
[
2107,
2116
],
[
2144,
2153
],
[
2181,
2190
],
[
2217,
2226
],
[
2249,
2258
],
[
2281,
2290
],
[
3638,
3647
],
[
4454,
4463
],
[
4546,
4555
],
[
4729,
4738
],
[
4793,
4802
],
[
9712,
9721
],
[
9890,
9899
],
[
10080,
10089
]
],
[
[
2009,
2021
],
[
100308,
100320
],
[
100605,
100617
]
],
[
[
2059,
2085
],
[
10872,
10898
],
[
11114,
11140
],
[
11168,
11194
]
],
[
[
2408,
2420
],
[
7575,
7587
],
[
23775,
23787
],
[
38953,
38965
],
[
100530,
100542
]
],
[
[
5024,
5036
],
[
23191,
23203
],
[
100687,
100699
]
],
[
[
6817,
6825
]
],
[
[
7970,
7978
],
[
22973,
22981
]
],
[
[
12172,
12180
],
[
25734,
25742
],
[
32201,
32209
],
[
40027,
40035
]
],
[
[
15927,
15944
],
[
18151,
18168
],
[
18383,
18400
]
],
[
[
16872,
16906
],
[
31030,
31064
]
],
[
[
18792,
18811
],
[
25713,
25732
],
[
40006,
40025
]
],
[
[
25695,
25712
],
[
73048,
73065
]
],
[
[
32182,
32200
],
[
67823,
67841
]
],
[
[
39592,
39609
],
[
51659,
51676
]
],
[
[
39988,
40005
],
[
80293,
80310
]
],
[
[
52247,
52256
],
[
87707,
87716
],
[
87802,
87811
]
],
[
[
52431,
52449
]
],
[
[
91455,
91470
]
],
[
[
93328,
93340
]
],
[
[
95417,
95429
]
],
[
[
96693,
96704
]
],
[
[
98771,
98790
],
[
87487,
87506
],
[
99759,
99778
]
],
[
[
99382,
99402
]
],
[
[
99892,
99914
],
[
87899,
87921
],
[
102019,
102041
]
],
[
[
100855,
100872
]
],
[
[
102061,
102091
],
[
105945,
105975
]
],
[
[
102465,
102487
],
[
86906,
86928
],
[
92889,
92911
],
[
94948,
94970
],
[
96409,
96431
],
[
98472,
98494
]
]
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.