One row per source file; columns marked nullable may be empty. Each record below lists these fields in this order, separated by `|`; the `content` field holds the full source file and the last three columns are per-file statistics.

| column | type | lengths / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
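A table with this schema is normally consumed through the Hugging Face `datasets` library. The snippet below is a minimal sketch of loading one shard and filtering it by star count; the Parquet file name is a placeholder, not a path taken from this listing.

```python
from datasets import load_dataset

# Load a single shard that follows the schema above (placeholder file name).
ds = load_dataset("parquet", data_files="code_shard.parquet", split="train")

# max_stars_count is nullable, so treat missing values as zero.
popular = ds.filter(lambda row: (row["max_stars_count"] or 0) >= 100)

record = popular[0]
print(record["max_stars_repo_name"], record["max_stars_repo_path"])
print(record["content"][:200])  # first 200 characters of the stored source file
```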
ae1c314367eda94502f3250be74ea3c895585272 | 52 | py | Python | tutorials/stock-wallet/microservices/wallet/src/commands/__init__.py | bhardwajRahul/minos-python | bad7a280ad92680abdeab01d1214688279cf6316 | [
"MIT"
] | 247 | 2022-01-24T14:55:30.000Z | 2022-03-25T12:06:17.000Z | tutorials/stock-wallet/microservices/wallet/src/commands/__init__.py | bhardwajRahul/minos-python | bad7a280ad92680abdeab01d1214688279cf6316 | [
"MIT"
] | 168 | 2022-01-24T14:54:31.000Z | 2022-03-31T09:31:09.000Z | tutorials/stock-wallet/microservices/wallet/src/commands/__init__.py | bhardwajRahul/minos-python | bad7a280ad92680abdeab01d1214688279cf6316 | [
"MIT"
] | 21 | 2022-02-06T17:25:58.000Z | 2022-03-27T04:50:29.000Z |
from .services import (
WalletCommandService,
)
| avg_line_length: 13 | max_line_length: 25 | alphanum_fraction: 0.730769 |
91b98f54e60a18955447366f3b8df0621aa0d797 | 2,432 | py | Python | openr/py/openr/utils/tests/serializer_tests.py | jamesbj/openr | 878f7e74ec91b9407110e64e6d8a9f9499a335d5 | [
"MIT"
] | 1 | 2019-02-26T06:36:29.000Z | 2019-02-26T06:36:29.000Z | openr/py/openr/utils/tests/serializer_tests.py | flintfinally/openr | a5abf937f75ca757c9d4c17093c3d89ffbb28e09 | [
"MIT"
] | null | null | null | openr/py/openr/utils/tests/serializer_tests.py | flintfinally/openr | a5abf937f75ca757c9d4c17093c3d89ffbb28e09 | [
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2014-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import random
import string
import unittest
from builtins import range
from openr.Lsdb import ttypes as lsdb_types
from openr.utils import serializer
from thrift.protocol.TJSONProtocol import TJSONProtocolFactory
class TestSerialization(unittest.TestCase):
def test_reverse_equality(self):
for _ in range(100):
thrift_obj = lsdb_types.PrefixDatabase()
random_string = "".join(random.choice(string.digits) for _ in range(10))
thrift_obj.thisNodeName = random_string
raw_msg = serializer.serialize_thrift_object(thrift_obj)
recovered_obj = serializer.deserialize_thrift_object(
raw_msg, lsdb_types.PrefixDatabase
)
self.assertEqual(thrift_obj, recovered_obj)
for _ in range(100):
thrift_obj = lsdb_types.PrefixDatabase()
random_string = "".join(random.choice(string.digits) for _ in range(10))
thrift_obj.thisNodeName = random_string
raw_msg = serializer.serialize_thrift_object(
thrift_obj, TJSONProtocolFactory
)
recovered_obj = serializer.deserialize_thrift_object(
raw_msg, lsdb_types.PrefixDatabase, TJSONProtocolFactory
)
self.assertEqual(thrift_obj, recovered_obj)
def test_thrifttype_sensitivity(self):
thrift_obj = lsdb_types.PrefixDatabase()
thrift_obj.thisNodeName = "some node"
raw_msg = serializer.serialize_thrift_object(thrift_obj)
recovered_obj = serializer.deserialize_thrift_object(
raw_msg, lsdb_types.PrefixEntry
)
self.assertTrue(thrift_obj != recovered_obj)
def test_exception_handling(self):
thrift_obj = lsdb_types.PrefixDatabase()
thrift_obj.thisNodeName = "some node"
raw_msg = serializer.serialize_thrift_object(thrift_obj)
# should raise exception due to inconsistency of protocol factor
with self.assertRaises(Exception):
serializer.deserialize_thrift_object(
raw_msg, lsdb_types.PrefixDatabase, TJSONProtocolFactory
)
| avg_line_length: 38 | max_line_length: 84 | alphanum_fraction: 0.695313 |
c8903add0c99f574198791b79f48983a3adc5052 | 1,411 | py | Python | molecule/alternative/tests/test_alternative.py | m1cka/ansible-grafana | 3cd6f9d3414ef5cf8ed6def8bcad1d654e2cce9e | [
"MIT"
] | 447 | 2017-11-27T18:07:59.000Z | 2022-03-28T09:15:42.000Z | molecule/alternative/tests/test_alternative.py | m1cka/ansible-grafana | 3cd6f9d3414ef5cf8ed6def8bcad1d654e2cce9e | [
"MIT"
] | 232 | 2017-12-02T20:00:47.000Z | 2022-03-30T06:25:37.000Z | molecule/alternative/tests/test_alternative.py | m1cka/ansible-grafana | 3cd6f9d3414ef5cf8ed6def8bcad1d654e2cce9e | [
"MIT"
] | 294 | 2017-12-14T22:01:40.000Z | 2022-03-31T18:41:19.000Z |
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_directories(host):
dirs = [
"/etc/grafana",
"/var/log/grafana",
"/var/lib/grafana",
"/var/lib/grafana/dashboards",
"/var/lib/grafana/plugins",
"/var/lib/grafana/plugins/raintank-worldping-app"
]
files = [
"/etc/grafana/grafana.ini",
"/etc/grafana/ldap.toml"
]
for directory in dirs:
d = host.file(directory)
assert d.is_directory
assert d.exists
for file in files:
f = host.file(file)
assert f.exists
assert f.is_file
def test_service(host):
s = host.service("grafana-server")
# assert s.is_enabled
assert s.is_running
def test_packages(host):
p = host.package("grafana")
assert p.is_installed
assert p.version == "6.2.5"
def test_socket(host):
assert host.socket("tcp://127.0.0.1:3000").is_listening
def test_alternative_yum_repo(host):
if host.system_info.distribution in ['centos', 'redhat', 'fedora']:
f = host.file("/etc/yum.repos.d/alternative.grafana.yum.repo")
assert f.exists
def test_custom_auth_option(host):
f = host.file("/etc/grafana/grafana.ini")
assert f.contains("login_maximum_inactive_lifetime_days = 42")
| avg_line_length: 25.196429 | max_line_length: 71 | alphanum_fraction: 0.649894 |
052215bcbd2fb3b15cbab4b53fd49cdd0329c447 | 3,619 | py | Python | torchvision/utils.py | gpleiss/vision | 7a975284a88a3c722a92a7962180781df2acfa68 | [
"BSD-3-Clause"
] | 3 | 2019-10-29T02:38:55.000Z | 2020-12-17T07:54:42.000Z | torchvision/utils.py | fmassa/vision-1 | 7a975284a88a3c722a92a7962180781df2acfa68 | [
"BSD-3-Clause"
] | null | null | null | torchvision/utils.py | fmassa/vision-1 | 7a975284a88a3c722a92a7962180781df2acfa68 | [
"BSD-3-Clause"
] | null | null | null |
import torch
import math
irange = range
def make_grid(tensor, nrow=8, padding=2,
normalize=False, range=None, scale_each=False):
"""
Given a 4D mini-batch Tensor of shape (B x C x H x W),
or a list of images all of the same size,
makes a grid of images
normalize=True will shift the image to the range (0, 1),
by subtracting the minimum and dividing by the maximum pixel value.
if range=(min, max) where min and max are numbers, then these numbers are used to
normalize the image.
scale_each=True will scale each image in the batch of images separately rather than
computing the (min, max) over all images.
[Example usage is given in this notebook](https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91)
"""
# if list of tensors, convert to a 4D mini-batch Tensor
if isinstance(tensor, list):
tensorlist = tensor
numImages = len(tensorlist)
size = torch.Size(torch.Size([int(numImages)]) + tensorlist[0].size())  # int(), not Python 2 long()
tensor = tensorlist[0].new(size)
for i in irange(numImages):
tensor[i].copy_(tensorlist[i])
if tensor.dim() == 2: # single image H x W
tensor = tensor.view(1, tensor.size(0), tensor.size(1))
if tensor.dim() == 3: # single image
if tensor.size(0) == 1: # if single-channel, convert to 3-channel
tensor = torch.cat((tensor, tensor, tensor), 0)
return tensor
if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images
tensor = torch.cat((tensor, tensor, tensor), 1)
if normalize is True:
if range is not None:
assert isinstance(range, tuple), \
"range has to be a tuple (min, max) if specified. min and max are numbers"
def norm_ip(img, min, max):
img.clamp_(min=min, max=max)
img.add_(-min).div_(max - min)
def norm_range(t, range):
if range is not None:
norm_ip(t, range[0], range[1])
else:
norm_ip(t, t.min(), t.max())
if scale_each is True:
for t in tensor: # loop over mini-batch dimension
norm_range(t, range)
else:
norm_range(tensor, range)
# make the mini-batch of images into a grid
nmaps = tensor.size(0)
xmaps = min(nrow, nmaps)
ymaps = int(math.ceil(float(nmaps) / xmaps))
height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
grid = tensor.new(3, height * ymaps, width * xmaps).fill_(0)
k = 0
for y in irange(ymaps):
for x in irange(xmaps):
if k >= nmaps:
break
grid.narrow(1, y * height + 1 + padding // 2, height - padding)\
.narrow(2, x * width + 1 + padding // 2, width - padding)\
.copy_(tensor[k])
k = k + 1
return grid
def save_image(tensor, filename, nrow=8, padding=2,
normalize=False, range=None, scale_each=False):
"""
Saves a given Tensor into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images by calling `make_grid`.
All options after `filename` are passed through to `make_grid`. Refer to it's documentation for
more details
"""
from PIL import Image
tensor = tensor.cpu()
grid = make_grid(tensor, nrow=nrow, padding=padding,
normalize=normalize, range=range, scale_each=scale_each)
ndarr = grid.mul(255).byte().transpose(0, 2).transpose(0, 1).numpy()
im = Image.fromarray(ndarr)
im.save(filename)
| avg_line_length: 37.697917 | max_line_length: 113 | alphanum_fraction: 0.605692 |
3d525a6aae3aa9b7e9040c0ce82aa571165c0b2f | 16,260 | py | Python | main.py | Eclipi/PaDiM-EfficientNet | 738830a11db5bcb725407e9323177d565667a0f9 | [
"Apache-2.0"
] | null | null | null | main.py | Eclipi/PaDiM-EfficientNet | 738830a11db5bcb725407e9323177d565667a0f9 | [
"Apache-2.0"
] | null | null | null | main.py | Eclipi/PaDiM-EfficientNet | 738830a11db5bcb725407e9323177d565667a0f9 | [
"Apache-2.0"
] | null | null | null |
# 1. import module
## utils
import random
import os
from random import sample
import numpy as np
import pickle
import time
import sys
import copy
import argparse
import warnings
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm
from collections import OrderedDict
from sklearn.metrics import roc_auc_score, roc_curve, f1_score, accuracy_score, recall_score, precision_score, confusion_matrix, precision_recall_curve
from scipy.ndimage import gaussian_filter
from skimage import morphology
from skimage.segmentation import mark_boundaries
## torch module
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
## eff model
from efficient_modified import EfficientNetModified
## mvtec datasets
import datasets.mvtec as mvtec
## filter warnings
warnings.filterwarnings('ignore')
# 2. choose device
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
# 3. argparse
def parse_args():
parser = argparse.ArgumentParser('PaDiM Parameters')
parser.add_argument('-d', '--data_path', type=str, required=True, help='mvtec data location')
parser.add_argument('-s', '--save_path', type=str, required=True, help='inference model & data location')
parser.add_argument('-a', '--arch', type=str, choices=['b0', 'b1', 'b4', 'b7'], default='b4')
parser.add_argument('-b', '--batch_size', type=int, default=32)
parser.add_argument('--training', type=lambda s: s.lower() in ['true', '1'])
parser.add_argument('--seed', type=int, default=1024)
parser.add_argument('--resize', type=int, default=256)
parser.add_argument('--cropsize', type=int, default=224)
parser.add_argument('--model_print', type=bool, default=False)
parser.add_argument('--img_print', type=bool, default=False)
return parser.parse_args()
# epoch, random_select size
def create_seed(filters):
random.seed(args.seed)
torch.manual_seed(args.seed)
if use_cuda:
torch.cuda.manual_seed_all(args.seed)
def embedding_concat(x, y):
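# Concatenate a fine feature map x (B x C1 x H1 x W1) with a coarser one y (B x C2 x H2 x W2,
# where H1 = s * H2): x is unfolded into s x s blocks aligned with y's grid, y is appended along
# the channel dimension at every block position, and the result is folded back to H1 x W1.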
B, C1, H1, W1 = x.size()
_, C2, H2, W2 = y.size()
s = int(H1/H2)
x = F.unfold(x, kernel_size=s, dilation=1, stride=s)
x = x.view(B, C1, -1, H2, W2)
z = torch.zeros(B, C1 + C2, x.size(2), H2, W2)
for i in range(x.size(2)):
z[:, :, i, :, :] = torch.cat((x[:, :, i, :, :], y), 1)
z = z.view(B, -1, H2 * W2)
z = F.fold(z, kernel_size=s, output_size=(H1, W1), stride=s)
return z
def show_feat_list(model, size=(1, 3, 224, 224)):
sample_inputs = torch.zeros(size)
feat_list = model.extract_entire_features(sample_inputs)
for i, feat in enumerate(feat_list, 0):
print(i, feat.shape)
def denormalize(img):
mean = torch.tensor([0.485, 0.456, 0.406])
std = torch.tensor([0.229, 0.224, 0.225])
return img.mul_(std).add_(mean)
def show_sample_images(dataloader, n, class_name):
x, _, _ = next(iter(dataloader))
if x == None:
print('[error] dataloader empty!')
return
if n > args.batch_size:
print('[error] n exceeds batch size!')
return
rows = n//4
cols = 4
axes = []
fig = plt.figure(figsize=(20, 20), dpi=200)
for i in range(rows*cols):
axes.append(fig.add_subplot(rows, cols, i+1))
title = '%s subplot %d' % (class_name, i)
axes[-1].set_title(title)
axes[-1].imshow(denormalize(x[i].permute(1, 2, 0)))
fig.tight_layout()
pic_save_path = os.path.join(args.save_path, 'sample_%s' % (class_name))
fig.savefig(pic_save_path, dpi=200)
plt.show()
def calc_covinv(embedding_vectors, H, W, C):
for i in range(H * W):
yield np.linalg.inv(np.cov(embedding_vectors[:, :, i].numpy(), rowvar=False) + 0.01 * np.identity(C))
def plot_fig(test_img, scores, gts, threshold, save_dir, class_name):
num = len(scores)
vmax = scores.max() * 255.
vmin = scores.min() * 255.
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
for i in range(num):
img = test_img[i]
img = (((img.transpose(1, 2, 0) * std) + mean) * 255.).astype(np.uint8) # denormalize
gt = gts[i].transpose(1, 2, 0).squeeze()
heat_map = scores[i] * 255
mask = scores[i]
mask[mask > threshold] = 1
mask[mask <= threshold] = 0
kernel = morphology.disk(4)
mask = morphology.opening(mask, kernel)
mask *= 255
vis_img = mark_boundaries(img, mask, color=(1, 0, 0), mode='thick')
fig_img, ax_img = plt.subplots(1, 5, figsize=(12, 3))
fig_img.subplots_adjust(right=0.9)
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
for ax_i in ax_img:
ax_i.axes.xaxis.set_visible(False)
ax_i.axes.yaxis.set_visible(False)
ax_img[0].imshow(img)
ax_img[0].title.set_text('Image')
ax_img[1].imshow(gt, cmap='gray')
ax_img[1].title.set_text('GroundTruth')
ax = ax_img[2].imshow(heat_map, cmap='jet', norm=norm)
ax_img[2].imshow(img, cmap='gray', interpolation='none')
ax_img[2].imshow(heat_map, cmap='jet', alpha=0.5, interpolation='none')
ax_img[2].title.set_text('Predicted heat map')
ax_img[3].imshow(mask, cmap='gray')
ax_img[3].title.set_text('Predicted mask')
ax_img[4].imshow(vis_img)
ax_img[4].title.set_text('Segmentation result')
left = 0.92
bottom = 0.15
width = 0.015
height = 1 - 2 * bottom
rect = [left, bottom, width, height]
cbar_ax = fig_img.add_axes(rect)
cb = plt.colorbar(ax, shrink=0.6, cax=cbar_ax, fraction=0.046)
cb.ax.tick_params(labelsize=8)
font = {
'family': 'serif',
'color': 'black',
'weight': 'normal',
'size': 8,
}
cb.set_label('Anomaly Score', fontdict=font)
fig_img.savefig(os.path.join(save_dir, class_name + '_{}'.format(i)), dpi=100)
plt.close()
def main():
# make directory for saving data
os.makedirs(os.path.join(args.save_path, 'model_pkl_%s' % name), exist_ok=True)
# capture ROCAUC score
fig, ax = plt.subplots(1, 2, figsize=(20, 10))
fig_img_rocauc = ax[0]
fig_pixel_rocauc = ax[1]
total_roc_auc = []
total_pixel_roc_auc = []
if args.arch == 'b0':
block_num = torch.tensor([3, 5, 11]) # b0
filters = (24 + 40 + 112) # 176
elif args.arch == 'b1':
# block_num = torch.tensor([3, 6, 9]) # b1 first, 24 + 40 + 80
# block_num = torch.tensor([4, 7, 13]) # b1 medium 24 + 40 + 112
block_num = torch.tensor([5, 8, 16]) # b1 last 24 + 40 + 112
filters = (24 + 40 + 112) # 176
elif args.arch == 'b4':
# block_num = torch.tensor([3, 7, 11]) # b4 (32 + 56 + 112)
block_num = torch.tensor([3, 7, 17]) # b4 (32 + 56 + 160)
# block_num = torch.tensor([5, 9, 13]) # (32 + 56 + 112)
# block_num = torch.tensor([5, 9, 20]) # b4 (32 + 56 + 160)
# block_num = torch.tensor([6, 10, 22]) # b4 (32 + 56 + 160)
filters = (32 + 56 + 160) # 248
elif args.arch == 'b7':
block_num = torch.tensor([11, 18, 38]) # b7 (48 + 80 + 224) # last
# block_num = torch.tensor([5, 12, 29]) # b7 (48 + 80 + 224) # first
# block_num = torch.tensor([8, 15, 33]) # medium
filters = (48 + 80 + 224) # 352
'''
The number of filters is so small that I want to take the entire filter, not randomly.
So I'm going to delete the random code this time.
'''
create_seed(filters)
# model attach to device
eff_model.to(device)
print('training: ', args.training)
for k, class_name in enumerate(mvtec.CLASS_NAMES):
train_dataset = mvtec.MVTecDataset(args.data_path, class_name=class_name, is_train=True)
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, pin_memory=True)
test_dataset = mvtec.MVTecDataset(args.data_path, class_name=class_name, is_train=False)
test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, pin_memory=True)
train_outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', [])])
test_outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', [])])
# plt.show vscode not work, so save fig
if args.img_print:
show_sample_images(train_dataloader, args.batch_size // 8, class_name)
# model_path
train_feature_filepath = os.path.join(args.save_path, 'model_pkl_%s' % name, 'train_%s.pkl' % class_name)
if args.training:
if os.path.exists(train_feature_filepath):
os.remove(train_feature_filepath)
eff_model.eval()
for (x, _, _) in tqdm(train_dataloader, '%d | feature extraction | train | %s |' % (k, class_name)):
with torch.no_grad():
feats = eff_model.extract_features(x.to(device), block_num.to(device))
# If you want to see the shape of the feature...
# for i, feat in enumerate(feats):
# print("layer {} feature's shape: {}".format(i, feat.shape))
for k, v in zip(train_outputs.keys(), feats):
train_outputs[k].append(v.cpu().detach())
for k, v in train_outputs.items():
train_outputs[k] = torch.cat(v, 0)
embedding_vectors = train_outputs['layer1']
for layer_name in ['layer2', 'layer3']:
embedding_vectors = embedding_concat(embedding_vectors, train_outputs[layer_name])
B, C, H, W = embedding_vectors.size()
print("embedding vector's size: {}, {}, {}, {}".format(B, C, H, W))
embedding_vectors = embedding_vectors.view(B, C, H * W)
mean = torch.mean(embedding_vectors, dim=0).numpy()
cov_inv = torch.zeros(C, C, H * W).numpy()
I = np.identity(C)
# It's done with generator, but it doesn't matter what you do because there's not much memory difference.
# cc = calc_covinv(embedding_vectors, H, W, C)
# for i, value in enumerate(cc):
# cov_inv[:, :, i] = value
for i in range(H * W):
cov_inv[:, :, i] = np.linalg.inv(np.cov(embedding_vectors[:, :, i].numpy(), rowvar=False) + 0.01 * I)
# save learned distribution
train_outputs = [mean, cov_inv]
with open(train_feature_filepath, 'wb') as f:
pickle.dump(train_outputs, f, protocol=pickle.HIGHEST_PROTOCOL)
else:
if not os.path.exists(train_feature_filepath):
print('train set feature file not exists: {}'.format(train_feature_filepath))
else:
print('load train set feat file from %s' % train_feature_filepath)
with open(train_feature_filepath, 'rb') as f:
train_outputs = pickle.load(f)
gt_list = []
gt_mask_list = []
test_imgs = []
# If you pass without picking a feature
# Depending on eval, the method of calculating bn, dropout etc varies.
eff_model.eval()
for (x, y, mask) in tqdm(test_dataloader, '| feature extraction | test | %s |' % class_name):
test_imgs.extend(x.cpu().detach().numpy())
gt_list.extend(y.cpu().detach().numpy())
gt_mask_list.extend(mask.cpu().detach().numpy())
with torch.no_grad():
feats = eff_model.extract_features(x.to(device), block_num.to(device))
for k, v in zip(test_outputs.keys(), feats):
test_outputs[k].append(v.cpu().detach())
for k, v in test_outputs.items():
test_outputs[k] = torch.cat(v, 0)
embedding_vectors = test_outputs['layer1']
for layer_name in ['layer2', 'layer3']:
embedding_vectors = embedding_concat(embedding_vectors, test_outputs[layer_name])
inference_start = time.time()
B, C, H, W = embedding_vectors.size()
embedding_vectors = embedding_vectors.view(B, C, H * W).to(device)
mean = torch.Tensor(train_outputs[0]).to(device)
cov_inv = torch.Tensor(train_outputs[1]).to(device)
dist_list = torch.zeros(size=(H*W, B))
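# For each spatial position i, compute the Mahalanobis distance of every test embedding to the
# learned Gaussian: sqrt((x - mu)^T * Sigma^-1 * (x - mu)); torch.diag keeps only the per-sample
# diagonal of the resulting B x B matrix.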
for i in range(H*W):
delta = embedding_vectors[:, :, i] - mean[:, i]
m_dist = torch.sqrt(torch.diag(torch.mm(torch.mm(delta, cov_inv[:, :, i]), delta.t())))
dist_list[i] = m_dist
dist_list = dist_list.transpose(1, 0).view(B, H, W)
score_map = F.interpolate(dist_list.unsqueeze(1), size=x.size(2), mode='bilinear', align_corners=False).squeeze().cpu().numpy()
for i in range(score_map.shape[0]):
score_map[i] = gaussian_filter(score_map[i], sigma=4)
inference_time = time.time() - inference_start
print('{} inference time: {:.3f}'.format(class_name, inference_time))
# Normalization
max_score = score_map.max()
min_score = score_map.min()
scores = (score_map - min_score) / (max_score - min_score)
# calculate image-level ROC AUC score
img_scores = scores.reshape(scores.shape[0], -1).max(axis=1)
gt_list = np.asarray(gt_list)
fpr, tpr, img_thresholds = roc_curve(gt_list, img_scores)
img_roc_auc = roc_auc_score(gt_list, img_scores)
total_roc_auc.append(img_roc_auc)
print('image ROCAUC: %.3f' % (img_roc_auc))
fig_img_rocauc.plot(fpr, tpr, label='%s img_ROCAUC: %.3f' % (class_name, img_roc_auc))
# get optimal threshold based Label
distances = (tpr - 1.) ** 2 + fpr ** 2
img_threshold = img_thresholds[np.argmin(distances)]
gt_mask = np.asarray(gt_mask_list)
precision, recall, thresholds = precision_recall_curve(gt_mask.flatten(), scores.flatten())
a = 2 * precision * recall
b = precision + recall
f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)
threshold = thresholds[np.argmax(f1)]
# label, mask two types threshold
print('label based threshold: {:.3f}, pixel based threshold: {:.3f}'.format(img_threshold, threshold))
# calculate per-pixel level ROCAUC
fpr, tpr, _ = roc_curve(gt_mask.flatten(), scores.flatten())
per_pixel_rocauc = roc_auc_score(gt_mask.flatten(), scores.flatten())
total_pixel_roc_auc.append(per_pixel_rocauc)
print('pixel ROCAUC: %.3f' % (per_pixel_rocauc))
fig_pixel_rocauc.plot(fpr, tpr, label='%s ROCAUC: %.3f' % (class_name, per_pixel_rocauc))
save_dir = args.save_path + '/' + f'pictures_efficientnet-{args.arch}'
os.makedirs(save_dir, exist_ok=True)
plot_fig(test_imgs, scores, gt_mask_list, threshold, save_dir, class_name)
# class, image ROCAUC, pixel ROCAUC, inference_time
with open(args.save_path + '/' + f'efficientnet-{args.arch}-lst.txt', "a") as f:
f.write('{}-{:.3f}-{:.3f}-{:.3f}\n'.format(class_name, img_roc_auc, per_pixel_rocauc, inference_time))
print('Average ROCAUC: %.3f' % np.mean(total_roc_auc))
fig_img_rocauc.title.set_text('Average image ROCAUC: %.3f' % np.mean(total_roc_auc))
fig_img_rocauc.legend(loc="lower right")
print('Average pixel ROCUAC: %.3f' % np.mean(total_pixel_roc_auc))
fig_pixel_rocauc.title.set_text('Average pixel ROCAUC: %.3f' % np.mean(total_pixel_roc_auc))
fig_pixel_rocauc.legend(loc="lower right")
fig.tight_layout()
fig.savefig(os.path.join(args.save_path, '%s_lst_roc_curve.png' % name), dpi=100)
if __name__ == '__main__':
args = parse_args()
name = 'efficientnet-{}'.format(args.arch)
eff_model = EfficientNetModified.from_pretrained(name)
if args.model_print:
print(eff_model)
#show_feat_list(eff_model)
main()
# print(dir(eff_model))
# test code
# for i, (name, layer) in enumerate(eff_model.named_modules()):
# print(i, name)
# for i, block in enumerate(eff_model._blocks):
# print(i)
| avg_line_length: 39.95086 | max_line_length: 151 | alphanum_fraction: 0.611316 |
06fec3cb86fc17f43dd3990090d847ea8835629a | 9,861 | py | Python | PMBANet_TOPO/dataset.py | Alina-Mingchi/TOPO_final | a8983006929b60bda0ed1d2e9a9130427b628431 | [
"MIT"
] | null | null | null | PMBANet_TOPO/dataset.py | Alina-Mingchi/TOPO_final | a8983006929b60bda0ed1d2e9a9130427b628431 | [
"MIT"
] | null | null | null | PMBANet_TOPO/dataset.py | Alina-Mingchi/TOPO_final | a8983006929b60bda0ed1d2e9a9130427b628431 | [
"MIT"
] | null | null | null |
import torch.utils.data as data
import torch
import numpy as np
import os
from os import listdir
from os.path import join
from PIL import Image
import random
from random import randrange
def is_image_file(filename):
return any(filename.endswith(extension) for extension in [".png", ".jpg", ".jpeg"])
def load_img(filepath):
#img = Image.open(filepath).convert('RGB')
##############
img = Image.open(filepath)
#img = Image.open(filepath)
#y, _, _ = img.split()
return img
def get_patch(img_in, img_tar, patch_size, scale, ix=-1, iy=-1):
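# Crop a random low-resolution patch of size ip = tp / scale from img_in and the spatially
# corresponding high-resolution patch of size tp = scale * patch_size from img_tar, keeping the
# (ix, iy) and (tx, ty) = scale * (ix, iy) offsets aligned across the two scales.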
(c, ih, iw) = img_in.shape
####print('input:', ih, iw)
(th, tw) = (scale * ih, scale * iw)
patch_mult = scale #if len(scale) > 1 else 1
tp = patch_mult * patch_size
ip = tp // scale
if ix == -1:
ix = random.randrange(0, iw - ip + 1)
if iy == -1:
iy = random.randrange(0, ih - ip + 1)
(tx, ty) = (scale * ix, scale * iy)
img_in = img_in[:, iy:iy + ip, ix:ix + ip]
print('get_patch', img_tar.size(), ty, ty+tp, tx, tx+tp)
img_tar = img_tar[:, ty:ty + tp, tx:tx + tp]
info_patch = {
'ix': ix, 'iy': iy, 'ip': ip, 'tx': tx, 'ty': ty, 'tp': tp}
####print('after', img_tar.size())
return img_in, img_tar, info_patch
def augment(img_in,img_color, img_tar, flip_h=True, rot=True):
info_aug = {'flip_h': False, 'flip_v': False, 'trans': False}
if random.random() < 0.5 and flip_h:
####print('<-------------->', img_tar.size())
img_in = torch.from_numpy(img_in.numpy()[:, :, ::-1].copy())
img_color = torch.from_numpy(img_color.numpy()[:, :, ::-1].copy())
img_tar = torch.from_numpy(img_tar.numpy()[:, :, ::-1].copy())
info_aug['flip_h'] = True
if rot:
if random.random() < 0.5:
img_in = torch.from_numpy(img_in.numpy()[:, ::-1, :].copy())
img_color = torch.from_numpy(img_color.numpy()[:, ::-1, :].copy())
img_tar = torch.from_numpy(img_tar.numpy()[:, ::-1, :].copy())
info_aug['flip_v'] = True
if random.random() < 0.5:
img_in = torch.FloatTensor(np.transpose(img_in.numpy(),(0,2,1)))
img_color = torch.FloatTensor(np.transpose(img_color.numpy(),(0,2,1)))
img_tar = torch.FloatTensor(np.transpose(img_tar.numpy(),(0,2,1)))
info_aug['trans'] = True
return img_in,img_color,img_tar, info_aug
class DatasetFromFolder(data.Dataset):
def __init__(self, image_dir, lr_dir, rgb_dir, patch_size, upscale_factor, dataset, data_augmentation, input_transform=None, input_rgb_transform=None, target_transform=None):
super(DatasetFromFolder, self).__init__()
self.image_filenames = [join(image_dir, x) for x in listdir(image_dir) if is_image_file(x)]
self.lr_dir = lr_dir
self.rgb_dir = rgb_dir
self.patch_size = patch_size
self.upscale_factor = upscale_factor
self.dataset = dataset
self.input_transform = input_transform
self.input_rgb_transform = input_rgb_transform
self.target_transform = target_transform
self.data_augmentation = data_augmentation
def __getitem__(self, index):
target = load_img(self.image_filenames[index])
print(self.image_filenames[index], target.size)
_, file = os.path.split(self.image_filenames[index])
##### print('<==============>', self.dataset)
if self.dataset == 'DIV2K_train_LR_aug_x8':
input_lr = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x8.png'))
# elif self.dataset == 'DIV2K_train_LR_aug_x4':
# input = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x4.png'))
# elif self.dataset == 'DIV2K_train_LR_aug_x2':
# input = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x2.png'))
# elif self.dataset == 'DIV2K_train_LR_difficult':
# input = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x4d.png'))
# elif self.dataset == 'DIV2K_train_LR_mild':
# input = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x4m.png'))
# elif self.dataset == 'DIV2K_train_LR_wild':
# set=randrange(1, 5)
# input = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x4w'+str(set)+'.png'))
# #####
# elif self.dataset == 'DIV2K_train_LR_bicubicX2/X2/':
# ##### print('--------', self.lr_dir, os.path.splitext(file)[0])
# input = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x2.png'))
# #### print(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x2.png'))
# elif self.dataset == 'DIV2K_train_LR_bicubicX4/X4/':
# ##### print('--------', self.lr_dir, os.path.splitext(file)[0])
# input = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x4.png'))
# #### print(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x2.png'))
# elif self.dataset == 'DIV2K_train_LR_unknownX2/X2/':
# ##### print('--------', self.lr_dir, os.path.splitext(file)[0])
# input = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x2.png'))
# #### print(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x2.png'))
# elif self.dataset == 'DIV2K_train_LR_unknownX3/X3/':
# ##### print('--------', self.lr_dir, os.path.splitext(file)[0])
# input = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x3.png'))
# #### print(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x2.png'))
# elif self.dataset == 'DIV2K_train_LR_unknownX4/X4/':
# ##### print('--------', self.lr_dir, os.path.splitext(file)[0])
# input = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x4.png'))
# #### print(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x2.png'))
# elif self.dataset == 'DIV2K_train_LR_bicubicX3/X3/':
# ##### print('--------', self.lr_dir, os.path.splitext(file)[0])
# input = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x3.png'))
# #### print(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'x2.png'))
######################
elif self.dataset == 'depth_map/data/L_pic128_x2/':
input_lr = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'.png'))
#print(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'.png'))
######################
#######***
elif self.dataset == 'train_all_depth_x8/':
input_rgb = load_img(os.path.join(self.rgb_dir, os.path.splitext(file)[0]+'.png'))
input_lr = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'.png'))
elif self.dataset == 'train_all_depth_x4/':
input_rgb = load_img(os.path.join(self.rgb_dir, os.path.splitext(file)[0]+'.png'))
input_lr = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'.png'))
elif self.dataset == 'train_all_depth_x2/':
input_rgb = load_img(os.path.join(self.rgb_dir, os.path.splitext(file)[0]+'.png'))
input_lr = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'.png'))
elif self.dataset == 'train_all_depth_x16/':
input_rgb = load_img(os.path.join(self.rgb_dir, os.path.splitext(file)[0]+'.png'))
input_lr = load_img(os.path.join(self.lr_dir,os.path.splitext(file)[0]+'.png'))
elif self.dataset == 'train_x8/':
input_rgb = load_img(os.path.join('/Users/felicity/Desktop/Super-resolution-drones-depth-images/PBMANet_TOPO/data/train_x8_rgb', os.path.splitext(file)[0]+'.png'))
print('RGB: ' + str(input_rgb.size))  # PIL images expose .size, not .shape
input_lr = load_img(os.path.join('/Users/felicity/Desktop/Super-resolution-drones-depth-images/PBMANet_TOPO/data/train_x8_lr',os.path.splitext(file)[0]+'.png'))
print('lr: ' + str(input_lr.size))  # PIL images expose .size, not .shape
# if self.input_rgb_transform:
# input_rgb = self.input_rgb_transform(input_rgb)
# if self.input_transform:
# input_lr = self.input_transform(input)
# if self.target_transform:
# target = self.target_transform(target)
# print('target:', target.size())
# print('target:', target.size())
if self.data_augmentation:
input_lr,input_rgb,target, _ = augment(input_lr,input_rgb,target)
return input_rgb, input_lr, target
def __len__(self):
return len(self.image_filenames)
class DatasetFromFolderEval(data.Dataset):
def __init__(self, lr_dir,rgb_dir, input_transform=None,input_rgb_transform=None,target_transform=None):
super(DatasetFromFolderEval, self).__init__()
self.image_filenames = [join(lr_dir, x) for x in listdir(lr_dir) if is_image_file(x)]
self.input_transform = input_transform
self.input_rgb_transform = input_rgb_transform
self.target_transform = target_transform
self.lr_dir = lr_dir
self.rgb_dir = rgb_dir
def __getitem__(self, index):
input_lr = load_img(self.image_filenames[index])
_, file = os.path.split(self.image_filenames[index])
if self.lr_dir == './data/testx8/':
input_rgb = load_img(os.path.join(self.rgb_dir,os.path.splitext(file)[0]+'.png'))
if self.input_transform:
input_lr = self.input_transform(input_lr)
if self.input_rgb_transform:
input_rgb = self.input_rgb_transform(input_rgb)
return input_lr, input_rgb, file
def __len__(self):
return len(self.image_filenames)
| avg_line_length: 46.514151 | max_line_length: 178 | alphanum_fraction: 0.602981 |
2d4c8ae85ec8a72c2b240c3bd32e5e9359f4002a | 10,543 | py | Python | sdk/python/pulumi_azure_nextgen/network/v20180601/connection_monitor.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/network/v20180601/connection_monitor.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/network/v20180601/connection_monitor.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ConnectionMonitor']
class ConnectionMonitor(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_start: Optional[pulumi.Input[bool]] = None,
connection_monitor_name: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[pulumi.InputType['ConnectionMonitorDestinationArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
monitoring_interval_in_seconds: Optional[pulumi.Input[int]] = None,
network_watcher_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[pulumi.InputType['ConnectionMonitorSourceArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Information about the connection monitor.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_start: Determines if the connection monitor will start automatically once created.
:param pulumi.Input[str] connection_monitor_name: The name of the connection monitor.
:param pulumi.Input[pulumi.InputType['ConnectionMonitorDestinationArgs']] destination: Describes the destination of connection monitor.
:param pulumi.Input[str] location: Connection monitor location.
:param pulumi.Input[int] monitoring_interval_in_seconds: Monitoring interval in seconds.
:param pulumi.Input[str] network_watcher_name: The name of the Network Watcher resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group containing Network Watcher.
:param pulumi.Input[pulumi.InputType['ConnectionMonitorSourceArgs']] source: Describes the source of connection monitor.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Connection monitor tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['auto_start'] = auto_start
if connection_monitor_name is None:
raise TypeError("Missing required property 'connection_monitor_name'")
__props__['connection_monitor_name'] = connection_monitor_name
if destination is None:
raise TypeError("Missing required property 'destination'")
__props__['destination'] = destination
__props__['location'] = location
__props__['monitoring_interval_in_seconds'] = monitoring_interval_in_seconds
if network_watcher_name is None:
raise TypeError("Missing required property 'network_watcher_name'")
__props__['network_watcher_name'] = network_watcher_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if source is None:
raise TypeError("Missing required property 'source'")
__props__['source'] = source
__props__['tags'] = tags
__props__['etag'] = None
__props__['monitoring_status'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['start_time'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ConnectionMonitor")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ConnectionMonitor, __self__).__init__(
'azure-nextgen:network/v20180601:ConnectionMonitor',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ConnectionMonitor':
"""
Get an existing ConnectionMonitor resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ConnectionMonitor(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="autoStart")
def auto_start(self) -> pulumi.Output[Optional[bool]]:
"""
Determines if the connection monitor will start automatically once created.
"""
return pulumi.get(self, "auto_start")
@property
@pulumi.getter
def destination(self) -> pulumi.Output['outputs.ConnectionMonitorDestinationResponse']:
"""
Describes the destination of connection monitor.
"""
return pulumi.get(self, "destination")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Connection monitor location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="monitoringIntervalInSeconds")
def monitoring_interval_in_seconds(self) -> pulumi.Output[Optional[int]]:
"""
Monitoring interval in seconds.
"""
return pulumi.get(self, "monitoring_interval_in_seconds")
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> pulumi.Output[Optional[str]]:
"""
The monitoring status of the connection monitor.
"""
return pulumi.get(self, "monitoring_status")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the connection monitor.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
The provisioning state of the connection monitor.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def source(self) -> pulumi.Output['outputs.ConnectionMonitorSourceResponse']:
"""
Describes the source of connection monitor.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="startTime")
def start_time(self) -> pulumi.Output[Optional[str]]:
"""
The date and time when the connection monitor was started.
"""
return pulumi.get(self, "start_time")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Connection monitor tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Connection monitor type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| avg_line_length: 49.266355 | max_line_length: 1,802 | alphanum_fraction: 0.67846 |
314822762e3da09eb0492e6232daa27551cb69d0 | 1,615 | py | Python | dataset/augmentation.py | Creearc/Codenrock-New-Year-ML-Battle | 38dc89e45482cf6668cc0abe46f07b9d69db32a2 | [
"MIT"
] | 1 | 2022-01-07T16:57:34.000Z | 2022-01-07T16:57:34.000Z | dataset/augmentation.py | Creearc/Codenrock-New-Year-ML-Battle | 38dc89e45482cf6668cc0abe46f07b9d69db32a2 | [
"MIT"
] | null | null | null | dataset/augmentation.py | Creearc/Codenrock-New-Year-ML-Battle | 38dc89e45482cf6668cc0abe46f07b9d69db32a2 | [
"MIT"
] | null | null | null |
import os
import cv2
import numpy as np
######################################################################
''' PARAMETERS '''
######################################################################
path = 'train'
img_shape = (448, 448)
split_matrix = (4, 4)
######################################################################
imgs = os.listdir(path)
##img_name = imgs[2]
##img = cv2.imread('{}/{}'.format(path, img_name))
##
##img = cv2.resize(img, img_shape, cv2.INTER_AREA)
##out = img.copy()
##
##hist = []
##for i in range(3):
## h = cv2.calcHist([img], [i], None, [256], [0,256])
## hist.append(h)
## print(np.median(h))
##
##path_shape = (img_shape[0] // split_matrix[0],
## img_shape[1] // split_matrix[1])
##
##for x in range(split_matrix[0]):
## for y in range(split_matrix[1]):
## cv2.rectangle(img,
## (x * path_shape[0], y * path_shape[1]),
## ((x + 1) * path_shape[0], (y + 1) * path_shape[1]),
## (255, 0, 0), 1)
##
##
##
##
##cv2.imshow('', img)
##cv2.waitKey(0)
##cv2.destroyAllWindows()
dataset_path = 'train_s'
f = open('smth_wild.csv', 'w')
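# For every image, write one CSV row: the class folder name followed by the median of each
# colour channel's 256-bin histogram, separated by ';'.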
for folder in os.listdir(dataset_path):
for file in os.listdir('{}/{}'.format(dataset_path, folder)):
img = cv2.imread('{}/{}/{}'.format(dataset_path, folder, file),
cv2.IMREAD_COLOR)
f.write('{}'.format(folder))
for i in range(3):
h = cv2.calcHist([img], [i], None, [256], [0,256])
h = np.median(h)
f.write(';{}'.format(h))
f.write('\n')
f.close()
| avg_line_length: 24.846154 | max_line_length: 71 | alphanum_fraction: 0.453251 |
ba6df220730e0dc296eb8bac4db80bf6a043bafb | 3,740 | py | Python | plenum/test/checkpoints/test_checkpoint_stabilization_after_catchup.py | ken-ebert/indy-plenum | 7578798df456d06ffe24f488452fab3f1b3f00f1 | [
"Apache-2.0"
] | null | null | null | plenum/test/checkpoints/test_checkpoint_stabilization_after_catchup.py | ken-ebert/indy-plenum | 7578798df456d06ffe24f488452fab3f1b3f00f1 | [
"Apache-2.0"
] | null | null | null | plenum/test/checkpoints/test_checkpoint_stabilization_after_catchup.py | ken-ebert/indy-plenum | 7578798df456d06ffe24f488452fab3f1b3f00f1 | [
"Apache-2.0"
] | null | null | null |
from plenum.test import waits
from plenum.test.helper import sdk_send_random_and_check
from plenum.test.node_catchup.helper import waitNodeDataEquality
from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node
from plenum.test.test_node import checkNodesConnected
CHK_FREQ = 5
LOG_SIZE = 3 * CHK_FREQ
def test_second_checkpoint_after_catchup_can_be_stabilized(
chkFreqPatched, looper, txnPoolNodeSet, sdk_wallet_steward,
sdk_wallet_client, sdk_pool_handle, tdir, tconf,
allPluginsPath):
_, new_node = sdk_add_new_steward_and_node(
looper, sdk_pool_handle, sdk_wallet_steward,
'EpsilonSteward', 'Epsilon', tdir, tconf,
allPluginsPath=allPluginsPath)
txnPoolNodeSet.append(new_node)
looper.run(checkNodesConnected(txnPoolNodeSet))
waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1],
exclude_from_check="check_last_ordered_3pc_backup")
# Epsilon did not participate in ordering of the batch with EpsilonSteward
# NYM transaction and the batch with Epsilon NODE transaction.
# Epsilon got these transactions via catch-up.
master_replica = new_node.replicas._master_replica
assert len(master_replica._checkpointer._checkpoint_state) == 0
assert len(master_replica._checkpointer._stashed_recvd_checkpoints) == 0
assert master_replica.h == 2
assert master_replica.H == 17
sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client, 1)
for replica in new_node.replicas.values():
assert len(replica._checkpointer._checkpoint_state) == 1
assert len(replica._checkpointer._stashed_recvd_checkpoints) == 0
assert replica.h == 2
assert replica.H == 17
sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client, 6)
stabilization_timeout = \
waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
looper.runFor(stabilization_timeout)
for replica in new_node.replicas.values():
assert len(replica._checkpointer._checkpoint_state) == 2
keys_iter = iter(replica._checkpointer._checkpoint_state)
assert next(keys_iter) == (3, 5)
assert replica._checkpointer._checkpoint_state[3, 5].seqNo == 5
assert replica._checkpointer._checkpoint_state[3, 5].digest is None
assert replica._checkpointer._checkpoint_state[3, 5].isStable is False
assert next(keys_iter) == (6, 10)
assert replica._checkpointer._checkpoint_state[6, 10].seqNo == 9
assert replica._checkpointer._checkpoint_state[6, 10].digest is None
assert replica._checkpointer._checkpoint_state[6, 10].isStable is False
# nothing is stashed since it's ordered during catch-up
assert len(replica._checkpointer._stashed_recvd_checkpoints) == 0
assert replica.h == 2
assert replica.H == 17
sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client, 1)
looper.runFor(stabilization_timeout)
for replica in new_node.replicas.values():
assert len(replica._checkpointer._checkpoint_state) == 1
keys_iter = iter(replica._checkpointer._checkpoint_state)
assert next(keys_iter) == (6, 10)
assert replica._checkpointer._checkpoint_state[6, 10].seqNo == 10
assert replica._checkpointer._checkpoint_state[6, 10].digest is not None
assert replica._checkpointer._checkpoint_state[6, 10].isStable is True
assert len(replica._checkpointer._stashed_recvd_checkpoints) == 0
assert replica.h == 10
assert replica.H == 25
| avg_line_length: 41.555556 | max_line_length: 80 | alphanum_fraction: 0.72246 |
15f26f126f2c6165dd849cf156c3f7af1e2d95bf | 292 | py | Python | Django/users/urls.py | zarif007/Blog-site | e20e3f73fedbd7acb2f3d22398c36f3dcd4f88b4 | [
"MIT"
] | 1 | 2021-03-15T22:28:26.000Z | 2021-03-15T22:28:26.000Z | Django/users/urls.py | zarif007/Blog-site | e20e3f73fedbd7acb2f3d22398c36f3dcd4f88b4 | [
"MIT"
] | null | null | null | Django/users/urls.py | zarif007/Blog-site | e20e3f73fedbd7acb2f3d22398c36f3dcd4f88b4 | [
"MIT"
] | null | null | null |
from django.urls import path
from .views import CustomUserCreate, BlacklistTokenUpdateView
app_name = 'users'
urlpatterns = [
path('create/', CustomUserCreate.as_view(), name="create_user"),
path('logout/blacklist/', BlacklistTokenUpdateView.as_view(),
name='blacklist')
]
| avg_line_length: 26.545455 | max_line_length: 68 | alphanum_fraction: 0.732877 |
fc4f9f615f696fd7084207acf75320326b2d95e4 | 2,003 | py | Python | Ene-Jun-2021/perez-gutierrez-julio-cesar/Examen Extraordinario/Ejercicio-2/triangulo.py | jarmarj/DAS_Sistemas | 36c876673e7abae503cc137c3f66585a0e45ed79 | [
"MIT"
] | 41 | 2017-09-26T09:36:32.000Z | 2022-03-19T18:05:25.000Z | Ene-Jun-2021/perez-gutierrez-julio-cesar/Examen Extraordinario/Ejercicio-2/triangulo.py | jarmarj/DAS_Sistemas | 36c876673e7abae503cc137c3f66585a0e45ed79 | [
"MIT"
] | 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | Ene-Jun-2021/perez-gutierrez-julio-cesar/Examen Extraordinario/Ejercicio-2/triangulo.py | jarmarj/DAS_Sistemas | 36c876673e7abae503cc137c3f66585a0e45ed79 | [
"MIT"
] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z | from abc import ABC, abstractmethod
class Triangulo(ABC):
def __init__(self, angulo1, angulo2, angulo3, lado1, lado2, lado3):
self.angulo1 = angulo1
self.angulo2 = angulo2
self.angulo3 = angulo3
self.lado1 = lado1
self.lado2 = lado2
self.lado3 = lado3
self.num_de_lados = 3
def __str__(self):
return f"Angulo 1 = {self.angulo1}\nAngulo 2 = {self.angulo2}\n" \
f"Angulo 3 = {self.angulo3}\nLado 1 = {self.lado1}\n" \
f"Lado 2 = {self.lado2}\nLado 3 = {self.lado3}\nValidar : {self.verificar_triangulo()}"
def verificar_angulos(self):
return self.angulo1 + self.angulo2 + self.angulo3 == 180
@abstractmethod
def verificar_triangulo(self) -> bool:
pass
class Equilatero(Triangulo):
def verificar_triangulo(self):
return self.lado1 == self.lado2 and self.lado2 == self.lado3
class Isoceles(Triangulo):
def verificar_triangulo(self):
if self.lado1 == self.lado2 and self.lado2 == self.lado3:
return False
elif(self.lado1==self.lado2 or self.lado1==self.lado3 or self.lado2==self.lado3):
return True
else: return False
class Escaleno(Triangulo):
def verificar_triangulo(self):
if self.lado1 == self.lado2 and self.lado2 == self.lado3:
return False
elif(self.lado1 == self.lado2 or self.lado1 == self.lado3 or self.lado2 == self.lado3):
return False
elif (self.lado1 != self.lado2 or self.lado1 != self.lado3 or self.lado2 != self.lado3):
return True
if __name__ == '__main__':
################_angles_#_sides_###########
equilatero = Equilatero(90,60,30,10,10,10)
iso = Isoceles(90,60,30,10,10,20)
escaleno = Escaleno(90,60,30,10,20,30)
print(equilatero.verificar_triangulo())
print(iso.verificar_triangulo())
print(escaleno.verificar_triangulo())
| avg_line_length: 36.418182 | max_line_length: 105 | alphanum_fraction: 0.607589 |
faab1fe2cdc7f651b0aa16e5d9b11d5c4991baac | 33,941 | py | Python | app_venv/Lib/site-packages/mysql/connector/cursor_cext.py | orlandofv/sianna | f07dd6dbc62a9604f31ab800e482e62f14fba766 | [
"MIT"
] | null | null | null | app_venv/Lib/site-packages/mysql/connector/cursor_cext.py | orlandofv/sianna | f07dd6dbc62a9604f31ab800e482e62f14fba766 | [
"MIT"
] | null | null | null | app_venv/Lib/site-packages/mysql/connector/cursor_cext.py | orlandofv/sianna | f07dd6dbc62a9604f31ab800e482e62f14fba766 | [
"MIT"
] | null | null | null |
# coding=utf-8
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Cursor classes using the C Extension
"""
from collections import namedtuple
import re
import weakref
from _mysql_connector import MySQLInterfaceError # pylint: disable=F0401,E0611
from .abstracts import (MySQLConnectionAbstract, MySQLCursorAbstract,
NAMED_TUPLE_CACHE)
from . import errors
from .errorcode import CR_NO_RESULT_SET
from .cursor import (
RE_PY_PARAM, RE_SQL_INSERT_STMT,
RE_SQL_ON_DUPLICATE, RE_SQL_COMMENT, RE_SQL_INSERT_VALUES,
RE_SQL_SPLIT_STMTS, RE_SQL_FIND_PARAM
)
ERR_NO_RESULT_TO_FETCH = "No result set to fetch from"
class _ParamSubstitutor(object):
"""
Substitutes parameters into SQL statement.
"""
def __init__(self, params):
self.params = params
self.index = 0
def __call__(self, matchobj):
index = self.index
self.index += 1
try:
return self.params[index]
except IndexError:
raise errors.ProgrammingError(
"Not enough parameters for the SQL statement")
@property
def remaining(self):
"""Returns number of parameters remaining to be substituted"""
return len(self.params) - self.index
class CMySQLCursor(MySQLCursorAbstract):
"""Default cursor for interacting with MySQL using C Extension"""
_raw = False
_buffered = False
_raw_as_string = False
def __init__(self, connection):
"""Initialize"""
MySQLCursorAbstract.__init__(self)
self._insert_id = 0
self._warning_count = 0
self._warnings = None
self._affected_rows = -1
self._rowcount = -1
self._nextrow = (None, None)
self._executed = None
self._executed_list = []
self._stored_results = []
if not isinstance(connection, MySQLConnectionAbstract):
raise errors.InterfaceError(errno=2048)
self._cnx = weakref.proxy(connection)
def reset(self, free=True):
"""Reset the cursor
When free is True (default) the result will be freed.
"""
self._rowcount = -1
self._nextrow = None
self._affected_rows = -1
self._insert_id = 0
self._warning_count = 0
self._warnings = None
self._warnings = None
self._warning_count = 0
self._description = None
self._executed_list = []
if free and self._cnx:
self._cnx.free_result()
super(CMySQLCursor, self).reset()
def _check_executed(self):
"""Check if the statement has been executed.
Raises an error if the statement has not been executed.
"""
if self._executed is None:
raise errors.InterfaceError(ERR_NO_RESULT_TO_FETCH)
def _fetch_warnings(self):
"""Fetch warnings
Fetch warnings doing a SHOW WARNINGS. Can be called after getting
the result.
Returns a result set or None when there were no warnings.
Raises errors.Error (or subclass) on errors.
Returns list of tuples or None.
"""
warnings = []
try:
# force freeing result
self._cnx.consume_results()
_ = self._cnx.cmd_query("SHOW WARNINGS")
warnings = self._cnx.get_rows()[0]
self._cnx.consume_results()
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
except Exception as err:
raise errors.InterfaceError(
"Failed getting warnings; {0}".format(str(err)))
if warnings:
return warnings
return None
def _handle_warnings(self):
"""Handle possible warnings after all results are consumed"""
if self._cnx.get_warnings is True and self._warning_count:
self._warnings = self._fetch_warnings()
def _handle_result(self, result):
"""Handles the result after statement execution"""
if 'columns' in result:
self._description = result['columns']
self._rowcount = 0
self._handle_resultset()
else:
self._insert_id = result['insert_id']
self._warning_count = result['warning_count']
self._affected_rows = result['affected_rows']
self._rowcount = -1
self._handle_warnings()
if self._cnx.raise_on_warnings is True and self._warnings:
raise errors.get_mysql_exception(*self._warnings[0][1:3])
def _handle_resultset(self):
"""Handle a result set"""
pass
def _handle_eof(self):
"""Handle end of reading the result
Raises an errors.Error on errors.
"""
self._warning_count = self._cnx.warning_count
self._handle_warnings()
if self._cnx.raise_on_warnings is True and self._warnings:
raise errors.get_mysql_exception(*self._warnings[0][1:3])
if not self._cnx.more_results:
self._cnx.free_result()
def _execute_iter(self):
"""Generator returns MySQLCursor objects for multiple statements
Deprecated: use nextset() method directly.
This method is only used when multiple statements are executed
by the execute() method. It uses zip() to make an iterator from the
given query_iter (result of MySQLConnection.cmd_query_iter()) and
the list of statements that were executed.
"""
executed_list = RE_SQL_SPLIT_STMTS.split(self._executed)
i = 0
self._executed = executed_list[i]
yield self
while True:
try:
if not self.nextset():
raise StopIteration
except errors.InterfaceError as exc:
# Result without result set
if exc.errno != CR_NO_RESULT_SET:
raise
except StopIteration:
return
i += 1
try:
self._executed = executed_list[i].strip()
except IndexError:
self._executed = executed_list[0]
yield self
return
def execute(self, operation, params=(), multi=False):
"""Execute given statement using given parameters
Deprecated: The multi argument is not needed and nextset() should
be used to handle multiple result sets.
"""
if not operation:
return None
if not self._cnx or self._cnx.is_closed():
raise errors.ProgrammingError("Cursor is not connected", 2055)
self._cnx.handle_unread_result()
stmt = ''
self.reset()
try:
if isinstance(operation, str):
stmt = operation.encode(self._cnx.python_charset)
else:
stmt = operation
except (UnicodeDecodeError, UnicodeEncodeError) as err:
raise errors.ProgrammingError(str(err))
if params:
prepared = self._cnx.prepare_for_mysql(params)
if isinstance(prepared, dict):
for key, value in prepared.items():
stmt = stmt.replace("%({0})s".format(key).encode(), value)
elif isinstance(prepared, (list, tuple)):
psub = _ParamSubstitutor(prepared)
stmt = RE_PY_PARAM.sub(psub, stmt)
if psub.remaining != 0:
raise errors.ProgrammingError(
"Not all parameters were used in the SQL statement")
try:
result = self._cnx.cmd_query(stmt, raw=self._raw,
buffered=self._buffered,
raw_as_string=self._raw_as_string)
except MySQLInterfaceError as exc:
raise errors.get_mysql_exception(msg=exc.msg, errno=exc.errno,
sqlstate=exc.sqlstate)
self._executed = stmt
self._handle_result(result)
if multi:
return self._execute_iter()
return None
def _batch_insert(self, operation, seq_params):
"""Implements multi row insert"""
def remove_comments(match):
"""Remove comments from INSERT statements.
This function is used while removing comments from INSERT
statements. If the matched string is a comment not enclosed
by quotes, it returns an empty string, else the string itself.
"""
if match.group(1):
return ""
return match.group(2)
tmp = re.sub(RE_SQL_ON_DUPLICATE, '',
re.sub(RE_SQL_COMMENT, remove_comments, operation))
matches = re.search(RE_SQL_INSERT_VALUES, tmp)
if not matches:
raise errors.InterfaceError(
"Failed rewriting statement for multi-row INSERT. "
"Check SQL syntax."
)
fmt = matches.group(1).encode(self._cnx.python_charset)
values = []
try:
stmt = operation.encode(self._cnx.python_charset)
for params in seq_params:
tmp = fmt
prepared = self._cnx.prepare_for_mysql(params)
if isinstance(prepared, dict):
for key, value in prepared.items():
tmp = tmp.replace("%({0})s".format(key).encode(), value)
elif isinstance(prepared, (list, tuple)):
psub = _ParamSubstitutor(prepared)
tmp = RE_PY_PARAM.sub(psub, tmp)
if psub.remaining != 0:
raise errors.ProgrammingError(
"Not all parameters were used in the SQL statement")
values.append(tmp)
if fmt in stmt:
stmt = stmt.replace(fmt, b','.join(values), 1)
self._executed = stmt
return stmt
return None
except (UnicodeDecodeError, UnicodeEncodeError) as err:
raise errors.ProgrammingError(str(err))
except Exception as err:
raise errors.InterfaceError(
"Failed executing the operation; %s" % err)
def executemany(self, operation, seq_params):
"""Execute the given operation multiple times"""
if not operation or not seq_params:
return None
if not self._cnx:
raise errors.ProgrammingError("Cursor is not connected")
self._cnx.handle_unread_result()
if not isinstance(seq_params, (list, tuple)):
raise errors.ProgrammingError(
"Parameters for query must be list or tuple.")
# Optimize INSERTs by batching them
if re.match(RE_SQL_INSERT_STMT, operation):
if not seq_params:
self._rowcount = 0
return None
stmt = self._batch_insert(operation, seq_params)
if stmt is not None:
self._executed = stmt
return self.execute(stmt)
rowcnt = 0
try:
for params in seq_params:
self.execute(operation, params)
try:
while True:
if self._description:
rowcnt += len(self._cnx.get_rows()[0])
else:
rowcnt += self._affected_rows
if not self.nextset():
break
except StopIteration:
# No more results
pass
except (ValueError, TypeError) as err:
raise errors.ProgrammingError(
"Failed executing the operation; {0}".format(err))
self._rowcount = rowcnt
return None
@property
def description(self):
"""Returns description of columns in a result"""
return self._description
@property
def rowcount(self):
"""Returns the number of rows produced or affected"""
if self._rowcount == -1:
return self._affected_rows
return self._rowcount
@property
def lastrowid(self):
"""Returns the value generated for an AUTO_INCREMENT column"""
return self._insert_id
def close(self):
"""Close the cursor
The result will be freed.
"""
if not self._cnx:
return False
self._cnx.handle_unread_result()
self._warnings = None
self._cnx = None
return True
def callproc(self, procname, args=()):
"""Calls a stored procedure with the given arguments"""
if not procname or not isinstance(procname, str):
raise ValueError("procname must be a string")
if not isinstance(args, (tuple, list)):
raise ValueError("args must be a sequence")
argfmt = "@_{name}_arg{index}"
self._stored_results = []
try:
argnames = []
argtypes = []
if args:
for idx, arg in enumerate(args):
argname = argfmt.format(name=procname, index=idx + 1)
argnames.append(argname)
if isinstance(arg, tuple):
argtypes.append(" CAST({0} AS {1})".format(argname,
arg[1]))
self.execute("SET {0}=%s".format(argname), (arg[0],))
else:
argtypes.append(argname)
self.execute("SET {0}=%s".format(argname), (arg,))
call = "CALL {0}({1})".format(procname, ','.join(argnames))
result = self._cnx.cmd_query(call, raw=self._raw,
raw_as_string=self._raw_as_string)
results = []
while self._cnx.result_set_available:
result = self._cnx.fetch_eof_columns()
# pylint: disable=W0212
if isinstance(self, (CMySQLCursorDict,
CMySQLCursorBufferedDict)):
cursor_class = CMySQLCursorBufferedDict
elif isinstance(self, (CMySQLCursorNamedTuple,
CMySQLCursorBufferedNamedTuple)):
cursor_class = CMySQLCursorBufferedNamedTuple
elif self._raw:
cursor_class = CMySQLCursorBufferedRaw
else:
cursor_class = CMySQLCursorBuffered
cur = cursor_class(self._cnx._get_self())
cur._executed = "(a result of {0})".format(call)
cur._handle_result(result)
# pylint: enable=W0212
results.append(cur)
self._cnx.next_result()
self._stored_results = results
self._handle_eof()
if argnames:
self.reset()
# Create names aliases to be compatible with namedtuples
args = [
"{} AS {}".format(name, alias) for name, alias in
zip(argtypes, [arg.lstrip("@_") for arg in argnames])
]
select = "SELECT {}".format(",".join(args))
self.execute(select)
return self.fetchone()
return tuple()
except errors.Error:
raise
except Exception as err:
raise errors.InterfaceError(
"Failed calling stored routine; {0}".format(err))
def nextset(self):
"""Skip to the next available result set"""
if not self._cnx.next_result():
self.reset(free=True)
return None
self.reset(free=False)
if not self._cnx.result_set_available:
eof = self._cnx.fetch_eof_status()
self._handle_result(eof)
raise errors.InterfaceError(errno=CR_NO_RESULT_SET)
self._handle_result(self._cnx.fetch_eof_columns())
return True
def fetchall(self):
"""Returns all rows of a query result set
Returns a list of tuples.
"""
self._check_executed()
if not self._cnx.unread_result:
return []
rows = self._cnx.get_rows()
if self._nextrow and self._nextrow[0]:
rows[0].insert(0, self._nextrow[0])
if not rows[0]:
self._handle_eof()
return []
self._rowcount += len(rows[0])
self._handle_eof()
#self._cnx.handle_unread_result()
return rows[0]
def fetchmany(self, size=1):
"""Returns the next set of rows of a result set"""
self._check_executed()
if self._nextrow and self._nextrow[0]:
rows = [self._nextrow[0]]
size -= 1
else:
rows = []
if size and self._cnx.unread_result:
rows.extend(self._cnx.get_rows(size)[0])
if size:
if self._cnx.unread_result:
self._nextrow = self._cnx.get_row()
if self._nextrow and not self._nextrow[0] and \
not self._cnx.more_results:
self._cnx.free_result()
else:
self._nextrow = (None, None)
if not rows:
self._handle_eof()
return []
self._rowcount += len(rows)
return rows
def fetchone(self):
"""Returns next row of a query result set"""
self._check_executed()
row = self._nextrow
if not row and self._cnx.unread_result:
row = self._cnx.get_row()
if row and row[0]:
self._nextrow = self._cnx.get_row()
if not self._nextrow[0] and not self._cnx.more_results:
self._cnx.free_result()
else:
self._handle_eof()
return None
self._rowcount += 1
return row[0]
def __iter__(self):
"""Iteration over the result set
Iteration over the result set which calls self.fetchone()
and returns the next row.
"""
return iter(self.fetchone, None)
def stored_results(self):
"""Returns an iterator for stored results
This method returns an iterator over results which are stored when
callproc() is called. The iterator will provide MySQLCursorBuffered
instances.
        Returns an iterator.
"""
for i in range(len(self._stored_results)):
yield self._stored_results[i]
self._stored_results = []
def __next__(self):
"""Iteration over the result set
Used for iterating over the result set. Calls self.fetchone()
to get the next row.
Raises StopIteration when no more rows are available.
"""
try:
row = self.fetchone()
except errors.InterfaceError:
raise StopIteration
if not row:
raise StopIteration
return row
@property
def column_names(self):
"""Returns column names
This property returns the columns names as a tuple.
Returns a tuple.
"""
if not self.description:
return ()
return tuple([d[0] for d in self.description])
@property
def statement(self):
"""Returns the executed statement
This property returns the executed statement. When multiple
statements were executed, the current statement in the iterator
will be returned.
"""
try:
return self._executed.strip().decode('utf8')
except AttributeError:
return self._executed.strip()
@property
def with_rows(self):
"""Returns whether the cursor could have rows returned
This property returns True when column descriptions are available
and possibly also rows, which will need to be fetched.
Returns True or False.
"""
if self.description:
return True
return False
def __str__(self):
fmt = "{class_name}: {stmt}"
if self._executed:
try:
executed = self._executed.decode('utf-8')
except AttributeError:
executed = self._executed
if len(executed) > 40:
executed = executed[:40] + '..'
else:
executed = '(Nothing executed yet)'
return fmt.format(class_name=self.__class__.__name__, stmt=executed)
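# Illustrative usage sketch: a CMySQLCursor is normally obtained through a
# CMySQLConnection rather than instantiated directly. The connection
# parameters below are hypothetical placeholders.
def _example_cmysql_cursor_usage():
    import mysql.connector  # imported lazily here to avoid a circular import
    cnx = mysql.connector.connect(user="app", password="secret",
                                  database="test", use_pure=False)
    cur = cnx.cursor()  # a CMySQLCursor when the C Extension is in use
    cur.execute("SELECT %s + %s", (1, 2))
    rows = cur.fetchall()
    cur.close()
    cnx.close()
    return rows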
class CMySQLCursorBuffered(CMySQLCursor):
"""Cursor using C Extension buffering results"""
def __init__(self, connection):
"""Initialize"""
super(CMySQLCursorBuffered, self).__init__(connection)
self._rows = None
self._next_row = 0
def _handle_resultset(self):
"""Handle a result set"""
self._rows = self._cnx.get_rows()[0]
self._next_row = 0
self._rowcount = len(self._rows)
self._handle_eof()
def reset(self, free=True):
"""Reset the cursor to default"""
self._rows = None
self._next_row = 0
super(CMySQLCursorBuffered, self).reset(free=free)
def _fetch_row(self):
"""Returns the next row in the result set
Returns a tuple or None.
"""
row = None
try:
row = self._rows[self._next_row]
except IndexError:
return None
else:
self._next_row += 1
return row
def fetchall(self):
self._check_executed()
res = self._rows[self._next_row:]
self._next_row = len(self._rows)
return res
def fetchmany(self, size=1):
self._check_executed()
res = []
cnt = size or self.arraysize
while cnt > 0:
cnt -= 1
row = self._fetch_row()
if row:
res.append(row)
else:
break
return res
def fetchone(self):
self._check_executed()
return self._fetch_row()
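# Illustrative sketch: requesting a buffered cursor via buffered=True makes the
# whole result set be fetched and stored up front, so rowcount is known before
# any fetch call. The connection object is assumed to already exist.
def _example_buffered_cursor(cnx):
    cur = cnx.cursor(buffered=True)
    cur.execute("SELECT 1 UNION SELECT 2")
    count = cur.rowcount  # already populated because rows were pre-fetched
    first = cur.fetchone()
    cur.close()
    return count, first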
class CMySQLCursorRaw(CMySQLCursor):
"""Cursor using C Extension return raw results"""
_raw = True
class CMySQLCursorBufferedRaw(CMySQLCursorBuffered):
"""Cursor using C Extension buffering raw results"""
_raw = True
class CMySQLCursorDict(CMySQLCursor):
"""Cursor using C Extension returning rows as dictionaries"""
_raw = False
    def fetchone(self):
        """Returns next row of a query result set as a dictionary
        """
row = super(CMySQLCursorDict, self).fetchone()
if row:
return dict(zip(self.column_names, row))
return None
def fetchmany(self, size=1):
"""Returns next set of rows as list of dictionaries"""
res = super(CMySQLCursorDict, self).fetchmany(size=size)
return [dict(zip(self.column_names, row)) for row in res]
def fetchall(self):
"""Returns all rows of a query result set as list of dictionaries"""
res = super(CMySQLCursorDict, self).fetchall()
return [dict(zip(self.column_names, row)) for row in res]
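# Illustrative sketch: a dictionary cursor is usually requested through
# cnx.cursor(dictionary=True); each fetched row is then a dict keyed by
# column name. The connection object is assumed to already exist.
def _example_dict_cursor(cnx):
    cur = cnx.cursor(dictionary=True)
    cur.execute("SELECT 1 AS one, 2 AS two")
    row = cur.fetchone()  # e.g. {'one': 1, 'two': 2}
    cur.close()
    return row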
class CMySQLCursorBufferedDict(CMySQLCursorBuffered):
"""Cursor using C Extension buffering and returning rows as dictionaries"""
_raw = False
def _fetch_row(self):
row = super(CMySQLCursorBufferedDict, self)._fetch_row()
if row:
return dict(zip(self.column_names, row))
return None
def fetchall(self):
res = super(CMySQLCursorBufferedDict, self).fetchall()
return [dict(zip(self.column_names, row)) for row in res]
class CMySQLCursorNamedTuple(CMySQLCursor):
"""Cursor using C Extension returning rows as named tuples"""
def _handle_resultset(self):
"""Handle a result set"""
super(CMySQLCursorNamedTuple, self)._handle_resultset()
# pylint: disable=W0201
columns = tuple(self.column_names)
try:
self.named_tuple = NAMED_TUPLE_CACHE[columns]
except KeyError:
self.named_tuple = namedtuple('Row', columns)
NAMED_TUPLE_CACHE[columns] = self.named_tuple
# pylint: enable=W0201
    def fetchone(self):
        """Returns next row of a query result set as a named tuple
        """
row = super(CMySQLCursorNamedTuple, self).fetchone()
if row:
return self.named_tuple(*row)
return None
def fetchmany(self, size=1):
"""Returns next set of rows as list of named tuples"""
res = super(CMySQLCursorNamedTuple, self).fetchmany(size=size)
if not res:
return []
        return [self.named_tuple(*row) for row in res]
def fetchall(self):
"""Returns all rows of a query result set as list of named tuples"""
res = super(CMySQLCursorNamedTuple, self).fetchall()
return [self.named_tuple(*row) for row in res]
class CMySQLCursorBufferedNamedTuple(CMySQLCursorBuffered):
"""Cursor using C Extension buffering and returning rows as named tuples"""
def _handle_resultset(self):
super(CMySQLCursorBufferedNamedTuple, self)._handle_resultset()
# pylint: disable=W0201
self.named_tuple = namedtuple('Row', self.column_names)
# pylint: enable=W0201
def _fetch_row(self):
row = super(CMySQLCursorBufferedNamedTuple, self)._fetch_row()
if row:
return self.named_tuple(*row)
return None
def fetchall(self):
res = super(CMySQLCursorBufferedNamedTuple, self).fetchall()
return [self.named_tuple(*row) for row in res]
class CMySQLCursorPrepared(CMySQLCursor):
"""Cursor using MySQL Prepared Statements"""
def __init__(self, connection):
super(CMySQLCursorPrepared, self).__init__(connection)
self._rows = None
self._rowcount = 0
self._next_row = 0
self._binary = True
self._stmt = None
def _handle_eof(self):
"""Handle EOF packet"""
self._nextrow = (None, None)
self._handle_warnings()
if self._cnx.raise_on_warnings is True and self._warnings:
raise errors.get_mysql_exception(
self._warnings[0][1], self._warnings[0][2])
def _fetch_row(self, raw=False):
"""Returns the next row in the result set
Returns a tuple or None.
"""
if not self._stmt or not self._stmt.have_result_set:
return None
row = None
if self._nextrow == (None, None):
(row, eof) = self._cnx.get_row(
binary=self._binary, columns=self.description, raw=raw,
prep_stmt=self._stmt)
else:
(row, eof) = self._nextrow
if row:
self._nextrow = self._cnx.get_row(
binary=self._binary, columns=self.description, raw=raw,
prep_stmt=self._stmt)
eof = self._nextrow[1]
if eof is not None:
self._warning_count = eof["warning_count"]
self._handle_eof()
if self._rowcount == -1:
self._rowcount = 1
else:
self._rowcount += 1
if eof:
self._warning_count = eof["warning_count"]
self._handle_eof()
return row
    def callproc(self, procname, args=None):
        """Calls a stored procedure
Not supported with CMySQLCursorPrepared.
"""
raise errors.NotSupportedError()
def close(self):
"""Close the cursor
This method will try to deallocate the prepared statement and close
the cursor.
"""
if self._stmt:
self.reset()
self._cnx.cmd_stmt_close(self._stmt)
self._stmt = None
super(CMySQLCursorPrepared, self).close()
def reset(self, free=True):
"""Resets the prepared statement."""
if self._stmt:
self._cnx.cmd_stmt_reset(self._stmt)
super(CMySQLCursorPrepared, self).reset(free=free)
def execute(self, operation, params=None, multi=False): # multi is unused
"""Prepare and execute a MySQL Prepared Statement
This method will prepare the given operation and execute it using
the given parameters.
If the cursor instance already had a prepared statement, it is
first closed.
"""
if not operation:
return
if not self._cnx or self._cnx.is_closed():
raise errors.ProgrammingError("Cursor is not connected", 2055)
self._cnx.handle_unread_result(prepared=True)
if operation is not self._executed:
if self._stmt:
self._cnx.cmd_stmt_close(self._stmt)
self._executed = operation
try:
if not isinstance(operation, bytes):
charset = self._cnx.charset
if charset == "utf8mb4":
charset = "utf8"
operation = operation.encode(charset)
except (UnicodeDecodeError, UnicodeEncodeError) as err:
raise errors.ProgrammingError(str(err))
# need to convert %s to ? before sending it to MySQL
if b"%s" in operation:
operation = re.sub(RE_SQL_FIND_PARAM, b"?", operation)
try:
self._stmt = self._cnx.cmd_stmt_prepare(operation)
except errors.Error:
self._executed = None
self._stmt = None
raise
self._cnx.cmd_stmt_reset(self._stmt)
if self._stmt.param_count > 0 and not params:
return
elif params:
if not isinstance(params, (tuple, list)):
raise errors.ProgrammingError(
errno=1210,
                    msg=f"Incorrect type of argument: {type(params).__name__}({params})"
                        ", the argument given to the prepared statement must be "
                        "of type tuple or list")
if self._stmt.param_count != len(params):
raise errors.ProgrammingError(
errno=1210,
msg="Incorrect number of arguments executing prepared "
"statement")
if params is None:
params = ()
res = self._cnx.cmd_stmt_execute(self._stmt, *params)
if res:
self._handle_result(res)
def executemany(self, operation, seq_params):
"""Prepare and execute a MySQL Prepared Statement many times
This method will prepare the given operation and execute with each
        tuple found in the list seq_params.
If the cursor instance already had a prepared statement, it is
first closed.
"""
rowcnt = 0
try:
for params in seq_params:
self.execute(operation, params)
if self.with_rows:
self.fetchall()
rowcnt += self._rowcount
except (ValueError, TypeError) as err:
raise errors.InterfaceError(
"Failed executing the operation; {error}".format(error=err))
except:
# Raise whatever execute() raises
raise
self._rowcount = rowcnt
def fetchone(self):
"""Returns next row of a query result set
Returns a tuple or None.
"""
self._check_executed()
return self._fetch_row() or None
def fetchmany(self, size=None):
"""Returns the next set of rows of a result set
Returns a list of tuples.
"""
self._check_executed()
res = []
cnt = size or self.arraysize
while cnt > 0 and self._stmt.have_result_set:
cnt -= 1
row = self._fetch_row()
if row:
res.append(row)
return res
def fetchall(self):
"""Returns all rows of a query result set
Returns a list of tuples.
"""
self._check_executed()
if not self._stmt.have_result_set:
return []
rows = self._cnx.get_rows(prep_stmt=self._stmt)
if self._nextrow and self._nextrow[0]:
rows[0].insert(0, self._nextrow[0])
if not rows[0]:
self._handle_eof()
return []
self._rowcount += len(rows[0])
self._handle_eof()
return rows[0]
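# Illustrative sketch: a prepared-statement cursor is requested with
# cursor(prepared=True); the statement is prepared once and then executed
# repeatedly with different parameter tuples. The connection is assumed to
# already exist.
def _example_prepared_cursor(cnx):
    cur = cnx.cursor(prepared=True)
    cur.execute("SELECT %s + %s", (1, 2))  # prepares, binds and executes
    cur.execute("SELECT %s + %s", (3, 4))  # re-uses the prepared statement
    result = cur.fetchall()
    cur.close()
    return result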
| 32.417383 | 88 | 0.576913 |
c5bd00fff979b3eaa3d11b34d77d95a965f81c41 | 542 | py | Python | garagei/torch/modules/reshape.py | artberryx/LSD | 99ee081de2502b4d13c140b474f772db8a5f92fe | ["MIT"] | 7 | 2022-02-01T03:02:24.000Z | 2022-02-10T12:54:05.000Z | garagei/torch/modules/reshape.py | artberryx/LSD | 99ee081de2502b4d13c140b474f772db8a5f92fe | ["MIT"] | null | null | null | garagei/torch/modules/reshape.py | artberryx/LSD | 99ee081de2502b4d13c140b474f772db8a5f92fe | ["MIT"] | 2 | 2022-02-03T03:33:25.000Z | 2022-02-10T12:54:07.000Z | import numpy as np
import torch
from torch import nn
class ReshapeModule(nn.Module):
def __init__(self, shape):
super().__init__()
self.shape = shape
def forward(self, x):
assert np.prod(x.shape[1:]) == np.prod(self.shape)
return x.reshape(-1, *self.shape)
class ViewModule(nn.Module):
def __init__(self, shape):
super().__init__()
self.shape = shape
def forward(self, x):
assert np.prod(x.shape[1:]) == np.prod(self.shape)
return x.view(-1, *self.shape)
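# Illustrative sketch: both modules turn a batch of flat vectors into a fixed
# per-sample shape; ViewModule additionally requires contiguous input because
# it uses Tensor.view. Shapes below are arbitrary examples.
def _example_reshape_modules():
    x = torch.zeros(8, 12)               # batch of 8 flat vectors
    reshaped = ReshapeModule((3, 4))(x)  # -> shape (8, 3, 4)
    viewed = ViewModule((3, 4))(x)       # -> shape (8, 3, 4)
    return reshaped.shape, viewed.shape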
| 21.68 | 58 | 0.607011 |
3184b705d9f26228049b22da9a076f6d64ebdbd4 | 225 | py | Python | src/dat_analysis/analysis_tools/__init__.py | TimChild/dat_analysis | 2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73 | ["MIT"] | 2 | 2021-03-07T03:17:13.000Z | 2021-03-07T03:17:16.000Z | src/dat_analysis/analysis_tools/__init__.py | TimChild/dat_analysis | 2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73 | ["MIT"] | 1 | 2021-03-09T00:00:52.000Z | 2021-03-09T00:00:52.000Z | src/dat_analysis/analysis_tools/__init__.py | TimChild/dat_analysis | 2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73 | ["MIT"] | null | null | null | from .csq_mapping import calculate_csq_map, calculate_csq_mapped_avg, setup_csq_dat, csq_map_data
from . import entropy
from . import general_fitting
from .nrg import NrgUtil, NRGParams, nrg_func, NRGData, NRG_func_generator
| 45 | 97 | 0.848889 |
c1cae7f307a37de6781d271c72c5c68d63533d14 | 3,258 | py | Python | allennlp/modules/scalar_mix.py | cl-tohoku/allennlp | dffb549a3fe613913e97a34772e56a000a6a2860 | ["Apache-2.0"] | null | null | null | allennlp/modules/scalar_mix.py | cl-tohoku/allennlp | dffb549a3fe613913e97a34772e56a000a6a2860 | ["Apache-2.0"] | null | null | null | allennlp/modules/scalar_mix.py | cl-tohoku/allennlp | dffb549a3fe613913e97a34772e56a000a6a2860 | ["Apache-2.0"] | null | null | null | from typing import List
import torch
from torch.nn import ParameterList, Parameter
from allennlp.common.checks import ConfigurationError
class ScalarMix(torch.nn.Module):
"""
Computes a parameterised scalar mixture of N tensors, ``mixture = gamma * sum(s_k * tensor_k)``
where ``s = softmax(w)``, with ``w`` and ``gamma`` scalar parameters.
In addition, if ``do_layer_norm=True`` then apply layer normalization to each tensor
before weighting.
"""
def __init__(self, mixture_size: int, do_layer_norm: bool = False) -> None:
super(ScalarMix, self).__init__()
self.mixture_size = mixture_size
self.do_layer_norm = do_layer_norm
self.scalar_parameters = ParameterList([Parameter(torch.FloatTensor([0.0]))
for _ in range(mixture_size)])
self.gamma = Parameter(torch.FloatTensor([1.0]))
def forward(self, tensors: List[torch.Tensor], # pylint: disable=arguments-differ
mask: torch.Tensor = None) -> torch.Tensor:
"""
        Compute a weighted average of the ``tensors``. The input tensors can be any shape
with at least two dimensions, but must all be the same shape.
When ``do_layer_norm=True``, the ``mask`` is required input. If the ``tensors`` are
dimensioned ``(dim_0, ..., dim_{n-1}, dim_n)``, then the ``mask`` is dimensioned
``(dim_0, ..., dim_{n-1})``, as in the typical case with ``tensors`` of shape
``(batch_size, timesteps, dim)`` and ``mask`` of shape ``(batch_size, timesteps)``.
When ``do_layer_norm=False`` the ``mask`` is ignored.
"""
if len(tensors) != self.mixture_size:
raise ConfigurationError("{} tensors were passed, but the module was initialized to "
"mix {} tensors.".format(len(tensors), self.mixture_size))
def _do_layer_norm(tensor, broadcast_mask, num_elements_not_masked):
tensor_masked = tensor * broadcast_mask
mean = torch.sum(tensor_masked) / num_elements_not_masked
variance = torch.sum(((tensor_masked - mean) * broadcast_mask)**2) / num_elements_not_masked
return (tensor - mean) / torch.sqrt(variance + 1E-12)
normed_weights = torch.nn.functional.softmax(torch.cat([parameter for parameter
in self.scalar_parameters]), dim=0)
normed_weights = torch.split(normed_weights, split_size=1)
if not self.do_layer_norm:
pieces = []
for weight, tensor in zip(normed_weights, tensors):
pieces.append(weight * tensor)
return self.gamma * sum(pieces)
else:
mask_float = mask.float()
broadcast_mask = mask_float.unsqueeze(-1)
input_dim = tensors[0].size(-1)
num_elements_not_masked = torch.sum(mask_float) * input_dim
pieces = []
for weight, tensor in zip(normed_weights, tensors):
pieces.append(weight * _do_layer_norm(tensor,
broadcast_mask, num_elements_not_masked))
return self.gamma * sum(pieces)
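# Illustrative usage sketch (assumes the PyTorch version this module was
# written against): mixing three equally-shaped tensors without layer
# normalization. With freshly initialized weights the result is simply the
# element-wise mean of the inputs (gamma starts at 1).
def _example_scalar_mix():
    mixture = ScalarMix(mixture_size=3)
    tensors = [torch.rand(2, 5, 7) for _ in range(3)]
    mixed = mixture(tensors)  # shape (2, 5, 7)
    return mixed.shape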
| 45.887324 | 104 | 0.606814 |
7eeddfa1ac02b302b51c4f5aea219b75c58fd1e5 | 18,827 | py | Python | certbot/certbot/_internal/cert_manager.py | lpmitchell/certbot | fe0c0dc3ae6c25c6087e51717a223f38a9b23d2f | ["Apache-2.0"] | 16,789 | 2016-05-06T19:49:59.000Z | 2022-03-31T20:01:47.000Z | certbot/certbot/_internal/cert_manager.py | nicholascioli/certbot | 86406ab63aebf463ca4aa0381a55ddeb91231cd2 | ["Apache-2.0"] | 5,737 | 2016-05-06T19:26:43.000Z | 2022-03-31T20:27:38.000Z | certbot/certbot/_internal/cert_manager.py | nicholascioli/certbot | 86406ab63aebf463ca4aa0381a55ddeb91231cd2 | ["Apache-2.0"] | 2,974 | 2016-05-06T19:51:23.000Z | 2022-03-31T21:01:36.000Z | """Tools for managing certificates."""
import datetime
import logging
import re
import traceback
from typing import Any
from typing import Callable
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
from typing import TypeVar
from typing import Union
import pytz
from certbot import configuration
from certbot import crypto_util
from certbot import errors
from certbot import ocsp
from certbot import util
from certbot._internal import storage
from certbot.compat import os
from certbot.display import util as display_util
logger = logging.getLogger(__name__)
###################
# Commands
###################
def update_live_symlinks(config: configuration.NamespaceConfig) -> None:
"""Update the certificate file family symlinks to use archive_dir.
Use the information in the config file to make symlinks point to
the correct archive directory.
.. note:: This assumes that the installation is using a Reverter object.
:param config: Configuration.
:type config: :class:`certbot._internal.configuration.NamespaceConfig`
"""
for renewal_file in storage.renewal_conf_files(config):
storage.RenewableCert(renewal_file, config, update_symlinks=True)
def rename_lineage(config: configuration.NamespaceConfig) -> None:
"""Rename the specified lineage to the new name.
:param config: Configuration.
:type config: :class:`certbot._internal.configuration.NamespaceConfig`
"""
certname = get_certnames(config, "rename")[0]
new_certname = config.new_certname
if not new_certname:
code, new_certname = display_util.input_text(
"Enter the new name for certificate {0}".format(certname),
force_interactive=True)
if code != display_util.OK or not new_certname:
raise errors.Error("User ended interaction.")
lineage = lineage_for_certname(config, certname)
if not lineage:
raise errors.ConfigurationError("No existing certificate with name "
"{0} found.".format(certname))
storage.rename_renewal_config(certname, new_certname, config)
display_util.notification("Successfully renamed {0} to {1}."
.format(certname, new_certname), pause=False)
def certificates(config: configuration.NamespaceConfig) -> None:
"""Display information about certs configured with Certbot
:param config: Configuration.
:type config: :class:`certbot._internal.configuration.NamespaceConfig`
"""
parsed_certs = []
parse_failures = []
for renewal_file in storage.renewal_conf_files(config):
try:
renewal_candidate = storage.RenewableCert(renewal_file, config)
crypto_util.verify_renewable_cert(renewal_candidate)
parsed_certs.append(renewal_candidate)
except Exception as e: # pylint: disable=broad-except
logger.warning("Renewal configuration file %s produced an "
"unexpected error: %s. Skipping.", renewal_file, e)
logger.debug("Traceback was:\n%s", traceback.format_exc())
parse_failures.append(renewal_file)
# Describe all the certs
_describe_certs(config, parsed_certs, parse_failures)
def delete(config: configuration.NamespaceConfig) -> None:
"""Delete Certbot files associated with a certificate lineage."""
certnames = get_certnames(config, "delete", allow_multiple=True)
msg = ["The following certificate(s) are selected for deletion:\n"]
for certname in certnames:
msg.append(" * " + certname)
msg.append(
"\nWARNING: Before continuing, ensure that the listed certificates are not being used "
"by any installed server software (e.g. Apache, nginx, mail servers). Deleting a "
"certificate that is still being used will cause the server software to stop working. "
"See https://certbot.org/deleting-certs for information on deleting certificates safely."
)
msg.append("\nAre you sure you want to delete the above certificate(s)?")
if not display_util.yesno("\n".join(msg), default=True):
logger.info("Deletion of certificate(s) canceled.")
return
for certname in certnames:
storage.delete_files(config, certname)
display_util.notify("Deleted all files relating to certificate {0}."
.format(certname))
###################
# Public Helpers
###################
def lineage_for_certname(cli_config: configuration.NamespaceConfig,
certname: str) -> Optional[storage.RenewableCert]:
"""Find a lineage object with name certname."""
configs_dir = cli_config.renewal_configs_dir
# Verify the directory is there
util.make_or_verify_dir(configs_dir, mode=0o755)
try:
renewal_file = storage.renewal_file_for_certname(cli_config, certname)
except errors.CertStorageError:
return None
try:
return storage.RenewableCert(renewal_file, cli_config)
except (errors.CertStorageError, IOError):
logger.debug("Renewal conf file %s is broken.", renewal_file)
logger.debug("Traceback was:\n%s", traceback.format_exc())
return None
def domains_for_certname(config: configuration.NamespaceConfig,
certname: str) -> Optional[List[str]]:
"""Find the domains in the cert with name certname."""
lineage = lineage_for_certname(config, certname)
return lineage.names() if lineage else None
def find_duplicative_certs(config: configuration.NamespaceConfig,
domains: List[str]) -> Tuple[Optional[storage.RenewableCert],
Optional[storage.RenewableCert]]:
"""Find existing certs that match the given domain names.
This function searches for certificates whose domains are equal to
the `domains` parameter and certificates whose domains are a subset
of the domains in the `domains` parameter. If multiple certificates
are found whose names are a subset of `domains`, the one whose names
are the largest subset of `domains` is returned.
If multiple certificates' domains are an exact match or equally
sized subsets, which matching certificates are returned is
undefined.
:param config: Configuration.
:type config: :class:`certbot._internal.configuration.NamespaceConfig`
:param domains: List of domain names
:type domains: `list` of `str`
:returns: lineages representing the identically matching cert and the
largest subset if they exist
:rtype: `tuple` of `storage.RenewableCert` or `None`
"""
def update_certs_for_domain_matches(candidate_lineage: storage.RenewableCert,
rv: Tuple[Optional[storage.RenewableCert],
Optional[storage.RenewableCert]]
) -> Tuple[Optional[storage.RenewableCert],
Optional[storage.RenewableCert]]:
"""Return cert as identical_names_cert if it matches,
or subset_names_cert if it matches as subset
"""
# TODO: Handle these differently depending on whether they are
# expired or still valid?
identical_names_cert, subset_names_cert = rv
candidate_names = set(candidate_lineage.names())
if candidate_names == set(domains):
identical_names_cert = candidate_lineage
elif candidate_names.issubset(set(domains)):
# This logic finds and returns the largest subset-names cert
# in the case where there are several available.
if subset_names_cert is None:
subset_names_cert = candidate_lineage
elif len(candidate_names) > len(subset_names_cert.names()):
subset_names_cert = candidate_lineage
return (identical_names_cert, subset_names_cert)
init: Tuple[Optional[storage.RenewableCert], Optional[storage.RenewableCert]] = (None, None)
return _search_lineages(config, update_certs_for_domain_matches, init)
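# Illustrative sketch (hypothetical domains): deciding whether an existing
# lineage can be reused or expanded for a requested set of names. A real,
# fully populated NamespaceConfig is required for this to find anything.
def _example_find_duplicative_certs(config: configuration.NamespaceConfig) -> str:
    exact, subset = find_duplicative_certs(config, ["example.com", "www.example.com"])
    if exact is not None:
        return "reuse lineage " + exact.lineagename
    if subset is not None:
        return "expand lineage " + subset.lineagename
    return "no matching lineage; a new certificate is needed"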
def _archive_files(candidate_lineage: storage.RenewableCert, filetype: str) -> Optional[List[str]]:
""" In order to match things like:
/etc/letsencrypt/archive/example.com/chain1.pem.
Anonymous functions which call this function are eventually passed (in a list) to
`match_and_check_overlaps` to help specify the acceptable_matches.
:param `.storage.RenewableCert` candidate_lineage: Lineage whose archive dir is to
be searched.
:param str filetype: main file name prefix e.g. "fullchain" or "chain".
:returns: Files in candidate_lineage's archive dir that match the provided filetype.
:rtype: list of str or None
"""
archive_dir = candidate_lineage.archive_dir
pattern = [os.path.join(archive_dir, f) for f in os.listdir(archive_dir)
if re.match("{0}[0-9]*.pem".format(filetype), f)]
if pattern:
return pattern
return None
def _acceptable_matches() -> List[Union[Callable[[storage.RenewableCert], str],
Callable[[storage.RenewableCert], Optional[List[str]]]]]:
""" Generates the list that's passed to match_and_check_overlaps. Is its own function to
make unit testing easier.
:returns: list of functions
:rtype: list
"""
return [lambda x: x.fullchain_path, lambda x: x.cert_path,
lambda x: _archive_files(x, "cert"), lambda x: _archive_files(x, "fullchain")]
def cert_path_to_lineage(cli_config: configuration.NamespaceConfig) -> str:
""" If config.cert_path is defined, try to find an appropriate value for config.certname.
:param `configuration.NamespaceConfig` cli_config: parsed command line arguments
:returns: a lineage name
:rtype: str
:raises `errors.Error`: If the specified cert path can't be matched to a lineage name.
:raises `errors.OverlappingMatchFound`: If the matched lineage's archive is shared.
"""
acceptable_matches = _acceptable_matches()
match = match_and_check_overlaps(cli_config, acceptable_matches,
lambda x: cli_config.cert_path, lambda x: x.lineagename)
return match[0]
def match_and_check_overlaps(cli_config: configuration.NamespaceConfig,
acceptable_matches: Iterable[Union[
Callable[[storage.RenewableCert], str],
Callable[[storage.RenewableCert], Optional[List[str]]]]],
match_func: Callable[[storage.RenewableCert], str],
rv_func: Callable[[storage.RenewableCert], str]) -> List[str]:
""" Searches through all lineages for a match, and checks for duplicates.
If a duplicate is found, an error is raised, as performing operations on lineages
that have their properties incorrectly duplicated elsewhere is probably a bad idea.
:param `configuration.NamespaceConfig` cli_config: parsed command line arguments
:param list acceptable_matches: a list of functions that specify acceptable matches
:param function match_func: specifies what to match
:param function rv_func: specifies what to return
"""
def find_matches(candidate_lineage: storage.RenewableCert, return_value: List[str],
acceptable_matches: Iterable[Union[
Callable[[storage.RenewableCert], str],
Callable[[storage.RenewableCert], Optional[List[str]]]]]) -> List[str]:
"""Returns a list of matches using _search_lineages."""
acceptable_matches_resolved = [func(candidate_lineage) for func in acceptable_matches]
acceptable_matches_rv: List[str] = []
for item in acceptable_matches_resolved:
if isinstance(item, list):
acceptable_matches_rv += item
elif item:
acceptable_matches_rv.append(item)
match = match_func(candidate_lineage)
if match in acceptable_matches_rv:
return_value.append(rv_func(candidate_lineage))
return return_value
matched: List[str] = _search_lineages(cli_config, find_matches, [], acceptable_matches)
if not matched:
raise errors.Error("No match found for cert-path {0}!".format(cli_config.cert_path))
elif len(matched) > 1:
raise errors.OverlappingMatchFound()
return matched
def human_readable_cert_info(config: configuration.NamespaceConfig, cert: storage.RenewableCert,
skip_filter_checks: bool = False) -> Optional[str]:
""" Returns a human readable description of info about a RenewableCert object"""
certinfo = []
checker = ocsp.RevocationChecker()
if config.certname and cert.lineagename != config.certname and not skip_filter_checks:
return None
if config.domains and not set(config.domains).issubset(cert.names()):
return None
now = pytz.UTC.fromutc(datetime.datetime.utcnow())
reasons = []
if cert.is_test_cert:
reasons.append('TEST_CERT')
if cert.target_expiry <= now:
reasons.append('EXPIRED')
elif checker.ocsp_revoked(cert):
reasons.append('REVOKED')
if reasons:
status = "INVALID: " + ", ".join(reasons)
else:
diff = cert.target_expiry - now
if diff.days == 1:
status = "VALID: 1 day"
elif diff.days < 1:
status = "VALID: {0} hour(s)".format(diff.seconds // 3600)
else:
status = "VALID: {0} days".format(diff.days)
valid_string = "{0} ({1})".format(cert.target_expiry, status)
serial = format(crypto_util.get_serial_from_cert(cert.cert_path), 'x')
certinfo.append(" Certificate Name: {}\n"
" Serial Number: {}\n"
" Key Type: {}\n"
" Domains: {}\n"
" Expiry Date: {}\n"
" Certificate Path: {}\n"
" Private Key Path: {}".format(
cert.lineagename,
serial,
cert.private_key_type,
" ".join(cert.names()),
valid_string,
cert.fullchain,
cert.privkey))
return "".join(certinfo)
def get_certnames(config: configuration.NamespaceConfig, verb: str, allow_multiple: bool = False,
custom_prompt: Optional[str] = None) -> List[str]:
"""Get certname from flag, interactively, or error out."""
certname = config.certname
if certname:
certnames = [certname]
else:
filenames = storage.renewal_conf_files(config)
choices = [storage.lineagename_for_filename(name) for name in filenames]
if not choices:
raise errors.Error("No existing certificates found.")
if allow_multiple:
if not custom_prompt:
prompt = "Which certificate(s) would you like to {0}?".format(verb)
else:
prompt = custom_prompt
code, certnames = display_util.checklist(
prompt, choices, cli_flag="--cert-name", force_interactive=True)
if code != display_util.OK:
raise errors.Error("User ended interaction.")
else:
if not custom_prompt:
prompt = "Which certificate would you like to {0}?".format(verb)
else:
prompt = custom_prompt
code, index = display_util.menu(
prompt, choices, cli_flag="--cert-name", force_interactive=True)
if code != display_util.OK or index not in range(0, len(choices)):
raise errors.Error("User ended interaction.")
certnames = [choices[index]]
return certnames
###################
# Private Helpers
###################
def _report_lines(msgs: Iterable[str]) -> str:
"""Format a results report for a category of single-line renewal outcomes"""
return " " + "\n ".join(str(msg) for msg in msgs)
def _report_human_readable(config: configuration.NamespaceConfig,
parsed_certs: Iterable[storage.RenewableCert]) -> str:
"""Format a results report for a parsed cert"""
certinfo = []
for cert in parsed_certs:
cert_info = human_readable_cert_info(config, cert)
if cert_info is not None:
certinfo.append(cert_info)
return "\n".join(certinfo)
def _describe_certs(config: configuration.NamespaceConfig,
parsed_certs: Iterable[storage.RenewableCert],
parse_failures: Iterable[str]) -> None:
"""Print information about the certs we know about"""
out: List[str] = []
notify = out.append
if not parsed_certs and not parse_failures:
notify("No certificates found.")
else:
if parsed_certs:
match = "matching " if config.certname or config.domains else ""
notify("Found the following {0}certs:".format(match))
notify(_report_human_readable(config, parsed_certs))
if parse_failures:
notify("\nThe following renewal configurations "
"were invalid:")
notify(_report_lines(parse_failures))
display_util.notification("\n".join(out), pause=False, wrap=False)
T = TypeVar('T')
def _search_lineages(cli_config: configuration.NamespaceConfig, func: Callable[..., T],
initial_rv: T, *args: Any) -> T:
"""Iterate func over unbroken lineages, allowing custom return conditions.
Allows flexible customization of return values, including multiple
return values and complex checks.
:param `configuration.NamespaceConfig` cli_config: parsed command line arguments
:param function func: function used while searching over lineages
:param initial_rv: initial return value of the function (any type)
:returns: Whatever was specified by `func` if a match is found.
"""
configs_dir = cli_config.renewal_configs_dir
# Verify the directory is there
util.make_or_verify_dir(configs_dir, mode=0o755)
rv = initial_rv
for renewal_file in storage.renewal_conf_files(cli_config):
try:
candidate_lineage = storage.RenewableCert(renewal_file, cli_config)
except (errors.CertStorageError, IOError):
logger.debug("Renewal conf file %s is broken. Skipping.", renewal_file)
logger.debug("Traceback was:\n%s", traceback.format_exc())
continue
rv = func(candidate_lineage, rv, *args)
return rv
| 41.745011 | 99 | 0.65353 |
6455f39ce870b33430a7b3c16277cec12f106bf7 | 69,529 | py | Python | Python-3.5.5/Lib/logging/__init__.py | it315/PSPNet-Keras-tensorflow | 876448d9c44a8ca475cf0f60f69eb3c72651be87 | ["MIT"] | 10 | 2018-12-18T18:04:28.000Z | 2021-04-23T07:31:13.000Z | Python-3.5.5/Lib/logging/__init__.py | it315/PSPNet-Keras-tensorflow | 876448d9c44a8ca475cf0f60f69eb3c72651be87 | ["MIT"] | 5 | 2021-12-14T20:56:36.000Z | 2021-12-20T14:45:34.000Z | Python-3.5.10/Lib/logging/__init__.py | AtriCZE23/POe-full | 89be2fda5747e44764a62ba5e358d8c9309fbf0a | ["MIT", "CNRI-Python-GPL-Compatible", "BSD-3-Clause"] | 3 | 2019-06-22T14:16:57.000Z | 2021-12-29T22:04:42.000Z | # Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref, collections
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
# The following module attributes are no longer updated.
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelToName = {
CRITICAL: 'CRITICAL',
ERROR: 'ERROR',
WARNING: 'WARNING',
INFO: 'INFO',
DEBUG: 'DEBUG',
NOTSET: 'NOTSET',
}
_nameToLevel = {
'CRITICAL': CRITICAL,
'ERROR': ERROR,
'WARN': WARNING,
'WARNING': WARNING,
'INFO': INFO,
'DEBUG': DEBUG,
'NOTSET': NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
# See Issues #22386, #27937 and #29220 for why it's this way
result = _levelToName.get(level)
if result is not None:
return result
result = _nameToLevel.get(level)
if result is not None:
return result
return "Level %s" % level
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelToName[level] = levelName
_nameToLevel[levelName] = level
finally:
_releaseLock()
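# Illustrative sketch: registering a custom level name and resolving it in
# both directions. The value 15 and the name "TRACE" are arbitrary examples,
# not levels defined by this module.
def _example_custom_level():
    addLevelName(15, "TRACE")
    return getLevelName(15), getLevelName("TRACE")  # -> ("TRACE", 15)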
if hasattr(sys, '_getframe'):
currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except Exception:
return sys.exc_info()[2].tb_frame.f_back
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame, by skipping frames whose filename is that of this
# module's source. It therefore should contain the filename of this module's
# source file.
#
# Ordinarily we would use __file__ for this, but frozen modules don't always
# have __file__ set, for some reason (see Issue #21736). Thus, we get the
# filename from a handy code object from a function defined in this module.
# (There's no particular reason for picking addLevelName.)
#
_srcfile = os.path.normcase(addLevelName.__code__.co_filename)
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called. You can also do this if you want to avoid
# the overhead of fetching caller information, even when _getframe() is
# available.
#if not hasattr(sys, '_getframe'):
# _srcfile = None
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _nameToLevel:
raise ValueError("Unknown level: %r" % level)
rv = _nameToLevel[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
_lock = threading.RLock()
else: #pragma: no cover
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None, sinfo=None, **kwargs):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
# Issue #21172: a request was made to relax the isinstance check
# to hasattr(args[0], '__getitem__'). However, the docs on string
# formatting still seem to suggest a mapping object is required.
# Thus, while not removing the isinstance check, it does now look
# for collections.Mapping rather than, as before, dict.
if (args and len(args) == 1 and isinstance(args[0], collections.Mapping)
and args[0]):
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.stack_info = sinfo
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - int(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and threading:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
else: # pragma: no cover
self.thread = None
self.threadName = None
if not logMultiprocessing: # pragma: no cover
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except Exception: #pragma: no cover
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
__repr__ = __str__
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
msg = str(self.msg)
if self.args:
msg = msg % self.args
return msg
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
"""
Set the factory to be used when instantiating a log record.
:param factory: A callable which will be called to instantiate
a log record.
"""
global _logRecordFactory
_logRecordFactory = factory
def getLogRecordFactory():
"""
Return the factory to be used when instantiating a log record.
"""
return _logRecordFactory
def makeLogRecord(dict):
"""
    Make a LogRecord whose attributes are defined by the specified dictionary.
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
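# Illustrative sketch: rebuilding a LogRecord from a plain dictionary, as a
# socket-based log receiver would after unpickling the sent attributes.
def _example_make_log_record():
    record = makeLogRecord({"name": "receiver", "levelno": INFO,
                            "levelname": "INFO", "msg": "payload from %s",
                            "args": ("remote",)})
    return record.getMessage()  # -> 'payload from remote'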
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
default_format = '%(message)s'
asctime_format = '%(asctime)s'
asctime_search = '%(asctime)'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
def usesTime(self):
return self._fmt.find(self.asctime_search) >= 0
def format(self, record):
return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
default_format = '{message}'
asctime_format = '{asctime}'
asctime_search = '{asctime'
def format(self, record):
return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
default_format = '${message}'
asctime_format = '${asctime}'
asctime_search = '${asctime}'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
self._tpl = Template(self._fmt)
def usesTime(self):
fmt = self._fmt
return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
def format(self, record):
return self._tpl.substitute(**record.__dict__)
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
_STYLES = {
'%': (PercentStyle, BASIC_FORMAT),
'{': (StrFormatStyle, '{levelname}:{name}:{message}'),
'$': (StringTemplateStyle, '${levelname}:${name}:${message}'),
}
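# Illustrative sketch: the same layout expressed in each of the three
# supported format styles ('%', '{' and '$'), applied to a synthetic record.
# Formatter is defined just below; the names and values in the record are
# arbitrary examples.
def _example_format_styles():
    record = makeLogRecord({"name": "demo", "levelno": INFO,
                            "levelname": "INFO", "msg": "hello"})
    formatters = [Formatter(BASIC_FORMAT, style='%'),
                  Formatter('{levelname}:{name}:{message}', style='{'),
                  Formatter('${levelname}:${name}:${message}', style='$')]
    return [f.format(record) for f in formatters]  # each -> 'INFO:demo:hello'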
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
    default value of "%(message)s" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None, style='%'):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
.. versionchanged:: 3.2
Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style][0](fmt)
self._fmt = self._style._fmt
self.datefmt = datefmt
default_time_format = '%Y-%m-%d %H:%M:%S'
default_msec_format = '%s,%03d'
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime(self.default_time_format, ct)
s = self.default_msec_format % (t, record.msecs)
return s
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
def formatMessage(self, record):
return self._style.format(record)
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
time (as determined by a call to usesTime(), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
s = s + record.exc_text
if record.stack_info:
if s[-1:] != "\n":
s = s + "\n"
s = s + self.formatStack(record.stack_info)
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
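# Illustrative sketch (editor addition): as described in formatTime(), replacing the
# 'converter' attribute on a Formatter instance renders its timestamps in GMT/UTC.
def _example_utc_formatter():
    fmt = Formatter('%(asctime)s %(levelname)s %(message)s')
    fmt.converter = time.gmtime
    record = makeLogRecord({'msg': 'service started', 'levelname': 'INFO'})
    return fmt.format(record)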
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
elif self.name == record.name:
return True
elif record.name.find(self.name, 0, self.nlen) != 0:
return False
return (record.name[self.nlen] == ".")
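# Illustrative sketch (editor addition): a Filter initialized with "A.B" passes
# records logged on that channel and its children, but not on look-alike names.
def _example_hierarchy_filter():
    f = Filter('A.B')
    assert f.filter(makeLogRecord({'name': 'A.B'}))
    assert f.filter(makeLogRecord({'name': 'A.B.C'}))
    assert not f.filter(makeLogRecord({'name': 'A.BB'}))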
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
.. versionchanged:: 3.2
Allow filters to be just callables.
"""
rv = True
for f in self.filters:
if hasattr(f, 'filter'):
result = f.filter(record)
else:
result = f(record) # assume callable - will raise if not
if not result:
rv = False
break
return rv
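# Illustrative sketch (editor addition): since 3.2 a bare callable can serve as a
# filter; a falsey return value causes the record to be dropped.
def _example_callable_filter():
    filterer = Filterer()
    filterer.addFilter(lambda record: record.levelno >= ERROR)
    assert filterer.filter(makeLogRecord({'levelno': CRITICAL}))
    assert not filterer.filter(makeLogRecord({'levelno': INFO}))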
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. It can also be called from another thread. So we need to
# pre-emptively grab the necessary globals and check if they're None,
# to prevent race conditions and failures during interpreter shutdown.
acquire, release, handlers = _acquireLock, _releaseLock, _handlerList
if acquire and release and handlers:
acquire()
try:
if wr in handlers:
handlers.remove(wr)
finally:
release()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if threading:
self.lock = threading.RLock()
else: #pragma: no cover
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler. level must be an int or a str.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
t, v, tb = sys.exc_info()
try:
sys.stderr.write('--- Logging error ---\n')
traceback.print_exception(t, v, tb, None, sys.stderr)
sys.stderr.write('Call stack:\n')
# Walk the stack frame up until we're out of logging,
# so as to print the calling context.
frame = tb.tb_frame
while (frame and os.path.dirname(frame.f_code.co_filename) ==
__path__[0]):
frame = frame.f_back
if frame:
traceback.print_stack(frame, file=sys.stderr)
else:
# couldn't find the right stack frame, for some reason
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
# Issue 18671: output logging message and arguments
try:
sys.stderr.write('Message: %r\n'
'Arguments: %s\n' % (record.msg,
record.args))
except Exception:
sys.stderr.write('Unable to print the message and arguments'
' - possible formatting error.\nUse the'
' traceback above to help find the error.\n'
)
except OSError: #pragma: no cover
pass # see issue 5971
finally:
del t, v, tb
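# Illustrative sketch (editor addition, not part of the module): the minimal useful
# Handler subclass only has to override emit(); locking, filtering and formatting
# are inherited from the base class.
class _ExampleListHandler(Handler):
    def __init__(self, level=NOTSET):
        Handler.__init__(self, level)
        self.records = []
    def emit(self, record):
        self.records.append(self.format(record))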
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
terminator = '\n'
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except Exception:
self.handleError(record)
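# Illustrative sketch (editor addition): StreamHandler writes to any object with a
# write() method and never closes it; an in-memory buffer stands in for sys.stderr.
def _example_stream_handler():
    buf = io.StringIO()
    handler = StreamHandler(buf)
    handler.setFormatter(Formatter(BASIC_FORMAT))
    handler.emit(makeLogRecord({'msg': 'hello', 'levelname': 'INFO', 'name': 'demo'}))
    return buf.getvalue()   # 'INFO:demo:hello\n'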
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
try:
if self.stream:
try:
self.flush()
finally:
stream = self.stream
self.stream = None
if hasattr(stream, "close"):
stream.close()
finally:
# Issue #19523: call unconditionally to
# prevent a handler leak when delay is set
StreamHandler.close(self)
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
return open(self.baseFilename, self.mode, encoding=self.encoding)
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
class _StderrHandler(StreamHandler):
"""
This class is like a StreamHandler using sys.stderr, but always uses
whatever sys.stderr is currently set to rather than the value of
sys.stderr at handler construction time.
"""
def __init__(self, level=NOTSET):
"""
Initialize the handler.
"""
Handler.__init__(self, level)
@property
def stream(self):
return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
if alogger not in self.loggerMap:
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = False
self.loggerDict = {}
self.loggerClass = None
self.logRecordFactory = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def setLogRecordFactory(self, factory):
"""
Set the factory to be used when instantiating a log record with this
Manager.
"""
self.logRecordFactory = factory
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
#The if means ... if not c.parent.name.startswith(name)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, exc_info=True, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
self.error(msg, *args, exc_info=exc_info, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
sinfo = None
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, sinfo = self.findCaller(stack_info)
except ValueError: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if isinstance(exc_info, BaseException):
exc_info = (type(exc_info), exc_info, exc_info.__traceback__)
elif not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, sinfo)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0):
if lastResort:
if record.levelno >= lastResort.level:
lastResort.handle(record)
elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
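# Illustrative sketch (editor addition): a Logger subclass installed through
# setLoggerClass() is used for loggers created after the call; the TRACE level and
# logger name are arbitrary, and the previous class is restored afterwards.
def _example_logger_class():
    class TraceLogger(Logger):
        TRACE = 5
        def trace(self, msg, *args, **kwargs):
            if self.isEnabledFor(self.TRACE):
                self._log(self.TRACE, msg, args, **kwargs)
    previous = getLoggerClass()
    setLoggerClass(TraceLogger)
    try:
        return getLogger('example.tracing')
    finally:
        setLoggerClass(previous)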
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger.
"""
self.log(DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger.
"""
self.log(INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger.
"""
self.log(WARNING, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, exc_info=True, **kwargs):
"""
Delegate an exception call to the underlying logger.
"""
self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger.
"""
self.log(CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def setLevel(self, level):
"""
Set the specified level on the underlying logger.
"""
self.logger.setLevel(level)
def getEffectiveLevel(self):
"""
Get the effective level for the underlying logger.
"""
return self.logger.getEffectiveLevel()
def hasHandlers(self):
"""
See if the underlying logger has any handlers.
"""
return self.logger.hasHandlers()
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
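# Illustrative sketch (editor addition): Manager.getLogger() replaces the
# PlaceHolder left by a child logger once the parent is created, re-linking the
# child from the root logger to its real parent (assumes these names are unused).
def _example_placeholder_fixup():
    child = getLogger('example.app.db')
    assert child.parent is root
    parent = getLogger('example.app')
    assert child.parent is parent
    return parent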
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers If specified, this should be an iterable of already created
handlers, which will be added to the root handler. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
.. versionchanged:: 3.2
Added the ``style`` parameter.
.. versionchanged:: 3.3
Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
incompatible arguments (e.g. ``handlers`` specified together with
``filename``/``filemode``, or ``filename``/``filemode`` specified
together with ``stream``, or ``handlers`` specified together with
``stream``).
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
handlers = kwargs.pop("handlers", None)
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
if handlers is None:
filename = kwargs.pop("filename", None)
mode = kwargs.pop("filemode", 'a')
if filename:
h = FileHandler(filename, mode)
else:
stream = kwargs.pop("stream", None)
h = StreamHandler(stream)
handlers = [h]
dfs = kwargs.pop("datefmt", None)
style = kwargs.pop("style", '%')
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
fs = kwargs.pop("format", _STYLES[style][1])
fmt = Formatter(fs, dfs, style)
for h in handlers:
if h.formatter is None:
h.setFormatter(fmt)
root.addHandler(h)
level = kwargs.pop("level", None)
if level is not None:
root.setLevel(level)
if kwargs:
keys = ', '.join(kwargs.keys())
raise ValueError('Unrecognised argument(s): %s' % keys)
finally:
_releaseLock()
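# Illustrative sketch (editor addition): one-shot configuration sending DEBUG and
# higher records to a file using {}-style formatting; the file name and format
# string are arbitrary placeholders, and this has no effect if the root logger
# already has handlers.
def _example_basic_config():
    basicConfig(filename='example.log', filemode='w', level=DEBUG,
                format='{asctime} {levelname} {name}: {message}', style='{')
    root.debug('logging configured')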
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger. If the logger
has no handlers, call basicConfig() to add a console handler with a
pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, exc_info=True, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger, with exception
information. If the logger has no handlers, basicConfig() is called to add
a console handler with a pre-defined format.
"""
error(msg, *args, exc_info=exc_info, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger. If
the logger has no handlers, call basicConfig() to add a console handler
with a pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (OSError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except: # ignore everything, as we're shutting down
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
"""Stub."""
def emit(self, record):
"""Stub."""
def createLock(self):
self.lock = None
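# Illustrative sketch (editor addition): the library-author pattern described above;
# 'mylib' stands in for the library's top-level package name.
def _example_library_setup():
    library_logger = getLogger('mylib')
    library_logger.addHandler(NullHandler())
    return library_logger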
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
Implementation of showwarning which redirects to logging, which will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
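# Illustrative sketch (editor addition): temporarily route warnings.warn() output
# through the 'py.warnings' logger, then restore the original behaviour.
def _example_capture_warnings():
    captureWarnings(True)
    try:
        warnings.warn('this setting is deprecated')
    finally:
        captureWarnings(False)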
| 35.401731 | 89 | 0.591077 |
03f90baae4ff70e1b31d69e4414b67e45ce1f979 | 64,620 | py | Python | discord/abc.py | curiositIy/discord.py | f6a74f74a7aed0879fc086805eae8873e745d0ea | ["MIT"] | 1 | 2021-12-21T16:30:30.000Z | 2021-12-21T16:30:30.000Z | discord/abc.py | curiositIy/discord.py | f6a74f74a7aed0879fc086805eae8873e745d0ea | ["MIT"] | null | null | null | discord/abc.py | curiositIy/discord.py | f6a74f74a7aed0879fc086805eae8873e745d0ea | ["MIT"] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import copy
import time
import asyncio
from datetime import datetime
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterable,
List,
Optional,
TYPE_CHECKING,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
overload,
runtime_checkable,
)
from .object import OLDEST_OBJECT, Object
from .context_managers import Typing
from .enums import ChannelType
from .errors import ClientException
from .mentions import AllowedMentions
from .permissions import PermissionOverwrite, Permissions
from .role import Role
from .invite import Invite
from .file import File
from .http import handle_message_parameters
from .voice_client import VoiceClient, VoiceProtocol
from .sticker import GuildSticker, StickerItem
from . import utils
__all__ = (
'Snowflake',
'User',
'PrivateChannel',
'GuildChannel',
'Messageable',
'Connectable',
)
T = TypeVar('T', bound=VoiceProtocol)
if TYPE_CHECKING:
from typing_extensions import Self
from .client import Client
from .user import ClientUser
from .asset import Asset
from .state import ConnectionState
from .guild import Guild
from .member import Member
from .channel import CategoryChannel
from .embeds import Embed
from .message import Message, MessageReference, PartialMessage
from .channel import TextChannel, DMChannel, GroupChannel, PartialMessageable, VoiceChannel
from .threads import Thread
from .enums import InviteTarget
from .ui.view import View
from .types.channel import (
PermissionOverwrite as PermissionOverwritePayload,
Channel as ChannelPayload,
GuildChannel as GuildChannelPayload,
OverwriteType,
)
from .types.snowflake import (
SnowflakeList,
)
PartialMessageableChannel = Union[TextChannel, VoiceChannel, Thread, DMChannel, PartialMessageable]
MessageableChannel = Union[PartialMessageableChannel, GroupChannel]
SnowflakeTime = Union["Snowflake", datetime]
MISSING = utils.MISSING
class _Undefined:
def __repr__(self) -> str:
return 'see-below'
_undefined: Any = _Undefined()
async def _single_delete_strategy(messages: Iterable[Message], *, reason: Optional[str] = None):
for m in messages:
await m.delete()
async def _purge_helper(
channel: Union[Thread, TextChannel, VoiceChannel],
*,
limit: Optional[int] = 100,
check: Callable[[Message], bool] = MISSING,
before: Optional[SnowflakeTime] = None,
after: Optional[SnowflakeTime] = None,
around: Optional[SnowflakeTime] = None,
oldest_first: Optional[bool] = False,
bulk: bool = True,
reason: Optional[str] = None,
) -> List[Message]:
if check is MISSING:
check = lambda m: True
iterator = channel.history(limit=limit, before=before, after=after, oldest_first=oldest_first, around=around)
ret: List[Message] = []
count = 0
minimum_time = int((time.time() - 14 * 24 * 60 * 60) * 1000.0 - 1420070400000) << 22
strategy = channel.delete_messages if bulk else _single_delete_strategy
async for message in iterator:
if count == 100:
to_delete = ret[-100:]
await strategy(to_delete, reason=reason)
count = 0
await asyncio.sleep(1)
if not check(message):
continue
if message.id < minimum_time:
# older than 14 days old
if count == 1:
await ret[-1].delete()
elif count >= 2:
to_delete = ret[-count:]
await strategy(to_delete, reason=reason)
count = 0
strategy = _single_delete_strategy
count += 1
ret.append(message)
# Some messages remaining to poll
if count >= 2:
# more than 2 messages -> bulk delete
to_delete = ret[-count:]
await strategy(to_delete, reason=reason)
elif count == 1:
# delete a single message
await ret[-1].delete()
return ret
@runtime_checkable
class Snowflake(Protocol):
"""An ABC that details the common operations on a Discord model.
Almost all :ref:`Discord models <discord_api_models>` meet this
abstract base class.
If you want to create a snowflake on your own, consider using
:class:`.Object`.
Attributes
-----------
id: :class:`int`
The model's unique ID.
"""
__slots__ = ()
id: int
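# Illustrative sketch (editor addition, not part of the library): Snowflake is a
# runtime-checkable protocol, so any object exposing an ``id`` attribute -- such as
# discord.Object -- passes an isinstance() check against it (the ID is arbitrary).
def _example_snowflake():
    obj = Object(id=80088516616269824)
    assert isinstance(obj, Snowflake)
    return obj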
@runtime_checkable
class User(Snowflake, Protocol):
"""An ABC that details the common operations on a Discord user.
The following implement this ABC:
- :class:`~discord.User`
- :class:`~discord.ClientUser`
- :class:`~discord.Member`
This ABC must also implement :class:`~discord.abc.Snowflake`.
Attributes
-----------
name: :class:`str`
The user's username.
discriminator: :class:`str`
The user's discriminator.
bot: :class:`bool`
If the user is a bot account.
system: :class:`bool`
If the user is a system account.
"""
__slots__ = ()
name: str
discriminator: str
bot: bool
system: bool
@property
def display_name(self) -> str:
""":class:`str`: Returns the user's display name."""
raise NotImplementedError
@property
def mention(self) -> str:
""":class:`str`: Returns a string that allows you to mention the given user."""
raise NotImplementedError
@property
def avatar(self) -> Optional[Asset]:
"""Optional[:class:`~discord.Asset`]: Returns an Asset that represents the user's avatar, if present."""
raise NotImplementedError
@property
def default_avatar(self) -> Asset:
""":class:`~discord.Asset`: Returns the default avatar for a given user. This is calculated by the user's discriminator."""
raise NotImplementedError
@property
def display_avatar(self) -> Asset:
""":class:`~discord.Asset`: Returns the user's display avatar.
For regular users this is just their default avatar or uploaded avatar.
.. versionadded:: 2.0
"""
raise NotImplementedError
def mentioned_in(self, message: Message) -> bool:
"""Checks if the user is mentioned in the specified message.
Parameters
-----------
message: :class:`~discord.Message`
The message to check if you're mentioned in.
Returns
-------
:class:`bool`
Indicates if the user is mentioned in the message.
"""
raise NotImplementedError
@runtime_checkable
class PrivateChannel(Snowflake, Protocol):
"""An ABC that details the common operations on a private Discord channel.
The following implement this ABC:
- :class:`~discord.DMChannel`
- :class:`~discord.GroupChannel`
This ABC must also implement :class:`~discord.abc.Snowflake`.
Attributes
-----------
me: :class:`~discord.ClientUser`
The user presenting yourself.
"""
__slots__ = ()
me: ClientUser
class _Overwrites:
__slots__ = ('id', 'allow', 'deny', 'type')
ROLE = 0
MEMBER = 1
def __init__(self, data: PermissionOverwritePayload) -> None:
self.id: int = int(data['id'])
self.allow: int = int(data.get('allow', 0))
self.deny: int = int(data.get('deny', 0))
self.type: OverwriteType = data['type']
def _asdict(self) -> PermissionOverwritePayload:
return {
'id': self.id,
'allow': str(self.allow),
'deny': str(self.deny),
'type': self.type,
}
def is_role(self) -> bool:
return self.type == 0
def is_member(self) -> bool:
return self.type == 1
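# Illustrative sketch (editor addition): _Overwrites parses a raw overwrite payload
# (IDs and permission bitfields arrive as strings) and can serialize itself back
# into the wire format; the ID and bit values below are arbitrary.
def _example_overwrite_round_trip():
    raw = {'id': '80088516616269824', 'allow': '1024', 'deny': '2048', 'type': 0}
    ow = _Overwrites(raw)
    assert ow.is_role() and not ow.is_member()
    assert ow._asdict() == {'id': 80088516616269824, 'allow': '1024', 'deny': '2048', 'type': 0}
    return ow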
class GuildChannel:
"""An ABC that details the common operations on a Discord guild channel.
The following implement this ABC:
- :class:`~discord.TextChannel`
- :class:`~discord.VoiceChannel`
- :class:`~discord.CategoryChannel`
- :class:`~discord.StageChannel`
- :class:`~discord.ForumChannel`
This ABC must also implement :class:`~discord.abc.Snowflake`.
Attributes
-----------
name: :class:`str`
The channel name.
guild: :class:`~discord.Guild`
The guild the channel belongs to.
position: :class:`int`
The position in the channel list. This is a number that starts at 0.
e.g. the top channel is position 0.
"""
__slots__ = ()
id: int
name: str
guild: Guild
type: ChannelType
position: int
category_id: Optional[int]
_state: ConnectionState
_overwrites: List[_Overwrites]
if TYPE_CHECKING:
def __init__(self, *, state: ConnectionState, guild: Guild, data: GuildChannelPayload):
...
def __str__(self) -> str:
return self.name
@property
def _sorting_bucket(self) -> int:
raise NotImplementedError
def _update(self, guild: Guild, data: Dict[str, Any]) -> None:
raise NotImplementedError
async def _move(
self,
position: int,
parent_id: Optional[Any] = None,
lock_permissions: bool = False,
*,
reason: Optional[str],
) -> None:
if position < 0:
raise ValueError('Channel position cannot be less than 0.')
http = self._state.http
bucket = self._sorting_bucket
channels: List[GuildChannel] = [c for c in self.guild.channels if c._sorting_bucket == bucket]
channels.sort(key=lambda c: c.position)
try:
# remove ourselves from the channel list
channels.remove(self)
except ValueError:
# not there somehow lol
return
else:
index = next((i for i, c in enumerate(channels) if c.position >= position), len(channels))
# add ourselves at our designated position
channels.insert(index, self)
payload = []
for index, c in enumerate(channels):
d: Dict[str, Any] = {'id': c.id, 'position': index}
if parent_id is not _undefined and c.id == self.id:
d.update(parent_id=parent_id, lock_permissions=lock_permissions)
payload.append(d)
await http.bulk_channel_update(self.guild.id, payload, reason=reason)
async def _edit(self, options: Dict[str, Any], reason: Optional[str]) -> Optional[ChannelPayload]:
try:
parent = options.pop('category')
except KeyError:
parent_id = _undefined
else:
parent_id = parent and parent.id
try:
options['rate_limit_per_user'] = options.pop('slowmode_delay')
except KeyError:
pass
try:
rtc_region = options.pop('rtc_region')
except KeyError:
pass
else:
options['rtc_region'] = None if rtc_region is None else str(rtc_region)
try:
video_quality_mode = options.pop('video_quality_mode')
except KeyError:
pass
else:
options['video_quality_mode'] = int(video_quality_mode)
lock_permissions = options.pop('sync_permissions', False)
try:
position = options.pop('position')
except KeyError:
if parent_id is not _undefined:
if lock_permissions:
category = self.guild.get_channel(parent_id)
if category:
options['permission_overwrites'] = [c._asdict() for c in category._overwrites]
options['parent_id'] = parent_id
elif lock_permissions and self.category_id is not None:
# if we're syncing permissions on a pre-existing channel category without changing it
# we need to update the permissions to point to the pre-existing category
category = self.guild.get_channel(self.category_id)
if category:
options['permission_overwrites'] = [c._asdict() for c in category._overwrites]
else:
await self._move(position, parent_id=parent_id, lock_permissions=lock_permissions, reason=reason)
overwrites = options.get('overwrites', None)
if overwrites is not None:
perms = []
for target, perm in overwrites.items():
if not isinstance(perm, PermissionOverwrite):
raise TypeError(f'Expected PermissionOverwrite received {perm.__class__.__name__}')
allow, deny = perm.pair()
payload = {
'allow': allow.value,
'deny': deny.value,
'id': target.id,
}
if isinstance(target, Role):
payload['type'] = _Overwrites.ROLE
else:
payload['type'] = _Overwrites.MEMBER
perms.append(payload)
options['permission_overwrites'] = perms
try:
ch_type = options['type']
except KeyError:
pass
else:
if not isinstance(ch_type, ChannelType):
raise TypeError('type field must be of type ChannelType')
options['type'] = ch_type.value
if options:
return await self._state.http.edit_channel(self.id, reason=reason, **options)
def _fill_overwrites(self, data: GuildChannelPayload) -> None:
self._overwrites = []
everyone_index = 0
everyone_id = self.guild.id
for index, overridden in enumerate(data.get('permission_overwrites', [])):
overwrite = _Overwrites(overridden)
self._overwrites.append(overwrite)
if overwrite.type == _Overwrites.MEMBER:
continue
if overwrite.id == everyone_id:
# the @everyone role is not guaranteed to be the first one
# in the list of permission overwrites, however the permission
# resolution code kind of requires that it is the first one in
# the list since it is special. So we need the index so we can
# swap it to be the first one.
everyone_index = index
# do the swap
tmp = self._overwrites
if tmp:
tmp[everyone_index], tmp[0] = tmp[0], tmp[everyone_index]
@property
def changed_roles(self) -> List[Role]:
"""List[:class:`~discord.Role`]: Returns a list of roles that have been overridden from
their default values in the :attr:`~discord.Guild.roles` attribute."""
ret = []
g = self.guild
for overwrite in filter(lambda o: o.is_role(), self._overwrites):
role = g.get_role(overwrite.id)
if role is None:
continue
role = copy.copy(role)
role.permissions.handle_overwrite(overwrite.allow, overwrite.deny)
ret.append(role)
return ret
@property
def mention(self) -> str:
""":class:`str`: The string that allows you to mention the channel."""
return f'<#{self.id}>'
@property
def jump_url(self) -> str:
""":class:`str`: Returns a URL that allows the client to jump to the channel.
.. versionadded:: 2.0
"""
return f'https://discord.com/channels/{self.guild.id}/{self.id}'
@property
def created_at(self) -> datetime:
""":class:`datetime.datetime`: Returns the channel's creation time in UTC."""
return utils.snowflake_time(self.id)
def overwrites_for(self, obj: Union[Role, User]) -> PermissionOverwrite:
"""Returns the channel-specific overwrites for a member or a role.
Parameters
-----------
obj: Union[:class:`~discord.Role`, :class:`~discord.abc.User`]
The role or user denoting
whose overwrite to get.
Returns
---------
:class:`~discord.PermissionOverwrite`
The permission overwrites for this object.
"""
if isinstance(obj, User):
predicate = lambda p: p.is_member()
elif isinstance(obj, Role):
predicate = lambda p: p.is_role()
else:
predicate = lambda p: True
for overwrite in filter(predicate, self._overwrites):
if overwrite.id == obj.id:
allow = Permissions(overwrite.allow)
deny = Permissions(overwrite.deny)
return PermissionOverwrite.from_pair(allow, deny)
return PermissionOverwrite()
@property
def overwrites(self) -> Dict[Union[Role, Member], PermissionOverwrite]:
"""Returns all of the channel's overwrites.
This is returned as a dictionary where the key contains the target which
can be either a :class:`~discord.Role` or a :class:`~discord.Member` and the value is the
overwrite as a :class:`~discord.PermissionOverwrite`.
Returns
--------
Dict[Union[:class:`~discord.Role`, :class:`~discord.Member`], :class:`~discord.PermissionOverwrite`]
The channel's permission overwrites.
"""
ret = {}
for ow in self._overwrites:
allow = Permissions(ow.allow)
deny = Permissions(ow.deny)
overwrite = PermissionOverwrite.from_pair(allow, deny)
target = None
if ow.is_role():
target = self.guild.get_role(ow.id)
elif ow.is_member():
target = self.guild.get_member(ow.id)
# TODO: There is potential data loss here in the non-chunked
# case, i.e. target is None because get_member returned nothing.
# This can be fixed with a slight breaking change to the return type,
# i.e. adding discord.Object to the list of it
# However, for now this is an acceptable compromise.
if target is not None:
ret[target] = overwrite
return ret
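    # Illustrative sketch (not part of the library): consuming the mapping built above.
    # ``channel`` is an assumed, already-fetched GuildChannel instance.
    #
    #   for target, overwrite in channel.overwrites.items():
    #       allow, deny = overwrite.pair()
    #       print(target, allow.value, deny.value)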
@property
def category(self) -> Optional[CategoryChannel]:
"""Optional[:class:`~discord.CategoryChannel`]: The category this channel belongs to.
If there is no category then this is ``None``.
"""
return self.guild.get_channel(self.category_id) # type: ignore # These are coerced into CategoryChannel
@property
def permissions_synced(self) -> bool:
""":class:`bool`: Whether or not the permissions for this channel are synced with the
category it belongs to.
If there is no category then this is ``False``.
.. versionadded:: 1.3
"""
if self.category_id is None:
return False
category = self.guild.get_channel(self.category_id)
return bool(category and category.overwrites == self.overwrites)
def permissions_for(self, obj: Union[Member, Role], /) -> Permissions:
"""Handles permission resolution for the :class:`~discord.Member`
or :class:`~discord.Role`.
This function takes into consideration the following cases:
- Guild owner
- Guild roles
- Channel overrides
- Member overrides
- Member timeout
If a :class:`~discord.Role` is passed, then it checks the permissions
someone with that role would have, which is essentially:
- The default role permissions
- The permissions of the role used as a parameter
- The default role permission overwrites
- The permission overwrites of the role used as a parameter
.. versionchanged:: 2.0
The object passed in can now be a role object.
.. versionchanged:: 2.0
``obj`` parameter is now positional-only.
Parameters
----------
obj: Union[:class:`~discord.Member`, :class:`~discord.Role`]
The object to resolve permissions for. This could be either
a member or a role. If it's a role then member overwrites
are not computed.
Returns
-------
:class:`~discord.Permissions`
The resolved permissions for the member or role.
"""
# The current cases can be explained as:
# Guild owner get all permissions -- no questions asked. Otherwise...
# The @everyone role gets the first application.
# After that, the applied roles that the user has in the channel
# (or otherwise) are then OR'd together.
# After the role permissions are resolved, the member permissions
# have to take into effect.
# After all that is done.. you have to do the following:
# If manage permissions is True, then all permissions are set to True.
# The operation first takes into consideration the denied
# and then the allowed.
if self.guild.owner_id == obj.id:
return Permissions.all()
default = self.guild.default_role
base = Permissions(default.permissions.value)
# Handle the role case first
if isinstance(obj, Role):
base.value |= obj._permissions
if base.administrator:
return Permissions.all()
# Apply @everyone allow/deny first since it's special
try:
maybe_everyone = self._overwrites[0]
if maybe_everyone.id == self.guild.id:
base.handle_overwrite(allow=maybe_everyone.allow, deny=maybe_everyone.deny)
except IndexError:
pass
if obj.is_default():
return base
overwrite = utils.get(self._overwrites, type=_Overwrites.ROLE, id=obj.id)
if overwrite is not None:
base.handle_overwrite(overwrite.allow, overwrite.deny)
return base
roles = obj._roles
get_role = self.guild.get_role
# Apply guild roles that the member has.
for role_id in roles:
role = get_role(role_id)
if role is not None:
base.value |= role._permissions
# Guild-wide Administrator -> True for everything
# Bypass all channel-specific overrides
if base.administrator:
return Permissions.all()
# Apply @everyone allow/deny first since it's special
try:
maybe_everyone = self._overwrites[0]
if maybe_everyone.id == self.guild.id:
base.handle_overwrite(allow=maybe_everyone.allow, deny=maybe_everyone.deny)
remaining_overwrites = self._overwrites[1:]
else:
remaining_overwrites = self._overwrites
except IndexError:
remaining_overwrites = self._overwrites
denies = 0
allows = 0
# Apply channel specific role permission overwrites
for overwrite in remaining_overwrites:
if overwrite.is_role() and roles.has(overwrite.id):
denies |= overwrite.deny
allows |= overwrite.allow
base.handle_overwrite(allow=allows, deny=denies)
# Apply member specific permission overwrites
for overwrite in remaining_overwrites:
if overwrite.is_member() and overwrite.id == obj.id:
base.handle_overwrite(allow=overwrite.allow, deny=overwrite.deny)
break
# if you can't send a message in a channel then you can't have certain
# permissions as well
if not base.send_messages:
base.send_tts_messages = False
base.mention_everyone = False
base.embed_links = False
base.attach_files = False
# if you can't read a channel then you have no permissions there
if not base.read_messages:
denied = Permissions.all_channel()
base.value &= ~denied.value
if obj.is_timed_out():
# Timeout leads to every permission except VIEW_CHANNEL and READ_MESSAGE_HISTORY
# being explicitly denied
# N.B.: This *must* come last, because it's a conclusive mask
base.value &= Permissions._timeout_mask()
return base
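    # Illustrative sketch (not part of the library): a typical permission check built on
    # the resolution logic above. ``channel`` and ``member`` are assumed cached objects.
    #
    #   perms = channel.permissions_for(member)
    #   if perms.read_messages and perms.send_messages:
    #       await channel.send('I can talk here.')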
async def delete(self, *, reason: Optional[str] = None) -> None:
"""|coro|
Deletes the channel.
You must have :attr:`~discord.Permissions.manage_channels` permission to use this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this channel.
Shows up on the audit log.
Raises
-------
~discord.Forbidden
You do not have proper permissions to delete the channel.
~discord.NotFound
The channel was not found or was already deleted.
~discord.HTTPException
Deleting the channel failed.
"""
await self._state.http.delete_channel(self.id, reason=reason)
@overload
async def set_permissions(
self,
target: Union[Member, Role],
*,
overwrite: Optional[Union[PermissionOverwrite, _Undefined]] = ...,
reason: Optional[str] = ...,
) -> None:
...
@overload
async def set_permissions(
self,
target: Union[Member, Role],
*,
reason: Optional[str] = ...,
**permissions: Optional[bool],
) -> None:
...
async def set_permissions(
self,
target: Union[Member, Role],
*,
overwrite: Any = _undefined,
reason: Optional[str] = None,
**permissions: Optional[bool],
) -> None:
r"""|coro|
Sets the channel specific permission overwrites for a target in the
channel.
The ``target`` parameter should either be a :class:`~discord.Member` or a
:class:`~discord.Role` that belongs to guild.
The ``overwrite`` parameter, if given, must either be ``None`` or
:class:`~discord.PermissionOverwrite`. For convenience, you can pass in
keyword arguments denoting :class:`~discord.Permissions` attributes. If this is
done, then you cannot mix the keyword arguments with the ``overwrite``
parameter.
If the ``overwrite`` parameter is ``None``, then the permission
overwrites are deleted.
You must have the :attr:`~discord.Permissions.manage_roles` permission to use this.
.. note::
This method *replaces* the old overwrites with the ones given.
Examples
----------
Setting allow and deny: ::
await message.channel.set_permissions(message.author, read_messages=True,
send_messages=False)
Deleting overwrites ::
await channel.set_permissions(member, overwrite=None)
Using :class:`~discord.PermissionOverwrite` ::
overwrite = discord.PermissionOverwrite()
overwrite.send_messages = False
overwrite.read_messages = True
await channel.set_permissions(member, overwrite=overwrite)
.. versionchanged:: 2.0
This function will now raise :exc:`TypeError` instead of
``InvalidArgument``.
Parameters
-----------
target: Union[:class:`~discord.Member`, :class:`~discord.Role`]
The member or role to overwrite permissions for.
overwrite: Optional[:class:`~discord.PermissionOverwrite`]
The permissions to allow and deny to the target, or ``None`` to
delete the overwrite.
\*\*permissions
A keyword argument list of permissions to set for ease of use.
Cannot be mixed with ``overwrite``.
reason: Optional[:class:`str`]
The reason for doing this action. Shows up on the audit log.
Raises
-------
~discord.Forbidden
You do not have permissions to edit channel specific permissions.
~discord.HTTPException
Editing channel specific permissions failed.
~discord.NotFound
The role or member being edited is not part of the guild.
TypeError
The ``overwrite`` parameter was invalid or the target type was not
:class:`~discord.Role` or :class:`~discord.Member`.
ValueError
            The ``overwrite`` parameter and the ``permissions`` keyword
            arguments were both unset.
"""
http = self._state.http
if isinstance(target, User):
perm_type = _Overwrites.MEMBER
elif isinstance(target, Role):
perm_type = _Overwrites.ROLE
else:
raise ValueError('target parameter must be either Member or Role')
if overwrite is _undefined:
if len(permissions) == 0:
raise ValueError('No overwrite provided.')
try:
overwrite = PermissionOverwrite(**permissions)
except (ValueError, TypeError):
raise TypeError('Invalid permissions given to keyword arguments.')
else:
if len(permissions) > 0:
raise TypeError('Cannot mix overwrite and keyword arguments.')
# TODO: wait for event
if overwrite is None:
await http.delete_channel_permissions(self.id, target.id, reason=reason)
elif isinstance(overwrite, PermissionOverwrite):
(allow, deny) = overwrite.pair()
await http.edit_channel_permissions(
self.id, target.id, str(allow.value), str(deny.value), perm_type, reason=reason
)
else:
raise TypeError('Invalid overwrite type provided.')
async def _clone_impl(
self,
base_attrs: Dict[str, Any],
*,
name: Optional[str] = None,
reason: Optional[str] = None,
) -> Self:
base_attrs['permission_overwrites'] = [x._asdict() for x in self._overwrites]
base_attrs['parent_id'] = self.category_id
base_attrs['name'] = name or self.name
guild_id = self.guild.id
cls = self.__class__
data = await self._state.http.create_channel(guild_id, self.type.value, reason=reason, **base_attrs)
obj = cls(state=self._state, guild=self.guild, data=data)
# temporarily add it to the cache
self.guild._channels[obj.id] = obj # type: ignore # obj is a GuildChannel
return obj
async def clone(self, *, name: Optional[str] = None, reason: Optional[str] = None) -> Self:
"""|coro|
Clones this channel. This creates a channel with the same properties
as this channel.
You must have the :attr:`~discord.Permissions.manage_channels` permission to
do this.
.. versionadded:: 1.1
Parameters
------------
name: Optional[:class:`str`]
The name of the new channel. If not provided, defaults to this
channel name.
reason: Optional[:class:`str`]
The reason for cloning this channel. Shows up on the audit log.
Raises
-------
~discord.Forbidden
You do not have the proper permissions to create this channel.
~discord.HTTPException
Creating the channel failed.
Returns
--------
:class:`.abc.GuildChannel`
The channel that was created.
"""
raise NotImplementedError
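    # Illustrative sketch (not part of the library): cloning a channel under a new name.
    # ``channel`` is an assumed concrete GuildChannel subclass instance (e.g. a text channel).
    #
    #   copy = await channel.clone(name='archived-copy', reason='archiving the old channel')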
@overload
async def move(
self,
*,
beginning: bool,
offset: int = MISSING,
category: Optional[Snowflake] = MISSING,
sync_permissions: bool = MISSING,
reason: Optional[str] = MISSING,
) -> None:
...
@overload
async def move(
self,
*,
end: bool,
offset: int = MISSING,
category: Optional[Snowflake] = MISSING,
sync_permissions: bool = MISSING,
reason: str = MISSING,
) -> None:
...
@overload
async def move(
self,
*,
before: Snowflake,
offset: int = MISSING,
category: Optional[Snowflake] = MISSING,
sync_permissions: bool = MISSING,
reason: str = MISSING,
) -> None:
...
@overload
async def move(
self,
*,
after: Snowflake,
offset: int = MISSING,
category: Optional[Snowflake] = MISSING,
sync_permissions: bool = MISSING,
reason: str = MISSING,
) -> None:
...
async def move(self, **kwargs: Any) -> None:
"""|coro|
A rich interface to help move a channel relative to other channels.
If exact position movement is required, ``edit`` should be used instead.
You must have the :attr:`~discord.Permissions.manage_channels` permission to
do this.
.. note::
Voice channels will always be sorted below text channels.
This is a Discord limitation.
.. versionadded:: 1.7
.. versionchanged:: 2.0
This function will now raise :exc:`TypeError` or
:exc:`ValueError` instead of ``InvalidArgument``.
Parameters
------------
beginning: :class:`bool`
Whether to move the channel to the beginning of the
channel list (or category if given).
This is mutually exclusive with ``end``, ``before``, and ``after``.
end: :class:`bool`
Whether to move the channel to the end of the
channel list (or category if given).
This is mutually exclusive with ``beginning``, ``before``, and ``after``.
before: :class:`~discord.abc.Snowflake`
The channel that should be before our current channel.
This is mutually exclusive with ``beginning``, ``end``, and ``after``.
after: :class:`~discord.abc.Snowflake`
The channel that should be after our current channel.
This is mutually exclusive with ``beginning``, ``end``, and ``before``.
offset: :class:`int`
The number of channels to offset the move by. For example,
an offset of ``2`` with ``beginning=True`` would move
it 2 after the beginning. A positive number moves it below
while a negative number moves it above. Note that this
number is relative and computed after the ``beginning``,
``end``, ``before``, and ``after`` parameters.
category: Optional[:class:`~discord.abc.Snowflake`]
The category to move this channel under.
If ``None`` is given then it moves it out of the category.
This parameter is ignored if moving a category channel.
sync_permissions: :class:`bool`
Whether to sync the permissions with the category (if given).
reason: :class:`str`
The reason for the move.
Raises
-------
ValueError
An invalid position was given.
TypeError
A bad mix of arguments were passed.
Forbidden
You do not have permissions to move the channel.
HTTPException
Moving the channel failed.
"""
if not kwargs:
return
beginning, end = kwargs.get('beginning'), kwargs.get('end')
before, after = kwargs.get('before'), kwargs.get('after')
offset = kwargs.get('offset', 0)
if sum(bool(a) for a in (beginning, end, before, after)) > 1:
raise TypeError('Only one of [before, after, end, beginning] can be used.')
bucket = self._sorting_bucket
parent_id = kwargs.get('category', MISSING)
# fmt: off
channels: List[GuildChannel]
if parent_id not in (MISSING, None):
parent_id = parent_id.id
channels = [
ch
for ch in self.guild.channels
if ch._sorting_bucket == bucket
and ch.category_id == parent_id
]
else:
channels = [
ch
for ch in self.guild.channels
if ch._sorting_bucket == bucket
and ch.category_id == self.category_id
]
# fmt: on
channels.sort(key=lambda c: (c.position, c.id))
try:
# Try to remove ourselves from the channel list
channels.remove(self)
except ValueError:
# If we're not there then it's probably due to not being in the category
pass
index = None
if beginning:
index = 0
elif end:
index = len(channels)
elif before:
index = next((i for i, c in enumerate(channels) if c.id == before.id), None)
elif after:
index = next((i + 1 for i, c in enumerate(channels) if c.id == after.id), None)
if index is None:
raise ValueError('Could not resolve appropriate move position')
channels.insert(max((index + offset), 0), self)
payload = []
lock_permissions = kwargs.get('sync_permissions', False)
reason = kwargs.get('reason')
for index, channel in enumerate(channels):
d = {'id': channel.id, 'position': index}
if parent_id is not MISSING and channel.id == self.id:
d.update(parent_id=parent_id, lock_permissions=lock_permissions)
payload.append(d)
await self._state.http.bulk_channel_update(self.guild.id, payload, reason=reason)
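    # Illustrative sketch (not part of the library): moving a channel to the top of a
    # category while syncing permissions. ``channel`` and ``category`` are assumed objects.
    #
    #   await channel.move(beginning=True, category=category, sync_permissions=True,
    #                      reason='reorganising the guild')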
async def create_invite(
self,
*,
reason: Optional[str] = None,
max_age: int = 0,
max_uses: int = 0,
temporary: bool = False,
unique: bool = True,
target_type: Optional[InviteTarget] = None,
target_user: Optional[User] = None,
target_application_id: Optional[int] = None,
) -> Invite:
"""|coro|
Creates an instant invite from a text or voice channel.
You must have the :attr:`~discord.Permissions.create_instant_invite` permission to
do this.
Parameters
------------
max_age: :class:`int`
How long the invite should last in seconds. If it's 0 then the invite
doesn't expire. Defaults to ``0``.
max_uses: :class:`int`
            How many times the invite can be used. If it's 0 then there
            are unlimited uses. Defaults to ``0``.
temporary: :class:`bool`
Denotes that the invite grants temporary membership
(i.e. they get kicked after they disconnect). Defaults to ``False``.
unique: :class:`bool`
Indicates if a unique invite URL should be created. Defaults to True.
If this is set to ``False`` then it will return a previously created
invite.
reason: Optional[:class:`str`]
The reason for creating this invite. Shows up on the audit log.
target_type: Optional[:class:`.InviteTarget`]
The type of target for the voice channel invite, if any.
.. versionadded:: 2.0
target_user: Optional[:class:`User`]
            The user whose stream to display for this invite, required if ``target_type`` is ``InviteTarget.stream``. The user must be streaming in the channel.
.. versionadded:: 2.0
        target_application_id: Optional[:class:`int`]
            The id of the embedded application for the invite, required if ``target_type`` is ``InviteTarget.embedded_application``.
.. versionadded:: 2.0
Raises
-------
~discord.HTTPException
Invite creation failed.
~discord.NotFound
The channel that was passed is a category or an invalid channel.
Returns
--------
:class:`~discord.Invite`
The invite that was created.
"""
data = await self._state.http.create_invite(
self.id,
reason=reason,
max_age=max_age,
max_uses=max_uses,
temporary=temporary,
unique=unique,
target_type=target_type.value if target_type else None,
target_user_id=target_user.id if target_user else None,
target_application_id=target_application_id,
)
return Invite.from_incomplete(data=data, state=self._state)
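    # Illustrative sketch (not part of the library): a single-use invite that expires after
    # an hour. ``channel`` is an assumed text or voice channel; ``invite.url`` is assumed
    # from the wider library.
    #
    #   invite = await channel.create_invite(max_age=3600, max_uses=1, reason='welcome link')
    #   await channel.send(invite.url)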
async def invites(self) -> List[Invite]:
"""|coro|
Returns a list of all active instant invites from this channel.
You must have :attr:`~discord.Permissions.manage_channels` to get this information.
Raises
-------
~discord.Forbidden
You do not have proper permissions to get the information.
~discord.HTTPException
An error occurred while fetching the information.
Returns
-------
List[:class:`~discord.Invite`]
The list of invites that are currently active.
"""
state = self._state
data = await state.http.invites_from_channel(self.id)
guild = self.guild
return [Invite(state=state, data=invite, channel=self, guild=guild) for invite in data]
class Messageable:
"""An ABC that details the common operations on a model that can send messages.
The following classes implement this ABC:
- :class:`~discord.TextChannel`
- :class:`~discord.VoiceChannel`
- :class:`~discord.DMChannel`
- :class:`~discord.GroupChannel`
- :class:`~discord.PartialMessageable`
- :class:`~discord.User`
- :class:`~discord.Member`
- :class:`~discord.ext.commands.Context`
- :class:`~discord.Thread`
"""
__slots__ = ()
_state: ConnectionState
async def _get_channel(self) -> MessageableChannel:
raise NotImplementedError
@overload
async def send(
self,
content: Optional[str] = ...,
*,
tts: bool = ...,
embed: Embed = ...,
file: File = ...,
stickers: Sequence[Union[GuildSticker, StickerItem]] = ...,
delete_after: float = ...,
nonce: Union[str, int] = ...,
allowed_mentions: AllowedMentions = ...,
reference: Union[Message, MessageReference, PartialMessage] = ...,
mention_author: bool = ...,
view: View = ...,
suppress_embeds: bool = ...,
) -> Message:
...
@overload
async def send(
self,
content: Optional[str] = ...,
*,
tts: bool = ...,
embed: Embed = ...,
files: Sequence[File] = ...,
stickers: Sequence[Union[GuildSticker, StickerItem]] = ...,
delete_after: float = ...,
nonce: Union[str, int] = ...,
allowed_mentions: AllowedMentions = ...,
reference: Union[Message, MessageReference, PartialMessage] = ...,
mention_author: bool = ...,
view: View = ...,
suppress_embeds: bool = ...,
) -> Message:
...
@overload
async def send(
self,
content: Optional[str] = ...,
*,
tts: bool = ...,
embeds: Sequence[Embed] = ...,
file: File = ...,
stickers: Sequence[Union[GuildSticker, StickerItem]] = ...,
delete_after: float = ...,
nonce: Union[str, int] = ...,
allowed_mentions: AllowedMentions = ...,
reference: Union[Message, MessageReference, PartialMessage] = ...,
mention_author: bool = ...,
view: View = ...,
suppress_embeds: bool = ...,
) -> Message:
...
@overload
async def send(
self,
content: Optional[str] = ...,
*,
tts: bool = ...,
embeds: Sequence[Embed] = ...,
files: Sequence[File] = ...,
stickers: Sequence[Union[GuildSticker, StickerItem]] = ...,
delete_after: float = ...,
nonce: Union[str, int] = ...,
allowed_mentions: AllowedMentions = ...,
reference: Union[Message, MessageReference, PartialMessage] = ...,
mention_author: bool = ...,
view: View = ...,
suppress_embeds: bool = ...,
) -> Message:
...
async def send(
self,
content: Optional[str] = None,
*,
tts: bool = False,
embed: Optional[Embed] = None,
embeds: Optional[Sequence[Embed]] = None,
file: Optional[File] = None,
files: Optional[Sequence[File]] = None,
stickers: Optional[Sequence[Union[GuildSticker, StickerItem]]] = None,
delete_after: Optional[float] = None,
nonce: Optional[Union[str, int]] = None,
allowed_mentions: Optional[AllowedMentions] = None,
reference: Optional[Union[Message, MessageReference, PartialMessage]] = None,
mention_author: Optional[bool] = None,
view: Optional[View] = None,
suppress_embeds: bool = False,
) -> Message:
"""|coro|
Sends a message to the destination with the content given.
The content must be a type that can convert to a string through ``str(content)``.
If the content is set to ``None`` (the default), then the ``embed`` parameter must
be provided.
To upload a single file, the ``file`` parameter should be used with a
single :class:`~discord.File` object. To upload multiple files, the ``files``
parameter should be used with a :class:`list` of :class:`~discord.File` objects.
**Specifying both parameters will lead to an exception**.
To upload a single embed, the ``embed`` parameter should be used with a
single :class:`~discord.Embed` object. To upload multiple embeds, the ``embeds``
parameter should be used with a :class:`list` of :class:`~discord.Embed` objects.
**Specifying both parameters will lead to an exception**.
.. versionchanged:: 2.0
This function will now raise :exc:`TypeError` or
:exc:`ValueError` instead of ``InvalidArgument``.
Parameters
------------
content: Optional[:class:`str`]
The content of the message to send.
tts: :class:`bool`
Indicates if the message should be sent using text-to-speech.
embed: :class:`~discord.Embed`
The rich embed for the content.
embeds: List[:class:`~discord.Embed`]
A list of embeds to upload. Must be a maximum of 10.
.. versionadded:: 2.0
file: :class:`~discord.File`
The file to upload.
files: List[:class:`~discord.File`]
A list of files to upload. Must be a maximum of 10.
nonce: :class:`int`
The nonce to use for sending this message. If the message was successfully sent,
then the message will have a nonce with this value.
delete_after: :class:`float`
If provided, the number of seconds to wait in the background
before deleting the message we just sent. If the deletion fails,
then it is silently ignored.
allowed_mentions: :class:`~discord.AllowedMentions`
Controls the mentions being processed in this message. If this is
passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.
The merging behaviour only overrides attributes that have been explicitly passed
to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.
If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`
are used instead.
.. versionadded:: 1.4
reference: Union[:class:`~discord.Message`, :class:`~discord.MessageReference`, :class:`~discord.PartialMessage`]
A reference to the :class:`~discord.Message` to which you are replying, this can be created using
:meth:`~discord.Message.to_reference` or passed directly as a :class:`~discord.Message`. You can control
whether this mentions the author of the referenced message using the :attr:`~discord.AllowedMentions.replied_user`
attribute of ``allowed_mentions`` or by setting ``mention_author``.
.. versionadded:: 1.6
mention_author: Optional[:class:`bool`]
If set, overrides the :attr:`~discord.AllowedMentions.replied_user` attribute of ``allowed_mentions``.
.. versionadded:: 1.6
view: :class:`discord.ui.View`
A Discord UI View to add to the message.
.. versionadded:: 2.0
stickers: Sequence[Union[:class:`~discord.GuildSticker`, :class:`~discord.StickerItem`]]
A list of stickers to upload. Must be a maximum of 3.
.. versionadded:: 2.0
suppress_embeds: :class:`bool`
Whether to suppress embeds for the message. This sends the message without any embeds if set to ``True``.
.. versionadded:: 2.0
Raises
--------
~discord.HTTPException
Sending the message failed.
~discord.Forbidden
You do not have the proper permissions to send the message.
ValueError
The ``files`` or ``embeds`` list is not of the appropriate size.
TypeError
You specified both ``file`` and ``files``,
or you specified both ``embed`` and ``embeds``,
or the ``reference`` object is not a :class:`~discord.Message`,
:class:`~discord.MessageReference` or :class:`~discord.PartialMessage`.
Returns
---------
:class:`~discord.Message`
The message that was sent.
"""
channel = await self._get_channel()
state = self._state
content = str(content) if content is not None else None
previous_allowed_mention = state.allowed_mentions
if stickers is not None:
sticker_ids: SnowflakeList = [sticker.id for sticker in stickers]
else:
sticker_ids = MISSING
if reference is not None:
try:
reference_dict = reference.to_message_reference_dict()
except AttributeError:
raise TypeError('reference parameter must be Message, MessageReference, or PartialMessage') from None
else:
reference_dict = MISSING
if view and not hasattr(view, '__discord_ui_view__'):
raise TypeError(f'view parameter must be View not {view.__class__!r}')
if suppress_embeds:
from .message import MessageFlags # circular import
flags = MessageFlags._from_value(4)
else:
flags = MISSING
with handle_message_parameters(
content=content,
tts=tts,
file=file if file is not None else MISSING,
files=files if files is not None else MISSING,
embed=embed if embed is not None else MISSING,
embeds=embeds if embeds is not None else MISSING,
nonce=nonce,
allowed_mentions=allowed_mentions,
message_reference=reference_dict,
previous_allowed_mentions=previous_allowed_mention,
mention_author=mention_author,
stickers=sticker_ids,
view=view,
flags=flags,
) as params:
data = await state.http.send_message(channel.id, params=params)
ret = state.create_message(channel=channel, data=data)
if view:
state.store_view(view, ret.id)
if delete_after is not None:
await ret.delete(delay=delete_after)
return ret
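    # Illustrative sketch (not part of the library): replying to a message with an embed and
    # a file attachment. ``message`` is an assumed discord.Message; the file path is made up.
    #
    #   embed = discord.Embed(title='Report')
    #   file = discord.File('report.txt')
    #   await message.channel.send('Done!', embed=embed, file=file,
    #                              reference=message, mention_author=False)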
def typing(self) -> Typing:
"""Returns an asynchronous context manager that allows you to send a typing indicator to
the destination for an indefinite period of time, or 10 seconds if the context manager
is called using ``await``.
Example Usage: ::
async with channel.typing():
# simulate something heavy
await asyncio.sleep(20)
await channel.send('Done!')
Example Usage: ::
await channel.typing()
# Do some computational magic for about 10 seconds
await channel.send('Done!')
.. versionchanged:: 2.0
This no longer works with the ``with`` syntax, ``async with`` must be used instead.
.. versionchanged:: 2.0
Added functionality to ``await`` the context manager to send a typing indicator for 10 seconds.
"""
return Typing(self)
async def fetch_message(self, id: int, /) -> Message:
"""|coro|
Retrieves a single :class:`~discord.Message` from the destination.
Parameters
------------
id: :class:`int`
The message ID to look for.
Raises
--------
~discord.NotFound
The specified message was not found.
~discord.Forbidden
You do not have the permissions required to get a message.
~discord.HTTPException
Retrieving the message failed.
Returns
--------
:class:`~discord.Message`
The message asked for.
"""
channel = await self._get_channel()
data = await self._state.http.get_message(channel.id, id)
return self._state.create_message(channel=channel, data=data)
async def pins(self) -> List[Message]:
"""|coro|
Retrieves all messages that are currently pinned in the channel.
.. note::
Due to a limitation with the Discord API, the :class:`.Message`
objects returned by this method do not contain complete
:attr:`.Message.reactions` data.
Raises
-------
~discord.HTTPException
Retrieving the pinned messages failed.
Returns
--------
List[:class:`~discord.Message`]
The messages that are currently pinned.
"""
channel = await self._get_channel()
state = self._state
data = await state.http.pins_from(channel.id)
return [state.create_message(channel=channel, data=m) for m in data]
async def history(
self,
*,
limit: Optional[int] = 100,
before: Optional[SnowflakeTime] = None,
after: Optional[SnowflakeTime] = None,
around: Optional[SnowflakeTime] = None,
oldest_first: Optional[bool] = None,
) -> AsyncIterator[Message]:
"""Returns an :term:`asynchronous iterator` that enables receiving the destination's message history.
You must have :attr:`~discord.Permissions.read_message_history` permissions to use this.
Examples
---------
Usage ::
counter = 0
async for message in channel.history(limit=200):
if message.author == client.user:
counter += 1
Flattening into a list: ::
messages = [message async for message in channel.history(limit=123)]
# messages is now a list of Message...
All parameters are optional.
Parameters
-----------
limit: Optional[:class:`int`]
The number of messages to retrieve.
If ``None``, retrieves every message in the channel. Note, however,
that this would make it a slow operation.
before: Optional[Union[:class:`~discord.abc.Snowflake`, :class:`datetime.datetime`]]
Retrieve messages before this date or message.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
after: Optional[Union[:class:`~discord.abc.Snowflake`, :class:`datetime.datetime`]]
Retrieve messages after this date or message.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
around: Optional[Union[:class:`~discord.abc.Snowflake`, :class:`datetime.datetime`]]
Retrieve messages around this date or message.
If a datetime is provided, it is recommended to use a UTC aware datetime.
If the datetime is naive, it is assumed to be local time.
When using this argument, the maximum limit is 101. Note that if the limit is an
even number then this will return at most limit + 1 messages.
oldest_first: Optional[:class:`bool`]
If set to ``True``, return messages in oldest->newest order. Defaults to ``True`` if
``after`` is specified, otherwise ``False``.
Raises
------
~discord.Forbidden
You do not have permissions to get channel message history.
~discord.HTTPException
The request to get message history failed.
Yields
-------
:class:`~discord.Message`
The message with the message data parsed.
"""
async def _around_strategy(retrieve, around, limit):
if not around:
return []
around_id = around.id if around else None
data = await self._state.http.logs_from(channel.id, retrieve, around=around_id)
return data, None, limit
async def _after_strategy(retrieve, after, limit):
after_id = after.id if after else None
data = await self._state.http.logs_from(channel.id, retrieve, after=after_id)
if data:
if limit is not None:
limit -= len(data)
after = Object(id=int(data[0]['id']))
return data, after, limit
async def _before_strategy(retrieve, before, limit):
before_id = before.id if before else None
data = await self._state.http.logs_from(channel.id, retrieve, before=before_id)
if data:
if limit is not None:
limit -= len(data)
before = Object(id=int(data[-1]['id']))
return data, before, limit
if isinstance(before, datetime):
before = Object(id=utils.time_snowflake(before, high=False))
if isinstance(after, datetime):
after = Object(id=utils.time_snowflake(after, high=True))
if isinstance(around, datetime):
around = Object(id=utils.time_snowflake(around))
if oldest_first is None:
reverse = after is not None
else:
reverse = oldest_first
after = after or OLDEST_OBJECT
predicate = None
if around:
if limit is None:
raise ValueError('history does not support around with limit=None')
if limit > 101:
raise ValueError("history max limit 101 when specifying around parameter")
# Strange Discord quirk
limit = 100 if limit == 101 else limit
strategy, state = _around_strategy, around
if before and after:
predicate = lambda m: after.id < int(m['id']) < before.id
elif before:
predicate = lambda m: int(m['id']) < before.id
elif after:
predicate = lambda m: after.id < int(m['id'])
elif reverse:
strategy, state = _after_strategy, after
if before:
predicate = lambda m: int(m['id']) < before.id
else:
strategy, state = _before_strategy, before
if after and after != OLDEST_OBJECT:
predicate = lambda m: int(m['id']) > after.id
channel = await self._get_channel()
while True:
retrieve = min(100 if limit is None else limit, 100)
if retrieve < 1:
return
data, state, limit = await strategy(retrieve, state, limit)
# Terminate loop on next iteration; there's no data left after this
if len(data) < 100:
limit = 0
if reverse:
data = reversed(data)
if predicate:
data = filter(predicate, data)
for raw_message in data:
yield self._state.create_message(channel=channel, data=raw_message)
class Connectable(Protocol):
"""An ABC that details the common operations on a channel that can
connect to a voice server.
The following implement this ABC:
- :class:`~discord.VoiceChannel`
- :class:`~discord.StageChannel`
"""
__slots__ = ()
_state: ConnectionState
def _get_voice_client_key(self) -> Tuple[int, str]:
raise NotImplementedError
def _get_voice_state_pair(self) -> Tuple[int, int]:
raise NotImplementedError
async def connect(
self,
*,
timeout: float = 60.0,
reconnect: bool = True,
cls: Callable[[Client, Connectable], T] = VoiceClient,
self_deaf: bool = False,
self_mute: bool = False,
) -> T:
"""|coro|
Connects to voice and creates a :class:`~discord.VoiceClient` to establish
your connection to the voice server.
This requires :attr:`~discord.Intents.voice_states`.
Parameters
-----------
timeout: :class:`float`
The timeout in seconds to wait for the voice endpoint.
reconnect: :class:`bool`
Whether the bot should automatically attempt
a reconnect if a part of the handshake fails
or the gateway goes down.
cls: Type[:class:`~discord.VoiceProtocol`]
A type that subclasses :class:`~discord.VoiceProtocol` to connect with.
Defaults to :class:`~discord.VoiceClient`.
self_mute: :class:`bool`
Indicates if the client should be self-muted.
.. versionadded:: 2.0
self_deaf: :class:`bool`
Indicates if the client should be self-deafened.
.. versionadded:: 2.0
Raises
-------
asyncio.TimeoutError
Could not connect to the voice channel in time.
~discord.ClientException
You are already connected to a voice channel.
~discord.opus.OpusNotLoaded
The opus library has not been loaded.
Returns
--------
:class:`~discord.VoiceProtocol`
A voice client that is fully connected to the voice server.
"""
key_id, _ = self._get_voice_client_key()
state = self._state
if state._get_voice_client(key_id):
raise ClientException('Already connected to a voice channel.')
client = state._get_client()
voice: T = cls(client, self)
if not isinstance(voice, VoiceProtocol):
raise TypeError('Type must meet VoiceProtocol abstract base class.')
state._add_voice_client(key_id, voice)
try:
await voice.connect(timeout=timeout, reconnect=reconnect, self_deaf=self_deaf, self_mute=self_mute)
except asyncio.TimeoutError:
try:
await voice.disconnect(force=True)
except Exception:
# we don't care if disconnect failed because connection failed
pass
raise # re-raise
return voice
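    # Illustrative sketch (not part of the library): joining and leaving voice.
    # ``voice_channel`` is an assumed VoiceChannel instance.
    #
    #   vc = await voice_channel.connect(timeout=30.0, self_deaf=True)
    #   # ... play audio ...
    #   await vc.disconnect(force=False)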
| 34.226695 | 154 | 0.595946 |
6855f86f9dbd0042e9b7c5f640f4f0a0337b4b1e | 12,116 | py | Python | feedparser/api.py | alexreg/feedparser | 8a2a71b72d5b1454d456759d6270fa1dd18dfc4f | [
"BSD-2-Clause"
] | 1,452 | 2015-01-06T07:56:10.000Z | 2022-03-31T11:20:18.000Z | feedparser/api.py | alexreg/feedparser | 8a2a71b72d5b1454d456759d6270fa1dd18dfc4f | [
"BSD-2-Clause"
] | 279 | 2015-02-02T22:32:10.000Z | 2022-03-29T21:45:56.000Z | feedparser/api.py | alexreg/feedparser | 8a2a71b72d5b1454d456759d6270fa1dd18dfc4f | [
"BSD-2-Clause"
] | 325 | 2015-02-03T21:28:01.000Z | 2022-03-24T23:01:41.000Z | # The public API for feedparser
# Copyright 2010-2021 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import datetime
import io
import time
from typing import Dict, List, Union
import urllib.error
import urllib.parse
import xml.sax
from .datetimes import registerDateHandler, _parse_date
from .encodings import convert_to_utf8
from .html import BaseHTMLProcessor
from . import http
from .mixin import XMLParserMixin
from .parsers.loose import LooseXMLParser
from .parsers.strict import StrictXMLParser
from .parsers.json import JSONParser
from .sanitizer import replace_doctype
from .urls import convert_to_idn, make_safe_absolute_uri
from .util import FeedParserDict
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
_XML_AVAILABLE = True
SUPPORTED_VERSIONS = {
'': 'unknown',
'rss090': 'RSS 0.90',
'rss091n': 'RSS 0.91 (Netscape)',
'rss091u': 'RSS 0.91 (Userland)',
'rss092': 'RSS 0.92',
'rss093': 'RSS 0.93',
'rss094': 'RSS 0.94',
'rss20': 'RSS 2.0',
'rss10': 'RSS 1.0',
'rss': 'RSS (unknown version)',
'atom01': 'Atom 0.1',
'atom02': 'Atom 0.2',
'atom03': 'Atom 0.3',
'atom10': 'Atom 1.0',
'atom': 'Atom (unknown version)',
'cdf': 'CDF',
'json1': 'JSON feed 1',
}
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers, result):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner. The data of the resource is
    returned as a bytes object, regardless of the source.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
    If request_headers is supplied, it is a dictionary of HTTP request headers
    that will override the values generated by FeedParser.
:return: A bytes object.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string.read()
if isinstance(url_file_stream_or_string, str) \
and urllib.parse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
return http.get(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers, result)
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
with open(url_file_stream_or_string, 'rb') as f:
data = f.read()
except (IOError, UnicodeEncodeError, TypeError, ValueError):
# if url_file_stream_or_string is a str object that
# cannot be converted to the encoding returned by
# sys.getfilesystemencoding(), a UnicodeEncodeError
# will be thrown
# If url_file_stream_or_string is a string that contains NULL
# (such as an XML document encoded in UTF-32), TypeError will
# be thrown.
pass
else:
return data
# treat url_file_stream_or_string as string
if not isinstance(url_file_stream_or_string, bytes):
return url_file_stream_or_string.encode('utf-8')
return url_file_stream_or_string
class LooseFeedParser(LooseXMLParser, XMLParserMixin, BaseHTMLProcessor):
pass
class StrictFeedParser(StrictXMLParser, XMLParserMixin, xml.sax.handler.ContentHandler):
pass
def parse(
url_file_stream_or_string,
etag: str = None,
modified: Union[str, datetime.datetime, time.struct_time] = None,
agent: str = None,
referrer: str = None,
handlers: List = None,
request_headers: Dict[str, str] = None,
response_headers: Dict[str, str] = None,
resolve_relative_uris: bool = None,
sanitize_html: bool = None,
) -> FeedParserDict:
"""Parse a feed from a URL, file, stream, or string.
:param url_file_stream_or_string:
File-like object, URL, file path, or string. Both byte and text strings
are accepted. If necessary, encoding will be derived from the response
headers or automatically detected.
Note that strings may trigger network I/O or filesystem access
depending on the value. Wrap an untrusted string in
a :class:`io.StringIO` or :class:`io.BytesIO` to avoid this. Do not
pass untrusted strings to this function.
When a URL is not passed the feed location to use in relative URL
resolution should be passed in the ``Content-Location`` response header
(see ``response_headers`` below).
:param etag:
HTTP ``ETag`` request header.
:param modified:
HTTP ``Last-Modified`` request header.
:param agent:
HTTP ``User-Agent`` request header, which defaults to
the value of :data:`feedparser.USER_AGENT`.
:param referrer:
HTTP ``Referer`` [sic] request header.
:param handlers:
A list of handlers that will be passed to urllib2.
:param request_headers:
A mapping of HTTP header name to HTTP header value to add to the
request, overriding internally generated values.
:param response_headers:
A mapping of HTTP header name to HTTP header value. Multiple values may
        be joined with a comma. If an HTTP request was made, these headers
override any matching headers in the response. Otherwise this specifies
the entirety of the response headers.
:param resolve_relative_uris:
        Should feedparser attempt to resolve relative URIs to absolute ones within
HTML content? Defaults to the value of
:data:`feedparser.RESOLVE_RELATIVE_URIS`, which is ``True``.
:param sanitize_html:
        Should feedparser sanitize HTML content? Only disable this if you know
what you are doing! Defaults to the value of
:data:`feedparser.SANITIZE_HTML`, which is ``True``.
"""
# Avoid a cyclic import.
if not agent:
import feedparser
agent = feedparser.USER_AGENT
if sanitize_html is None:
import feedparser
sanitize_html = bool(feedparser.SANITIZE_HTML)
if resolve_relative_uris is None:
import feedparser
resolve_relative_uris = bool(feedparser.RESOLVE_RELATIVE_URIS)
result = FeedParserDict(
bozo=False,
entries=[],
feed=FeedParserDict(),
headers={},
)
try:
data = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers, result)
except urllib.error.URLError as error:
result.update({
'bozo': True,
'bozo_exception': error,
})
return result
if not data:
return result
# overwrite existing headers using response_headers
result['headers'].update(response_headers or {})
data = convert_to_utf8(result['headers'], data, result)
use_json_parser = result['content-type'] == 'application/json'
use_strict_parser = result['encoding'] and True or False
if not use_json_parser:
result['version'], data, entities = replace_doctype(data)
# Ensure that baseuri is an absolute URI using an acceptable URI scheme.
contentloc = result['headers'].get('content-location', '')
href = result.get('href', '')
baseuri = make_safe_absolute_uri(href, contentloc) or make_safe_absolute_uri(contentloc) or href
baselang = result['headers'].get('content-language', None)
    if isinstance(baselang, bytes):
baselang = baselang.decode('utf-8', 'ignore')
if not _XML_AVAILABLE:
use_strict_parser = False
feed_parser: Union[JSONParser, StrictFeedParser, LooseFeedParser]
if use_json_parser:
result['version'] = None
feed_parser = JSONParser(baseuri, baselang, 'utf-8')
try:
feed_parser.feed(data)
except Exception as e:
result['bozo'] = 1
result['bozo_exception'] = e
elif use_strict_parser:
# Initialize the SAX parser.
feed_parser = StrictFeedParser(baseuri, baselang, 'utf-8')
feed_parser.resolve_relative_uris = resolve_relative_uris
feed_parser.sanitize_html = sanitize_html
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
try:
# Disable downloading external doctype references, if possible.
saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
except xml.sax.SAXNotSupportedException:
pass
saxparser.setContentHandler(feed_parser)
saxparser.setErrorHandler(feed_parser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(io.BytesIO(data))
try:
saxparser.parse(source)
except xml.sax.SAXException as e:
result['bozo'] = 1
result['bozo_exception'] = feed_parser.exc or e
use_strict_parser = False
# The loose XML parser will be tried if the JSON parser was not used,
    # and if the strict XML parser was not used (or if it failed).
if not use_json_parser and not use_strict_parser:
feed_parser = LooseFeedParser(baseuri, baselang, 'utf-8', entities)
feed_parser.resolve_relative_uris = resolve_relative_uris
feed_parser.sanitize_html = sanitize_html
feed_parser.feed(data.decode('utf-8', 'replace'))
result['feed'] = feed_parser.feeddata
result['entries'] = feed_parser.entries
result['version'] = result['version'] or feed_parser.version
if isinstance(feed_parser, JSONParser):
result['namespaces'] = {}
else:
result['namespaces'] = feed_parser.namespaces_in_use
return result
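# Illustrative sketch (not part of the library): parsing a feed held in memory. Wrapping the
# untrusted string in io.BytesIO avoids any network or filesystem access, as recommended in
# the parse() docstring above. The sample document and printed fields are assumptions.
#
#   import io
#   import feedparser
#   document = b'<rss version="2.0"><channel><title>demo</title></channel></rss>'
#   result = feedparser.parse(io.BytesIO(document))
#   print(result.version, result.feed.get('title'))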
| 39.855263 | 124 | 0.698085 |
8fa28b82796ff1989b56b27c5b05c4106998d5bb | 1,613 | py | Python | code/utiy.py | hjl2626/stu_manager | 63b44dd0810d9dc324b458667ebd3f939b5ad5d3 | [
"Apache-2.0"
] | 1 | 2018-07-29T10:53:41.000Z | 2018-07-29T10:53:41.000Z | code/utiy.py | hjl2626/stu_manager | 63b44dd0810d9dc324b458667ebd3f939b5ad5d3 | [
"Apache-2.0"
] | null | null | null | code/utiy.py | hjl2626/stu_manager | 63b44dd0810d9dc324b458667ebd3f939b5ad5d3 | [
"Apache-2.0"
] | null | null | null | import random
def _get_name():
num = random.randint(2, 3)
name = ''.join([chr(random.randint(0x4E00, 0x8377)) for i in range(num)])
if num == 2:
return name[0] + ' ' + name[1]
return name
def _get_id():
return ''.join(random.sample('0123456789', 6))
def _get_sco():
return '%.1f' % (random.randint(10, 100))
def _get_sex():
return ['男', '女'][random.randint(0, 1)]
def init_write_file(filename, num=50, flag=False):
result = []
for i in range(num):
if flag:
tmp = ','.join([_get_id(), _get_name(), _get_sex(), _get_sco(), _get_sco(), _get_sco(), '0.0', '0.0'])
else:
tmp = ','.join([_get_id(), _get_name(), _get_sex(), _get_sco(), _get_sco(), _get_sco()])
result.append(tmp)
with open(filename, 'w+') as f:
f.write('\n'.join(result))
def print_menu():
print('*************************************************************************************************')
print('\t\t1.创建文件 \t\t\t\t 2.显示记录 \t\t\t\t 3.编辑记录')
print('\t\t4.增加记录 \t\t\t\t 5.删除记录 \t\t\t\t 6.查询记录')
print('\t\t7.排序记录 \t\t\t\t 8.统计记录 \t\t\t\t 0.退出')
print('*************************************************************************************************')
def plot(name, xdata, ydata):
"""
Simple demo of a horizontal bar chart.
"""
import matplotlib.pyplot as plt
plt.figure(figsize=(10,6))
import matplotlib.pyplot as plt
y_pos = range(1, 11)
plt.barh(y_pos, xdata, align='center', alpha=0.9)
plt.yticks(y_pos, ydata)
plt.xlabel('人数')
plt.title(name)
plt.show()
| 28.298246 | 114 | 0.50031 |
e5fa59c737fc8afbd1e405f75796b4a44d8e2d93 | 503 | py | Python | qmla/exploration_strategies/demos/__init__.py | Evan1415/QMLA | 4521f7c08456a4494aed7c1b78d8ded5ea40f3d8 | [
"MIT"
] | null | null | null | qmla/exploration_strategies/demos/__init__.py | Evan1415/QMLA | 4521f7c08456a4494aed7c1b78d8ded5ea40f3d8 | [
"MIT"
] | null | null | null | qmla/exploration_strategies/demos/__init__.py | Evan1415/QMLA | 4521f7c08456a4494aed7c1b78d8ded5ea40f3d8 | [
"MIT"
] | null | null | null |
from qmla.exploration_strategies.demos.example import *
from qmla.exploration_strategies.demos.analytical import *
from qmla.exploration_strategies.demos.demo_lattices import *
from qmla.exploration_strategies.demos.learning_probes import *
from qmla.exploration_strategies.demos.heuristics import *
from qmla.exploration_strategies.demos.demo_genetic_algorithm import *
from qmla.exploration_strategies.demos.obj_fncs import *
from qmla.exploration_strategies.demos.bayes_factor_by_fscore import * | 55.888889 | 70 | 0.864811 |
64f09598975a9b23a6c64953c59d0644495239a1 | 572 | py | Python | setup.py | JohnCrickett/CommandoMaths | 278e178762cd8b0cac96fe3fc15a80212cff33c6 | [
"MIT"
] | null | null | null | setup.py | JohnCrickett/CommandoMaths | 278e178762cd8b0cac96fe3fc15a80212cff33c6 | [
"MIT"
] | null | null | null | setup.py | JohnCrickett/CommandoMaths | 278e178762cd8b0cac96fe3fc15a80212cff33c6 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name='CommandoMaths',
author='John Crickett',
author_email='john@trivialbusiness.co.uk',
version='0.1.0.dev',
packages=find_packages(),
package_data={'commandomaths': ['resources/*']},
license='See LICENSE',
long_description=open('README.md').read(),
install_requires=["pygame >= 1.9.2a0"
],
entry_points={
'console_scripts': [
'commandomaths=commandomaths.main:main',
],
},
zip_safe=True,
include_package_data=True
)
| 26 | 52 | 0.618881 |
6bfa812b2856d854dcc773e9c780bf84d1ced696 | 1,530 | py | Python | neural-tangents/examples/util.py | DarrenZhang01/Neural_Tangents_TensorFlow | 2fd360c8b1b8c9106044034f6a8b5c2734db9c3d | [
"Apache-2.0"
] | 4 | 2020-12-25T17:37:13.000Z | 2022-01-03T17:00:23.000Z | neural-tangents/examples/util.py | DarrenZhang01/TensorFlow_GSoC | 2fd360c8b1b8c9106044034f6a8b5c2734db9c3d | [
"Apache-2.0"
] | 33 | 2020-07-18T18:57:54.000Z | 2020-08-17T13:58:46.000Z | neural-tangents/examples/util.py | DarrenZhang01/Neural_Tangents_TensorFlow | 2fd360c8b1b8c9106044034f6a8b5c2734db9c3d | [
"Apache-2.0"
] | 1 | 2021-08-16T19:00:06.000Z | 2021-08-16T19:00:06.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of utility operations for running examples.
"""
from tensorflow.python.ops import numpy_ops as np
def _accuracy(y, y_hat):
"""Compute the accuracy of the predictions with respect to one-hot labels."""
return np.mean(np.argmax(y, axis=1) == np.argmax(y_hat, axis=1))
def print_summary(name, labels, net_p, lin_p, loss):
"""Print summary information comparing a network with its linearization."""
print('\nEvaluating Network on {} data.'.format(name))
print('---------------------------------------')
print('Network Accuracy = {}'.format(_accuracy(net_p, labels)))
print('Network Loss = {}'.format(loss(net_p, labels)))
if lin_p is not None:
print('Linearization Accuracy = {}'.format(_accuracy(lin_p, labels)))
print('Linearization Loss = {}'.format(loss(lin_p, labels)))
print('RMSE of predictions: {}'.format(
np.sqrt(np.mean((net_p - lin_p) ** 2))))
print('---------------------------------------')
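# Illustrative sketch (not part of the library): comparing a trained network against its
# linearization on held-out data. The names `x_test`, `y_test`, `f`, `f_lin`, `params`,
# `params_lin`, and `cross_entropy` are assumed to exist in the calling example script.
#
#   print_summary('test', y_test, f(params, x_test), f_lin(params_lin, x_test), cross_entropy)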
| 39.230769 | 79 | 0.677778 |
1d189508960364e96144da62b95c6b63cb331e62 | 8,987 | py | Python | homeassistant/components/comfoconnect/sensor.py | zcmosz/home-assistant | 58b4efe880478f8c66f0bf53e957268667301220 | [
"Apache-2.0"
] | 1 | 2021-01-10T05:35:53.000Z | 2021-01-10T05:35:53.000Z | homeassistant/components/comfoconnect/sensor.py | zcmosz/home-assistant | 58b4efe880478f8c66f0bf53e957268667301220 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/comfoconnect/sensor.py | zcmosz/home-assistant | 58b4efe880478f8c66f0bf53e957268667301220 | [
"Apache-2.0"
] | 1 | 2020-02-24T16:17:42.000Z | 2020-02-24T16:17:42.000Z | """Platform to control a Zehnder ComfoAir Q350/450/600 ventilation unit."""
import logging
from pycomfoconnect import (
SENSOR_BYPASS_STATE,
SENSOR_DAYS_TO_REPLACE_FILTER,
SENSOR_FAN_EXHAUST_DUTY,
SENSOR_FAN_EXHAUST_FLOW,
SENSOR_FAN_EXHAUST_SPEED,
SENSOR_FAN_SUPPLY_DUTY,
SENSOR_FAN_SUPPLY_FLOW,
SENSOR_FAN_SUPPLY_SPEED,
SENSOR_HUMIDITY_EXHAUST,
SENSOR_HUMIDITY_EXTRACT,
SENSOR_HUMIDITY_OUTDOOR,
SENSOR_HUMIDITY_SUPPLY,
SENSOR_POWER_CURRENT,
SENSOR_TEMPERATURE_EXHAUST,
SENSOR_TEMPERATURE_EXTRACT,
SENSOR_TEMPERATURE_OUTDOOR,
SENSOR_TEMPERATURE_SUPPLY,
)
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_DEVICE_CLASS,
CONF_RESOURCES,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
POWER_WATT,
TEMP_CELSIUS,
TIME_DAYS,
TIME_HOURS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from . import DOMAIN, SIGNAL_COMFOCONNECT_UPDATE_RECEIVED, ComfoConnectBridge
ATTR_AIR_FLOW_EXHAUST = "air_flow_exhaust"
ATTR_AIR_FLOW_SUPPLY = "air_flow_supply"
ATTR_BYPASS_STATE = "bypass_state"
ATTR_CURRENT_HUMIDITY = "current_humidity"
ATTR_CURRENT_TEMPERATURE = "current_temperature"
ATTR_DAYS_TO_REPLACE_FILTER = "days_to_replace_filter"
ATTR_EXHAUST_FAN_DUTY = "exhaust_fan_duty"
ATTR_EXHAUST_FAN_SPEED = "exhaust_fan_speed"
ATTR_EXHAUST_HUMIDITY = "exhaust_humidity"
ATTR_EXHAUST_TEMPERATURE = "exhaust_temperature"
ATTR_OUTSIDE_HUMIDITY = "outside_humidity"
ATTR_OUTSIDE_TEMPERATURE = "outside_temperature"
ATTR_POWER_CURRENT = "power_usage"
ATTR_SUPPLY_FAN_DUTY = "supply_fan_duty"
ATTR_SUPPLY_FAN_SPEED = "supply_fan_speed"
ATTR_SUPPLY_HUMIDITY = "supply_humidity"
ATTR_SUPPLY_TEMPERATURE = "supply_temperature"
_LOGGER = logging.getLogger(__name__)
ATTR_ICON = "icon"
ATTR_ID = "id"
ATTR_LABEL = "label"
ATTR_MULTIPLIER = "multiplier"
ATTR_UNIT = "unit"
SENSOR_TYPES = {
ATTR_CURRENT_TEMPERATURE: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_LABEL: "Inside Temperature",
ATTR_UNIT: TEMP_CELSIUS,
ATTR_ICON: "mdi:thermometer",
ATTR_ID: SENSOR_TEMPERATURE_EXTRACT,
ATTR_MULTIPLIER: 0.1,
},
ATTR_CURRENT_HUMIDITY: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_LABEL: "Inside Humidity",
ATTR_UNIT: "%",
ATTR_ICON: "mdi:water-percent",
ATTR_ID: SENSOR_HUMIDITY_EXTRACT,
},
ATTR_OUTSIDE_TEMPERATURE: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_LABEL: "Outside Temperature",
ATTR_UNIT: TEMP_CELSIUS,
ATTR_ICON: "mdi:thermometer",
ATTR_ID: SENSOR_TEMPERATURE_OUTDOOR,
ATTR_MULTIPLIER: 0.1,
},
ATTR_OUTSIDE_HUMIDITY: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_LABEL: "Outside Humidity",
ATTR_UNIT: "%",
ATTR_ICON: "mdi:water-percent",
ATTR_ID: SENSOR_HUMIDITY_OUTDOOR,
},
ATTR_SUPPLY_TEMPERATURE: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_LABEL: "Supply Temperature",
ATTR_UNIT: TEMP_CELSIUS,
ATTR_ICON: "mdi:thermometer",
ATTR_ID: SENSOR_TEMPERATURE_SUPPLY,
ATTR_MULTIPLIER: 0.1,
},
ATTR_SUPPLY_HUMIDITY: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_LABEL: "Supply Humidity",
ATTR_UNIT: "%",
ATTR_ICON: "mdi:water-percent",
ATTR_ID: SENSOR_HUMIDITY_SUPPLY,
},
ATTR_SUPPLY_FAN_SPEED: {
ATTR_DEVICE_CLASS: None,
ATTR_LABEL: "Supply Fan Speed",
ATTR_UNIT: "rpm",
ATTR_ICON: "mdi:fan",
ATTR_ID: SENSOR_FAN_SUPPLY_SPEED,
},
ATTR_SUPPLY_FAN_DUTY: {
ATTR_DEVICE_CLASS: None,
ATTR_LABEL: "Supply Fan Duty",
ATTR_UNIT: "%",
ATTR_ICON: "mdi:fan",
ATTR_ID: SENSOR_FAN_SUPPLY_DUTY,
},
ATTR_EXHAUST_FAN_SPEED: {
ATTR_DEVICE_CLASS: None,
ATTR_LABEL: "Exhaust Fan Speed",
ATTR_UNIT: "rpm",
ATTR_ICON: "mdi:fan",
ATTR_ID: SENSOR_FAN_EXHAUST_SPEED,
},
ATTR_EXHAUST_FAN_DUTY: {
ATTR_DEVICE_CLASS: None,
ATTR_LABEL: "Exhaust Fan Duty",
ATTR_UNIT: "%",
ATTR_ICON: "mdi:fan",
ATTR_ID: SENSOR_FAN_EXHAUST_DUTY,
},
ATTR_EXHAUST_TEMPERATURE: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_LABEL: "Exhaust Temperature",
ATTR_UNIT: TEMP_CELSIUS,
ATTR_ICON: "mdi:thermometer",
ATTR_ID: SENSOR_TEMPERATURE_EXHAUST,
ATTR_MULTIPLIER: 0.1,
},
ATTR_EXHAUST_HUMIDITY: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_LABEL: "Exhaust Humidity",
ATTR_UNIT: "%",
ATTR_ICON: "mdi:water-percent",
ATTR_ID: SENSOR_HUMIDITY_EXHAUST,
},
ATTR_AIR_FLOW_SUPPLY: {
ATTR_DEVICE_CLASS: None,
ATTR_LABEL: "Supply airflow",
ATTR_UNIT: f"m³/{TIME_HOURS}",
ATTR_ICON: "mdi:fan",
ATTR_ID: SENSOR_FAN_SUPPLY_FLOW,
},
ATTR_AIR_FLOW_EXHAUST: {
ATTR_DEVICE_CLASS: None,
ATTR_LABEL: "Exhaust airflow",
ATTR_UNIT: f"m³/{TIME_HOURS}",
ATTR_ICON: "mdi:fan",
ATTR_ID: SENSOR_FAN_EXHAUST_FLOW,
},
ATTR_BYPASS_STATE: {
ATTR_DEVICE_CLASS: None,
ATTR_LABEL: "Bypass State",
ATTR_UNIT: "%",
ATTR_ICON: "mdi:camera-iris",
ATTR_ID: SENSOR_BYPASS_STATE,
},
ATTR_DAYS_TO_REPLACE_FILTER: {
ATTR_DEVICE_CLASS: None,
ATTR_LABEL: "Days to replace filter",
ATTR_UNIT: TIME_DAYS,
ATTR_ICON: "mdi:calendar",
ATTR_ID: SENSOR_DAYS_TO_REPLACE_FILTER,
},
ATTR_POWER_CURRENT: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
ATTR_LABEL: "Power usage",
ATTR_UNIT: POWER_WATT,
ATTR_ICON: "mdi:flash",
ATTR_ID: SENSOR_POWER_CURRENT,
},
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_RESOURCES, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
)
}
)
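# Illustrative configuration.yaml snippet for this platform (resource names
# must be keys of SENSOR_TYPES):
#
#   sensor:
#     - platform: comfoconnect
#       resources:
#         - current_temperature
#         - current_humidity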
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the ComfoConnect fan platform."""
ccb = hass.data[DOMAIN]
sensors = []
for resource in config[CONF_RESOURCES]:
sensors.append(
ComfoConnectSensor(
name=f"{ccb.name} {SENSOR_TYPES[resource][ATTR_LABEL]}",
ccb=ccb,
sensor_type=resource,
)
)
add_entities(sensors, True)
class ComfoConnectSensor(Entity):
"""Representation of a ComfoConnect sensor."""
def __init__(self, name, ccb: ComfoConnectBridge, sensor_type) -> None:
"""Initialize the ComfoConnect sensor."""
self._ccb = ccb
self._sensor_type = sensor_type
self._sensor_id = SENSOR_TYPES[self._sensor_type][ATTR_ID]
self._name = name
async def async_added_to_hass(self):
"""Register for sensor updates."""
_LOGGER.debug(
"Registering for sensor %s (%d)", self._sensor_type, self._sensor_id
)
async_dispatcher_connect(
self.hass,
SIGNAL_COMFOCONNECT_UPDATE_RECEIVED.format(self._sensor_id),
self._handle_update,
)
await self.hass.async_add_executor_job(
self._ccb.comfoconnect.register_sensor, self._sensor_id
)
def _handle_update(self, value):
"""Handle update callbacks."""
_LOGGER.debug(
"Handle update for sensor %s (%d): %s",
self._sensor_type,
self._sensor_id,
value,
)
self._ccb.data[self._sensor_id] = round(
value * SENSOR_TYPES[self._sensor_type].get(ATTR_MULTIPLIER, 1), 2
)
self.schedule_update_ha_state()
@property
def state(self):
"""Return the state of the entity."""
try:
return self._ccb.data[self._sensor_id]
except KeyError:
return None
@property
def should_poll(self) -> bool:
"""Do not poll."""
return False
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return f"{self._ccb.unique_id}-{self._sensor_type}"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon to use in the frontend."""
return SENSOR_TYPES[self._sensor_type][ATTR_ICON]
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return SENSOR_TYPES[self._sensor_type][ATTR_UNIT]
@property
def device_class(self):
"""Return the device_class."""
return SENSOR_TYPES[self._sensor_type][ATTR_DEVICE_CLASS]
| 30.464407 | 80 | 0.663737 |
92329787e1f78fa2b395079190e18bf60385017d | 26,131 | py | Python | anadama2/grid/grid.py | biobakery/anadama2 | 39f791c8d8588ff92202c115c8e8e7fc25153c93 | [
"MIT"
] | 4 | 2020-06-08T22:10:48.000Z | 2021-07-27T13:57:43.000Z | anadama2/grid/grid.py | biobakery/anadama2 | 39f791c8d8588ff92202c115c8e8e7fc25153c93 | [
"MIT"
] | null | null | null | anadama2/grid/grid.py | biobakery/anadama2 | 39f791c8d8588ff92202c115c8e8e7fc25153c93 | [
"MIT"
] | 1 | 2020-09-10T08:29:22.000Z | 2020-09-10T08:29:22.000Z | # -*- coding: utf-8 -*-
import os
import sys
import threading
try:
import Queue
except ImportError:
import queue as Queue
import time
import tempfile
import string
import logging
import itertools
import re
import six
from .. import runners
from .. import picklerunner
from ..helpers import format_command
from ..helpers import file_size
if os.name == 'posix' and sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
class GridJobRequires(object):
"""Defines the resources required for a task on the grid.
:param time: Wall clock time in minutes.
:type time: int
:param mem: RAM Usage in MB (8*1024*1024 bits).
:type mem: int
:param cores: CPU cores.
:type cores: int
:param partition: grid partition.
:type partition: string
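
    Example (illustrative values only)::

        requires = GridJobRequires(time=60, mem=4096, cores=2,
                                   partition=None, docker_image=None)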
"""
def __init__(self, time, mem, cores, partition, docker_image, depends=None):
# if time is not an int, try to format the equation
if not str(time).isdigit():
self.time = format_command(time, depends=depends, cores=cores)
else:
self.time = int(time)
# if memory is not an int, try to format the equation
if not str(mem).isdigit():
self.mem = format_command(mem, depends=depends, cores=cores)
else:
self.mem = int(mem)
self.cores = int(cores)
self.partition = partition
self.docker_image = docker_image
class Grid(object):
""" Base Grid Workflow manager class """
def __init__(self, name, worker, queue, tmpdir, benchmark_on=None, max_time=None, max_mem=None):
self.name = name
self.worker = worker
self.queue = queue
self.tmpdir = tmpdir
self.max_time = None
self.max_mem = None
try:
if max_time:
self.max_time = int(max_time)
except ValueError:
print("ERROR: Please provide an integer for the max time: {}".format(max_time))
try:
if max_mem:
self.max_mem = int(max_mem)
except ValueError:
print("ERROR: Please provide an integer for the max memory: {}".format(max_mem))
# create the folder if it does not already exist for temp directory
if not os.path.isdir(self.tmpdir):
os.makedirs(self.tmpdir)
self.task_data = dict()
def _get_grid_task_settings(self, kwargs, depends):
""" Get the resources required to run this task on the grid """
# check for the required keywords
requires=[]
for key in ["time","mem","cores"]:
try:
requires.append(kwargs[key])
except KeyError:
raise KeyError(key+" is a required keyword argument for a grid task")
# check for optional keyword
try:
requires.append(kwargs["partition"])
except KeyError:
requires.append(None)
try:
requires.append(kwargs["docker_image"])
except KeyError:
requires.append(None)
requires+=[depends]
jobrequires=GridJobRequires(*requires)
# add the max time and memory overrides
jobrequires.time=[jobrequires.time,self.max_time]
jobrequires.mem=[jobrequires.mem,self.max_mem]
return (jobrequires, self.tmpdir)
def do(self, task, **kwargs):
"""Accepts the following extra arguments:
:param time: The maximum time in minutes allotted to run the
command
:type time: int
:param mem: The maximum memory in megabytes allocated to run
the command
:type mem: int
:param cores: The number of CPU cores allocated to the job
:type cores: int
:param partition: The grid partition to send this job to
:type partition: str
"""
self.add_task(task, **kwargs)
def add_task(self, task, **kwargs):
"""Accepts the following extra arguments:
:keyword time: The maximum time in minutes allotted to run the
command
:type time: int
:keyword mem: The maximum memory in megabytes allocated to run
the command
:type mem: int
:keyword cores: The number of CPU cores allocated to the job
:type cores: int
:keyword partition: The grid partition to send this job to
:type partition: str
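
        Example (illustrative; ``task`` is a task previously added to the
        workflow)::

            grid.add_task(task, time=120, mem=8192, cores=4, partition="serial")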
"""
self.task_data[task.task_no] = self._get_grid_task_settings(kwargs, task.depends)
def runner(self, workflow, jobs=1, grid_jobs=1):
runner = runners.GridRunner(workflow)
runner.add_worker(runners.ParallelLocalWorker,
name="local", rate=jobs, default=True)
runner.add_worker(self.worker, name=self.name, rate=grid_jobs)
runner.routes.update((
( task_no, (self.name, list(extra)+[self.queue, workflow._reporter]) )
for task_no, extra in six.iteritems(self.task_data)
))
return runner
class GridQueue(object):
def __init__(self, partition, benchmark_on=None):
# check for short/long partitions
if not isinstance(partition, list):
partition = [x.strip() for x in partition.split(",")]
try:
self.partition_short, self.partition_long, self.partition_cutoff = partition
self.partition_cutoff = int(self.partition_cutoff)
except ValueError:
self.partition_short = partition[0]
self.partition_long = partition[0]
self.partition_cutoff = 0
# this is the refresh rate for checking the queue, in seconds
self.refresh_rate = 10*60
# this is the rate for checking the job status, in seconds
self.check_job_rate = 60
# this is the number of minutes to wait if there is an time out
# socket error returned from the scheduler when running a command
self.timeout_sleep = 5*60
# this is the number of times to retry after a timeout error
self.timeout_retry_max = 3
# this is the number of seconds to wait after job submission
self.submit_sleep = 5
# this is the last time the queue was checked
self.last_check = time.time()
self.sacct = None
# create a lock for jobs in queue
self.lock_status = threading.Lock()
self.lock_submit = threading.Lock()
# set if benchmarking should be run
self.benchmark_on = benchmark_on
@staticmethod
def submit_command(grid_script):
raise NotImplementedError
def submit_template(self):
raise NotImplementedError
def job_failed(self,status):
raise NotImplementedError
def job_stopped(self,status):
raise NotImplementedError
def refresh_queue_status(self):
raise NotImplementedError
def job_memkill(self, status, jobid, memory):
return False
def job_timeout(self, status, jobid, time):
return False
def get_job_status_from_stderr(self, error_file, grid_job_status, grid_jobid):
return grid_job_status
def get_partition(self, time, partition):
""" Get the partition for the task based on the time requested """
# if a partition is already set for the task, use that partition
        if partition is not None:
return partition
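        # otherwise pick by requested wall time: anything above the cutoff
        # (in minutes) goes to the long partition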
if time > self.partition_cutoff:
return self.partition_long
else:
return self.partition_short
def get_queue_status(self, refresh=None):
""" Get the queue accounting stats """
# lock to prevent race conditions with status update
self.lock_status.acquire()
# check the last time the queue was captured and refresh if set
current_time = time.time()
if ( current_time - self.last_check > self.refresh_rate ) or refresh or self.sacct is None:
self.last_check = current_time
logging.info("Getting latest queue info to refresh job status")
self.sacct = self.refresh_queue_status()
self.lock_status.release()
return self.sacct
def get_all_stats_for_jobid(self,jobid):
""" Get all the stats for a specific job id """
# use the existing stats, to get the information for the jobid
try:
job_stats=list(filter(lambda x: x[0].startswith(jobid),self.get_queue_status()))
except IndexError:
job_stats=[]
# if the job stats are not found for the job, return an NA state
if not job_stats:
job_stats=[[jobid,"Pending","NA","NA","NA"]]
return job_stats
def get_job_status(self, jobid):
""" Check the status of the job """
info=self.get_all_stats_for_jobid(jobid)
return info[0][1]
def record_benchmark(self, jobid, task_number, reporter):
""" Check the benchmarking stats of the grid id """
# check if benchmarking is set to off
if not self.benchmark_on:
logging.info("Benchmarking is set to off")
return
reporter.task_grid_status(task_number,jobid,"Getting benchmarking data")
status, cpus, elapsed, memory = self.get_benchmark(jobid)
logging.info("Benchmark information for job id %s:\nElapsed Time: %s \nCores: %s\nMemory: %s MB",
task_number, elapsed, cpus, memory)
reporter.task_grid_status(task_number,jobid,"Final status of "+status)
def get_benchmark(self, jobid, wait=None):
""" Get the benchmarking data for the jobid """
# if the job is not shown to have finished running then
# wait for the next queue refresh
status=self.get_job_status(jobid)
if wait or not (self.job_stopped(status) or self.job_failed(status)):
wait_time = abs(self.refresh_rate - (time.time() - self.last_check)) + 10
time.sleep(wait_time)
info=self.get_all_stats_for_jobid(jobid)
try:
status=info[0][1]
except IndexError:
status="Unknown"
try:
cpus=info[0][2]
except IndexError:
cpus="NA"
try:
elapsed=info[0][3]
except IndexError:
elapsed="NA"
# get the memory max from the batch line which is the second line of output
try:
memory=info[0][4]
except IndexError:
memory="NA"
if "K" in memory:
# if memory is in KB, convert to MB
memory="{:.1f}".format(float(memory.replace("K",""))/1024.0)
elif "M" in memory:
memory="{:.1f}".format(float(memory.replace("M","")))
elif "G" in memory:
# if memory is in GB, convert to MB
memory="{:.1f}".format(float(memory.replace("G",""))*1024.0)
return status, cpus, elapsed, memory
def run_grid_command(self,command):
""" Run the grid command and check for errors """
error=None
if six.callable(command):
try:
logging.debug("Running grid submit command")
stdout=command()
            except Exception as err:
error=err.output
stdout=error or "error"
else:
try:
logging.debug("Running grid command: %s"," ".join(command))
stdout=subprocess.check_output(command, stderr=subprocess.STDOUT).decode('utf-8')
except subprocess.CalledProcessError as err:
error=err.output.decode('utf-8')
stdout=error or "error"
timeout_error=False
if error and "error" in error and "Socket timed out on send/recv operation" in error:
# check for a socket timeout error
timeout_error=True
return stdout, timeout_error
def run_grid_command_resubmit(self,command):
""" Run this grid command, check for error, resubmit if needed """
# run the grid command
stdout, timeout_error = self.run_grid_command(command)
# retry if timeout error present after wait
resubmissions = 0
        while timeout_error and resubmissions < self.timeout_retry_max:
resubmissions+=1
# wait before retrying
logging.warning("Unable to run grid command, waiting and retrying")
time.sleep(self.timeout_sleep)
stdout, timeout_error = self.run_grid_command(command)
return stdout
@staticmethod
def job_submission_failed(jobid):
""" Check if the job failed in submission and did not get an id """
return True if not jobid.isdigit() else False
@staticmethod
def get_job_id_from_submit_output(stdout):
try:
# search for the decimal job id at any location in stdout
jobid=re.findall(r'\d+',stdout)[0]
except IndexError:
jobid="error"
return jobid
def submit_job(self,grid_script):
""" Submit the grid jobs and return the grid job id """
# lock so only one task submits jobs to the queue at a time
self.lock_submit.acquire()
# submit the job and get the grid id
logging.debug("Submitting job to grid")
stdout=self.run_grid_command_resubmit(self.submit_command(grid_script))
# get the job id from the stdout
jobid=self.get_job_id_from_submit_output(stdout)
# check the jobid for a submission failed
if self.job_submission_failed(jobid):
logging.error("Unable to submit job to queue: "+stdout)
# pause for the scheduler
time.sleep(self.submit_sleep)
self.lock_submit.release()
return jobid
def create_grid_script(self,partition,cpus,minutes,memory,command,taskid,dir,docker_image):
""" Create a grid script from the template also creating temp stdout and stderr files """
# create temp files for stdout, stderr, and return code
handle_out, out_file=tempfile.mkstemp(suffix=".out",prefix="task_"+str(taskid)+"_",dir=dir)
os.close(handle_out)
handle_err, error_file=tempfile.mkstemp(suffix=".err",prefix="task_"+str(taskid)+"_",dir=dir)
os.close(handle_err)
handle_rc, rc_file=tempfile.mkstemp(suffix=".rc",prefix="task_"+str(taskid)+"_",dir=dir)
os.close(handle_rc)
# add the remaining sections to the bash template
bash_template = string.Template("\n".join(["#!/bin/bash "] + self.submit_template() + ["${command}", "${rc_command}"]))
# convert the minutes to the time string "HH:MM:00"
hours, remaining_minutes = divmod(minutes, 60)
time = "{:02d}:{:02d}:00".format(hours, remaining_minutes)
bash=bash_template.substitute(partition=partition,cpus=cpus,time=time,
memory=memory,command=command,output=out_file,error=error_file,rc_command="export RC=$? ; echo $RC > "+rc_file+" ; bash -c 'exit $RC'")
file_handle, new_file=tempfile.mkstemp(suffix=".bash",prefix="task_"+str(taskid)+"_",dir=dir)
os.write(file_handle,bytearray(bash, 'utf-8'))
os.close(file_handle)
return new_file, out_file, error_file, rc_file
class GridWorker(threading.Thread):
""" Base Grid Worker class """
def __init__(self, work_q, result_q, lock, reporter):
super(GridWorker, self).__init__()
self.daemon = True
self.logger = runners.logger
self.work_q = work_q
self.result_q = result_q
self.lock = lock
self.reporter = reporter
@staticmethod
def appropriate_q_class(*args, **kwargs):
return six.moves.queue.Queue(*args, **kwargs)
@staticmethod
def appropriate_lock():
return threading.Lock()
def run(self):
return runners.worker_run_loop(self.work_q, self.result_q, self.run_task_by_type,
self.reporter, self.lock)
@classmethod
def run_task_by_type(cls, task, extra):
# if any of the tasks are a function, then use pickle interface
if list(filter(six.callable,task.actions)):
return cls.run_task_function(task, extra)
else:
return cls.run_task_command(task, extra)
@classmethod
def run_task_function(cls, task, extra):
(perf, tmpdir, grid_queue, reporter) = extra
# create a script to run the python function
pickle_script = picklerunner.PickleScript(task, tmpdir, "task_"+str(task.task_no))
pickle_task = pickle_script.create_task()
# run the task as a command
result = cls.run_task_command(pickle_task, extra)
# decode the result
result = pickle_script.result(result)
return result
@classmethod
def run_task_command(cls, task, extra):
(perf, tmpdir, grid_queue, reporter) = extra
# report the task has started
reporter.task_running(task.task_no)
# create a script and stdout/stderr files for this task
commands="\n".join(task.actions)
logging.info("Running commands for task id %s:\n%s", task.task_no, commands)
resubmission = 0
cores, time, memory, partition, docker_image = perf.cores, perf.time, perf.mem, perf.partition, perf.docker_image
jobid, out_file, error_file, rc_file = cls.submit_grid_job(cores, time, memory,
partition, tmpdir, commands, task, grid_queue, reporter, docker_image)
# monitor job if submission was successful
result, job_final_status = cls.check_submission_then_monitor_grid_job(grid_queue,
task, jobid, out_file, error_file, rc_file, reporter)
# if a timeout or memory max, resubmit at most three times
while ( grid_queue.job_timeout(job_final_status, jobid, time) or grid_queue.job_memkill(job_final_status, jobid, memory) ) and resubmission < 3:
resubmission+=1
# increase the memory or the time
if grid_queue.job_timeout(job_final_status, jobid, time):
time = "({})*2".format(time) if isinstance(time,str) else time*2
logging.info("Resubmission number %s of grid job for task id %s with 2x more time: %s minutes",
resubmission, task.task_no, time)
reporter.task_grid_status(task.task_no,jobid,"Resubmitting due to time out")
elif grid_queue.job_memkill(job_final_status, jobid, memory):
memory = "({})*2".format(memory) if isinstance(memory,str) else memory*2
logging.info("Resubmission number %s of grid job for task id %s with 2x more memory: %s MB",
resubmission, task.task_no, memory)
reporter.task_grid_status(task.task_no,jobid,"Resubmitting due to max memory")
jobid, out_file, error_file, rc_file = cls.submit_grid_job(cores, time, memory,
partition, tmpdir, commands, task, grid_queue, reporter, docker_image)
# monitor job if submission was successful
result, job_final_status = cls.check_submission_then_monitor_grid_job(grid_queue,
task, jobid, out_file, error_file, rc_file, reporter)
# get the benchmarking data if the job was submitted
if not grid_queue.job_submission_failed(jobid):
grid_queue.record_benchmark(jobid, task.task_no, reporter)
return result
@classmethod
def submit_grid_job(cls, cores, time, memory, partition, tmpdir, commands, task, grid_queue, reporter, docker_image):
# evaluate the time/memory requests for the job
time, memory = cls.evaluate_resource_requests(time, memory)
# get the partition for the task
current_partition = grid_queue.get_partition(time, partition)
# create the grid bash script
grid_script, out_file, error_file, rc_file = grid_queue.create_grid_script(current_partition,
cores, time, memory, commands, task.task_no, tmpdir, docker_image)
logging.info("Created grid files for task id %s: %s, %s, %s, %s",
task.task_no, grid_script, out_file, error_file, rc_file)
# submit the job
jobid = grid_queue.submit_job(grid_script)
logging.info("Submitted job for task id %s: grid id %s", task.task_no,
jobid)
if not grid_queue.job_submission_failed(jobid):
reporter.task_grid_status(task.task_no,jobid,"Submitted")
return jobid, out_file, error_file, rc_file
@staticmethod
def log_grid_output(taskid, file, file_type):
""" Write the grid stdout/stderr files to the log """
try:
lines=open(file).readlines()
except EnvironmentError:
lines=[]
logging.info("Grid %s from task id %s:\n%s",taskid, file_type, "".join(lines))
@staticmethod
def get_return_code(file):
""" Read the return code from the file """
try:
line=open(file).readline().rstrip()
except (EnvironmentError, TypeError):
line=""
return line
@staticmethod
def evaluate_resource_requests(time,mem):
""" Evaluate the time/memory requests for the grid job, allowing for ints or formulas """
# allow for optional max time and memory
if not isinstance(time,list):
time=[time]
if not isinstance(mem,list):
mem=[mem]
try:
time[0]=eval(str(time[0]))
except TypeError:
raise TypeError("Unable to evaluate time request for task: "+ time)
try:
mem[0]=eval(str(mem[0]))
except TypeError:
raise TypeError("Unable to evaluate memory request for task: "+ mem)
# check for override with max
if time[-1] and time[0] > time[-1]:
logging.info("Using override of max time from {0} reset to {1}".format(time[0],time[-1]))
time=time[-1]
else:
time=time[0]
if mem[-1] and mem[0] > mem[-1]:
logging.info("Using override of max mem from {0} reset to {1}".format(mem[0],mem[-1]))
mem=mem[-1]
else:
mem=mem[0]
return time, mem
@classmethod
def check_submission_then_monitor_grid_job(cls, grid_queue, task, grid_jobid,
out_file, error_file, rc_file, reporter):
# monitor job if submission was successful
if not grid_queue.job_submission_failed(grid_jobid):
result, job_final_status = cls.monitor_grid_job(grid_queue, task, grid_jobid,
out_file, error_file, rc_file, reporter)
else:
job_final_status = "SUBMIT FAILED"
# get the anadama task result
result=runners._get_task_result(task)
# add the extra error
result = result._replace(error=str(result.error)+"Unable to submit job to queue.")
return result, job_final_status
@classmethod
def monitor_grid_job(cls, grid_queue, task, grid_jobid, out_file, error_file, rc_file, reporter):
# poll to check for status
grid_job_status=None
for tries in itertools.count(1):
# only check status at intervals
time.sleep(grid_queue.check_job_rate)
# check the queue stats
grid_job_status = grid_queue.get_job_status(grid_jobid)
reporter.task_grid_status_polling(task.task_no,grid_jobid,grid_job_status)
logging.info("Status for job id %s with grid id %s is %s",task.task_no,
grid_jobid,grid_job_status)
if grid_queue.job_stopped(grid_job_status):
logging.info("Grid status for job id %s shows it has stopped",task.task_no)
break
# check if the return code file is written
if rc_file and os.path.getsize(rc_file) > 0:
logging.info("Return code file for job id %s shows it has stopped",task.task_no)
break
# check if a grid error is written to the output file
grid_job_status = grid_queue.get_job_status_from_stderr(error_file, grid_job_status, grid_jobid)
# write the stdout and stderr to the log
if out_file:
cls.log_grid_output(task.task_no, out_file, "standard output")
if error_file:
cls.log_grid_output(task.task_no, error_file, "standard error")
if rc_file:
cls.log_grid_output(task.task_no, rc_file, "return code")
# check the return code
extra_error=""
return_code=cls.get_return_code(rc_file)
if return_code and not return_code == "0":
extra_error="\nReturn Code Error: " + return_code
# check the queue status
if grid_queue.job_failed(grid_job_status):
extra_error+="\nGrid Status Error: " + grid_job_status
# get the anadama task result
result=runners._get_task_result(task)
# add the extra error if found
if extra_error:
result = result._replace(error=str(result.error)+extra_error)
return result, grid_job_status
| 36.444909 | 152 | 0.605526 |
12703fc35cb13a111f4d4a67edef7f21df62d678 | 2,844 | py | Python | src/rubrix/sdk/models/text_classification_record_inputs.py | sakares/rubrix | 791ffb29815b5d24f2bbbb0fa422f85f8b30098f | [
"Apache-2.0"
] | null | null | null | src/rubrix/sdk/models/text_classification_record_inputs.py | sakares/rubrix | 791ffb29815b5d24f2bbbb0fa422f85f8b30098f | [
"Apache-2.0"
] | null | null | null | src/rubrix/sdk/models/text_classification_record_inputs.py | sakares/rubrix | 791ffb29815b5d24f2bbbb0fa422f85f8b30098f | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Type, TypeVar, Union, cast
import attr
from ..types import Unset
T = TypeVar("T", bound="TextClassificationRecordInputs")
@attr.s(auto_attribs=True)
class TextClassificationRecordInputs:
""" """
additional_properties: Dict[str, Union[str, List[str]]] = attr.ib(
init=False, factory=dict
)
def to_dict(self) -> Dict[str, Any]:
field_dict: Dict[str, Any] = {}
for prop_name, prop in self.additional_properties.items():
            # list and scalar values are both stored as-is
            field_dict[prop_name] = prop
field_dict.update({})
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
text_classification_record_inputs = cls()
additional_properties = {}
for prop_name, prop_dict in d.items():
def _parse_additional_property(data: Any) -> Union[str, List[str]]:
data = None if isinstance(data, Unset) else data
additional_property: Union[str, List[str]]
try:
additional_property = cast(List[str], data)
return additional_property
except: # noqa: E722
pass
return cast(Union[str, List[str]], data)
additional_property = _parse_additional_property(prop_dict)
additional_properties[prop_name] = additional_property
text_classification_record_inputs.additional_properties = additional_properties
return text_classification_record_inputs
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Union[str, List[str]]:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Union[str, List[str]]) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
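# Illustrative use of the container-style API above (keys and values arbitrary):
#
#   record = TextClassificationRecordInputs()
#   record["text"] = "a single text field"
#   record["tokens"] = ["a", "list", "valued", "field"]
#   record.to_dict()  # -> {"text": "...", "tokens": [...]}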
| 32.318182 | 87 | 0.651899 |
7be89b1878e37a1d27590ceff7093e0168541eab | 516 | py | Python | ibsng/handler/group/del_group.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 6 | 2018-03-06T10:16:36.000Z | 2021-12-05T12:43:10.000Z | ibsng/handler/group/del_group.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 3 | 2018-03-06T10:27:08.000Z | 2022-01-02T15:21:27.000Z | ibsng/handler/group/del_group.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 3 | 2018-01-06T16:28:31.000Z | 2018-09-17T19:47:19.000Z | """Delete group API method."""
from ibsng.handler.handler import Handler
class delGroup(Handler):
"""Delete group method class."""
def control(self):
"""Validate inputs after setup method.
:return: None
:rtype: None
"""
self.is_valid(self.group_name, str)
def setup(self, group_name):
"""Setup required parameters.
:param str group_name: group name
:return: None
:rtype: None
"""
self.group_name = group_name
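# Illustrative flow (the actual API call is handled outside this class):
#   handler.setup("some_group")   # store the group name
#   handler.control()             # validate that it is a string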
| 20.64 | 46 | 0.591085 |
a91aace4b4b73696a2072b4c6c0785803cdba80a | 3,024 | py | Python | PythonCode/ModelPredictorModule.py | janw23/Ballance | f085d2d03c31a8e3be74d4c82300f571cc3cad65 | [
"MIT"
] | 2 | 2019-05-16T21:24:24.000Z | 2019-05-19T15:24:03.000Z | PythonCode/ModelPredictorModule.py | janw23/Ballance | f085d2d03c31a8e3be74d4c82300f571cc3cad65 | [
"MIT"
] | null | null | null | PythonCode/ModelPredictorModule.py | janw23/Ballance | f085d2d03c31a8e3be74d4c82300f571cc3cad65 | [
"MIT"
] | null | null | null | import math
import MathModule as MM
# module that predicts the future position of the ball
class ModelPredictor:
    eta = 0.00001  # numerical 'inaccuracy' constant for the calculations
    modelDelay = 0.2  # reaction delay time of the real device
    gravity = 9.81
    anglePerServoSignal = 7.5 * 0.001 * math.pi / 180  # board tilt angle per unit of servo position signal
    boardSize = 1 / 0.2  # length of the platform's side
    servo_speed = 10000  # 'real' speed of servo movement
    friction_static = (0.04 * gravity) ** 2  # static friction coefficient
    friction_dynamic = 0.002 * gravity  # dynamic friction coefficient
def __init__(self):
self.position = [0.1, 0.1]
self.velocity = [0.0, 0.0]
self.servo = [0.0, 0.0]
        self.servo_target = [0.0, 0.0]  # target servo position
#self.signal_delayer = MM.SignalDelay(4, (0.5, 0.5))
def GetPosition(self):
return tuple(self.position) #self.signal_delayer.get()
def Reset(self):
self.position = [0.5, 0.5]
self.velocity = [0.0, 0.0]
def SetServos(self, values):
self.servo_target[0] = values[0]
self.servo_target[1] = -values[1]
    # updates the servo positions
def updateServos(self, deltaTime):
        for i in range(2):  # only 2 servos
movement_dir = MM.sign(self.servo_target[i] - self.servo[i])
self.servo[i] += ModelPredictor.servo_speed * movement_dir * deltaTime
if movement_dir > 0: self.servo[i] = min(self.servo[i], self.servo_target[i])
elif movement_dir < 0: self.servo[i] = max(self.servo[i], self.servo_target[i])
self.servo[i] = round(self.servo[i])
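    # advances the simulation by deltaTime: servo tilt -> acceleration,
    # then a simple explicit Euler update of velocity and position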
def update(self, deltaTime):
ModelPredictor.updateServos(self, deltaTime)
accel_x = math.sin(self.servo[0] * ModelPredictor.anglePerServoSignal) * ModelPredictor.gravity
accel_y = math.sin(self.servo[1] * ModelPredictor.anglePerServoSignal) * ModelPredictor.gravity
        # if the ball is stationary
        if MM.sqrMagnitude(self.velocity) <= ModelPredictor.eta:
            # if the force acting on the ball is too small to overcome static friction
            if MM.sqrMagnitude(accel_x, accel_y) <= ModelPredictor.friction_static:
accel_x = 0
accel_y = 0
        # if the ball is moving
        else:
            # apply dynamic friction
direction = MM.normalized(self.velocity)
accel_x -= direction[0] * ModelPredictor.friction_dynamic
accel_y -= direction[1] * ModelPredictor.friction_dynamic
self.velocity[0] += accel_x * deltaTime
self.velocity[1] += accel_y * deltaTime
self.position[0] += self.velocity[0] * ModelPredictor.boardSize * deltaTime
self.position[1] += self.velocity[1] * ModelPredictor.boardSize * deltaTime
#self.signal_delayer.push(tuple(self.position))
#self.signal_delayer.tick() | 39.789474 | 111 | 0.637235 |
d9fa2107c380e2b8cf924c8b99282e9f1a5ef054 | 709 | py | Python | home/migrations/0018_auto_20210806_1501.py | SoumyaRanjanPatnaik/Health-And-Safety-Dashboard | ad5ba26f280db0f199a0a4598959c0ec0b4d6515 | [
"MIT"
] | 1 | 2021-07-17T07:55:48.000Z | 2021-07-17T07:55:48.000Z | home/migrations/0018_auto_20210806_1501.py | SoumyaRanjanPatnaik/Health-And-Safety-Dashboard | ad5ba26f280db0f199a0a4598959c0ec0b4d6515 | [
"MIT"
] | null | null | null | home/migrations/0018_auto_20210806_1501.py | SoumyaRanjanPatnaik/Health-And-Safety-Dashboard | ad5ba26f280db0f199a0a4598959c0ec0b4d6515 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-08-06 15:01
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('home', '0017_auto_20210806_1420'),
]
operations = [
migrations.AddField(
model_name='log',
name='date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='log',
name='time',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
| 26.259259 | 93 | 0.610719 |
8f73d94fd3cdcb5b3078226c573445355eeb58ae | 1,026 | py | Python | bcs-ui/backend/resources/workloads/statefulset/formatter.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 599 | 2019-06-25T03:20:46.000Z | 2022-03-31T12:14:33.000Z | bcs-ui/backend/resources/workloads/statefulset/formatter.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 537 | 2019-06-27T06:03:44.000Z | 2022-03-31T12:10:01.000Z | bcs-ui/backend/resources/workloads/statefulset/formatter.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 214 | 2019-06-25T03:26:05.000Z | 2022-03-31T07:52:03.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from typing import Dict
from backend.resources.workloads.common.formatter import WorkloadFormatter
class StatefulSetFormatter(WorkloadFormatter):
""" StatefulSet 格式化 """
def format_dict(self, resource_dict: Dict) -> Dict:
return self.format_common_dict(resource_dict)
| 41.04 | 115 | 0.779727 |
e0406c39fb352e91c6d82c7d9b9c7e3a34e61d8a | 8,397 | py | Python | train.py | neeleshnegi/chat-bot | 1800cc0896ca30f573a3c8baa9719685908c1d5b | [
"MIT"
] | null | null | null | train.py | neeleshnegi/chat-bot | 1800cc0896ca30f573a3c8baa9719685908c1d5b | [
"MIT"
] | null | null | null | train.py | neeleshnegi/chat-bot | 1800cc0896ca30f573a3c8baa9719685908c1d5b | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
import argparse
import time, datetime
import os
import pickle
import sys
from utils import TextLoader
from model import Model
def main():
assert sys.version_info >= (3, 3), \
"Must be run in Python 3.3 or later. You are running {}".format(sys.version)
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/scotus',
help='data directory containing input.txt')
parser.add_argument('--save_dir', type=str, default='models/new_save',
help='directory for checkpointed models (load from here if one is already present)')
parser.add_argument('--block_size', type=int, default=2048,
help='number of cells per block')
parser.add_argument('--num_blocks', type=int, default=3,
help='number of blocks per layer')
parser.add_argument('--num_layers', type=int, default=3,
help='number of layers')
parser.add_argument('--model', type=str, default='gru',
help='rnn, gru, lstm or nas')
parser.add_argument('--batch_size', type=int, default=40,
help='minibatch size')
parser.add_argument('--seq_length', type=int, default=40,
help='RNN sequence length')
parser.add_argument('--num_epochs', type=int, default=50,
help='number of epochs')
parser.add_argument('--save_every', type=int, default=5000,
help='save frequency')
parser.add_argument('--grad_clip', type=float, default=5.,
help='clip gradients at this value')
parser.add_argument('--learning_rate', type=float, default=1e-5,
help='learning rate')
parser.add_argument('--decay_rate', type=float, default=0.975,
help='how much to decay the learning rate')
parser.add_argument('--decay_steps', type=int, default=100000,
help='how often to decay the learning rate')
parser.add_argument('--set_learning_rate', type=float, default=-1,
help='reset learning rate to this value (if greater than zero)')
args = parser.parse_args()
train(args)
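# Typical invocation (paths are examples only):
#   python train.py --data_dir data/scotus --save_dir models/new_save --num_epochs 50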
def train(args):
data_loader = TextLoader(args.data_dir, args.batch_size, args.seq_length)
args.vocab_size = data_loader.vocab_size
load_model = False
if not os.path.exists(args.save_dir):
print("Creating directory %s" % args.save_dir)
os.mkdir(args.save_dir)
elif (os.path.exists(os.path.join(args.save_dir, 'config.pkl'))):
ckpt = tf.train.get_checkpoint_state(args.save_dir)
if ckpt and ckpt.model_checkpoint_path:
with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
saved_args = pickle.load(f)
args.block_size = saved_args.block_size
args.num_blocks = saved_args.num_blocks
args.num_layers = saved_args.num_layers
args.model = saved_args.model
print("Found a previous checkpoint. Overwriting model description arguments to:")
print(" model: {}, block_size: {}, num_blocks: {}, num_layers: {}".format(
saved_args.model, saved_args.block_size, saved_args.num_blocks, saved_args.num_layers))
load_model = True
with open(os.path.join(args.save_dir, 'config.pkl'), 'wb') as f:
pickle.dump(args, f)
with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'wb') as f:
pickle.dump((data_loader.chars, data_loader.vocab), f)
print("Building the model")
model = Model(args)
print("Total trainable parameters: {:,d}".format(model.trainable_parameter_count()))
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
config = tf.ConfigProto(log_device_placement=False)
with tf.Session(config=config) as sess:
tf.global_variables_initializer().run()
saver = tf.train.Saver(model.save_variables_list(), max_to_keep=3)
if (load_model):
print("Loading saved parameters")
saver.restore(sess, ckpt.model_checkpoint_path)
global_epoch_fraction = sess.run(model.global_epoch_fraction)
global_seconds_elapsed = sess.run(model.global_seconds_elapsed)
if load_model: print("Resuming from global epoch fraction {:.3f},"
" total trained time: {}, learning rate: {}".format(
global_epoch_fraction,
datetime.timedelta(seconds=float(global_seconds_elapsed)),
sess.run(model.lr)))
if (args.set_learning_rate > 0):
sess.run(tf.assign(model.lr, args.set_learning_rate))
print("Reset learning rate to {}".format(args.set_learning_rate))
data_loader.cue_batch_pointer_to_epoch_fraction(global_epoch_fraction)
initial_batch_step = int((global_epoch_fraction
- int(global_epoch_fraction)) * data_loader.total_batch_count)
epoch_range = (int(global_epoch_fraction),
args.num_epochs + int(global_epoch_fraction))
writer = tf.summary.FileWriter(args.save_dir, graph=tf.get_default_graph())
outputs = [model.cost, model.final_state, model.train_op, model.summary_op]
global_step = epoch_range[0] * data_loader.total_batch_count + initial_batch_step
avg_loss = 0
avg_steps = 0
try:
for e in range(*epoch_range):
state = sess.run(model.zero_state)
batch_range = (initial_batch_step, data_loader.total_batch_count)
initial_batch_step = 0
for b in range(*batch_range):
global_step += 1
if global_step % args.decay_steps == 0:
current_learning_rate = sess.run(model.lr)
current_learning_rate *= args.decay_rate
sess.run(tf.assign(model.lr, current_learning_rate))
print("Decayed learning rate to {}".format(current_learning_rate))
start = time.time()
x, y = data_loader.next_batch()
feed = {model.input_data: x, model.targets: y}
model.add_state_to_feed_dict(feed, state)
train_loss, state, _, summary = sess.run(outputs, feed)
elapsed = time.time() - start
global_seconds_elapsed += elapsed
writer.add_summary(summary, e * batch_range[1] + b + 1)
if avg_steps < 100: avg_steps += 1
avg_loss = 1 / avg_steps * train_loss + (1 - 1 / avg_steps) * avg_loss
print("{:,d} / {:,d} (epoch {:.3f} / {}), loss {:.3f} (avg {:.3f}), {:.3f}s" \
.format(b, batch_range[1], e + b / batch_range[1], epoch_range[1],
train_loss, avg_loss, elapsed))
if (e * batch_range[1] + b + 1) % args.save_every == 0 \
or (e == epoch_range[1] - 1 and b == batch_range[1] - 1):
save_model(sess, saver, model, args.save_dir, global_step,
data_loader.total_batch_count, global_seconds_elapsed)
except KeyboardInterrupt:
print()
finally:
writer.flush()
global_step = e * data_loader.total_batch_count + b
save_model(sess, saver, model, args.save_dir, global_step,
data_loader.total_batch_count, global_seconds_elapsed)
def save_model(sess, saver, model, save_dir, global_step, steps_per_epoch, global_seconds_elapsed):
global_epoch_fraction = float(global_step) / float(steps_per_epoch)
checkpoint_path = os.path.join(save_dir, 'model.ckpt')
print("Saving model to {} (epoch fraction {:.3f})...".format(checkpoint_path, global_epoch_fraction),
end='', flush=True)
sess.run(tf.assign(model.global_epoch_fraction, global_epoch_fraction))
sess.run(tf.assign(model.global_seconds_elapsed, global_seconds_elapsed))
saver.save(sess, checkpoint_path, global_step = global_step)
print("\rSaved model to {} (epoch fraction {:.3f}). ".format(checkpoint_path, global_epoch_fraction))
if __name__ == '__main__':
main()
| 51.20122 | 107 | 0.611885 |
1c97eba874cd83943cd9affa8bb930ddb4922376 | 14,276 | py | Python | tests/unit/fake_data_root/openstack/var/lib/juju/agents/unit-neutron-openvswitch-1/charm/hooks/neutron_ovs_context.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
] | 17 | 2016-04-17T04:00:56.000Z | 2020-08-19T12:03:18.000Z | tests/unit/fake_data_root/openstack/var/lib/juju/agents/unit-neutron-openvswitch-1/charm/hooks/neutron_ovs_context.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
] | 111 | 2021-10-01T18:18:17.000Z | 2022-03-29T12:23:20.000Z | tests/unit/fake_data_root/openstack/var/lib/juju/agents/unit-neutron-openvswitch-1/charm/hooks/neutron_ovs_context.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
] | 23 | 2016-03-23T11:12:19.000Z | 2021-12-17T18:24:16.000Z | # Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from charmhelpers.core.hookenv import (
config,
log,
relation_get,
relation_ids,
related_units,
unit_get,
network_get_primary_address,
)
from charmhelpers.core.host import (
CompareHostReleases,
is_container,
lsb_release,
write_file,
)
from charmhelpers.contrib.openstack import context
from charmhelpers.contrib.openstack.utils import (
get_host_ip,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
)
from charmhelpers.contrib.openstack.context import (
OSContextGenerator,
NeutronAPIContext,
)
from charmhelpers.contrib.openstack.utils import (
os_release,
CompareOpenStackReleases,
)
IPTABLES_HYBRID = 'iptables_hybrid'
OPENVSWITCH = 'openvswitch'
VALID_FIREWALL_DRIVERS = (IPTABLES_HYBRID, OPENVSWITCH)
NFG_LOG_RATE_LIMIT_MIN = 100
NFG_LOG_BURST_LIMIT_MIN = 25
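# Lower bounds applied to the configured firewall-group (NFG) logging rate and
# burst limits before they are written out.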
def _get_firewall_driver(ovs_ctxt):
'''
Determine the firewall driver to use based on configuration,
OpenStack and Ubuntu releases.
@returns str: firewall driver to use for OpenvSwitch
'''
driver = config('firewall-driver') or IPTABLES_HYBRID
release = lsb_release()['DISTRIB_CODENAME']
if driver not in VALID_FIREWALL_DRIVERS:
return IPTABLES_HYBRID
if driver == IPTABLES_HYBRID and ovs_ctxt['enable_nsg_logging']:
msg = "NSG logging can not be enabled - need to set " \
"firewall driver to 'openvswitch' explicitly"
log(msg, "WARN")
if (driver == OPENVSWITCH and
CompareHostReleases(release) < 'xenial'):
# NOTE(jamespage): Switch back to iptables_hybrid for
# Ubuntu releases prior to Xenial due
# to requirements for Linux >= 4.4 and
# Open vSwitch >= 2.5
return IPTABLES_HYBRID
return driver
def get_nsg_log_path(desired_nsg_log_path):
if not desired_nsg_log_path:
# None means "we need to use syslog" - no need
# to check anything on filesystem
return None
dst_dir, _ = os.path.split(desired_nsg_log_path)
path_exists = os.path.exists(dst_dir)
if not path_exists:
log(
"Desired NSG log directory {} not exists! "
"falling back to syslog".format(dst_dir),
"WARN"
)
return None
if path_exists and os.path.isdir(desired_nsg_log_path):
log(
"Desired NSG log path {} should be file, not directory! "
"falling back to syslog".format(desired_nsg_log_path),
"WARN"
)
return None
return desired_nsg_log_path
def validate_nfg_log_path(desired_nfg_log_path):
if not desired_nfg_log_path:
# None means "we need to use syslog" - no need
# to check anything on filesystem
return None
dst_dir, _ = os.path.split(desired_nfg_log_path)
path_exists = os.path.exists(dst_dir)
if not path_exists:
log(
"Desired NFG log directory {} not exists! "
"falling back to syslog".format(dst_dir),
"WARN"
)
return None
if path_exists and os.path.isdir(desired_nfg_log_path):
log(
"Desired NFG log path {} should be file, not directory! "
"falling back to syslog".format(desired_nfg_log_path),
"WARN"
)
return None
return desired_nfg_log_path
class OVSPluginContext(context.NeutronContext):
interfaces = []
@property
def plugin(self):
return 'ovs'
@property
def network_manager(self):
return 'neutron'
@property
def neutron_security_groups(self):
if config('disable-security-groups'):
return False
neutron_api_settings = NeutronAPIContext()()
return neutron_api_settings['neutron_security_groups']
def disable_mlockall(self):
'''
Determine if Open vSwitch use of mlockall() should be disabled
If the disable-mlockall config option is unset, mlockall will be
disabled if running in a container and will default to enabled if
not running in a container.
'''
disable_mlockall = config('disable-mlockall')
if disable_mlockall is None:
disable_mlockall = False
if is_container():
disable_mlockall = True
cmp_release = CompareOpenStackReleases(
os_release('neutron-common', base='icehouse'))
return (cmp_release >= 'mitaka' and disable_mlockall)
def ovs_ctxt(self):
# In addition to generating config context, ensure the OVS service
# is running and the OVS bridge exists. Also need to ensure
# local_ip points to actual IP, not hostname.
ovs_ctxt = super(OVSPluginContext, self).ovs_ctxt()
if not ovs_ctxt:
return {}
conf = config()
fallback = get_host_ip(unit_get('private-address'))
if config('os-data-network'):
# NOTE: prefer any existing use of config based networking
ovs_ctxt['local_ip'] = \
get_address_in_network(config('os-data-network'),
fallback)
else:
# NOTE: test out network-spaces support, then fallback
try:
ovs_ctxt['local_ip'] = get_host_ip(
network_get_primary_address('data')
)
except NotImplementedError:
ovs_ctxt['local_ip'] = fallback
neutron_api_settings = NeutronAPIContext()()
ovs_ctxt['neutron_security_groups'] = self.neutron_security_groups
ovs_ctxt['l2_population'] = neutron_api_settings['l2_population']
ovs_ctxt['distributed_routing'] = neutron_api_settings['enable_dvr']
ovs_ctxt['extension_drivers'] = neutron_api_settings[
'extension_drivers']
ovs_ctxt['overlay_network_type'] = \
neutron_api_settings['overlay_network_type']
ovs_ctxt['polling_interval'] = neutron_api_settings['polling_interval']
ovs_ctxt['rpc_response_timeout'] = \
neutron_api_settings['rpc_response_timeout']
ovs_ctxt['report_interval'] = neutron_api_settings['report_interval']
# TODO: We need to sort out the syslog and debug/verbose options as a
# general context helper
ovs_ctxt['use_syslog'] = conf['use-syslog']
ovs_ctxt['verbose'] = conf['verbose']
ovs_ctxt['debug'] = conf['debug']
ovs_ctxt['prevent_arp_spoofing'] = conf['prevent-arp-spoofing']
ovs_ctxt['enable_dpdk'] = conf['enable-dpdk']
ovs_ctxt['keepalived_healthcheck_interval'] = \
conf['keepalived-healthcheck-interval']
ovs_ctxt['disable_mlockall'] = self.disable_mlockall()
net_dev_mtu = neutron_api_settings.get('network_device_mtu')
if net_dev_mtu:
# neutron.conf
ovs_ctxt['network_device_mtu'] = net_dev_mtu
# ml2 conf
ovs_ctxt['veth_mtu'] = net_dev_mtu
mappings = config('bridge-mappings')
if mappings:
ovs_ctxt['bridge_mappings'] = ','.join(mappings.split())
sriov_mappings = config('sriov-device-mappings')
if sriov_mappings:
ovs_ctxt['sriov_device_mappings'] = (
','.join(sriov_mappings.split())
)
enable_sriov = config('enable-sriov')
if enable_sriov:
ovs_ctxt['enable_sriov'] = True
sriov_numvfs = config('sriov-numvfs')
if sriov_numvfs:
try:
if sriov_numvfs != 'auto':
int(sriov_numvfs)
except ValueError:
ovs_ctxt['sriov_vfs_list'] = sriov_numvfs
else:
ovs_ctxt['sriov_vfs_blanket'] = sriov_numvfs
flat_providers = config('flat-network-providers')
if flat_providers:
ovs_ctxt['network_providers'] = ','.join(flat_providers.split())
vlan_ranges = config('vlan-ranges')
if vlan_ranges:
ovs_ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())
ovs_ctxt['enable_nsg_logging'] = \
neutron_api_settings['enable_nsg_logging']
ovs_ctxt['nsg_log_output_base'] = get_nsg_log_path(
config('security-group-log-output-base')
)
ovs_ctxt['nsg_log_rate_limit'] = \
config('security-group-log-rate-limit')
ovs_ctxt['nsg_log_burst_limit'] = \
config('security-group-log-burst-limit')
ovs_ctxt['firewall_driver'] = _get_firewall_driver(ovs_ctxt)
if ovs_ctxt['firewall_driver'] != OPENVSWITCH:
ovs_ctxt['enable_nsg_logging'] = False
ovs_ctxt['of_inactivity_probe'] = config('of-inactivity-probe')
return ovs_ctxt
class ZoneContext(OSContextGenerator):
def __call__(self):
"""Return the 'default_availability_zone' from the principal that this
ovs unit is attached to (as a subordinate)
:returns: {} if no relation set, or
{'availability_zone': availability_zone from principal relation}
"""
# as ovs is a subordinate charm, it should only have one relation to
# its principal charm. Thus we can take the 1st (only) element in each
# list.
rids = relation_ids('neutron-plugin')
ctxt = {}
if rids:
rid = rids[0]
units = related_units(rid)
if units:
availability_zone = relation_get(
'default_availability_zone',
rid=rid,
unit=units[0])
if availability_zone:
ctxt['availability_zone'] = availability_zone
return ctxt
class L3AgentContext(OSContextGenerator):
def __call__(self):
neutron_api_settings = NeutronAPIContext()()
ctxt = {}
if neutron_api_settings['enable_dvr']:
use_dvr_snat = config('use-dvr-snat')
agent_mode = 'dvr_snat' if use_dvr_snat else 'dvr'
ctxt['agent_mode'] = agent_mode
ctxt['use_l3ha'] = neutron_api_settings.get('enable_l3ha', False)
if not config('ext-port'):
ctxt['external_configuration_new'] = True
else:
ctxt['agent_mode'] = 'legacy'
ctxt['enable_nfg_logging'] = (
neutron_api_settings['enable_nfg_logging']
)
ctxt['nfg_log_output_base'] = validate_nfg_log_path(
config('firewall-group-log-output-base')
)
ctxt['nfg_log_rate_limit'] = config(
'firewall-group-log-rate-limit'
)
if ctxt['nfg_log_rate_limit'] is not None:
ctxt['nfg_log_rate_limit'] = max(
ctxt['nfg_log_rate_limit'],
NFG_LOG_RATE_LIMIT_MIN
)
ctxt['nfg_log_burst_limit'] = config(
'firewall-group-log-burst-limit'
)
if ctxt['nfg_log_burst_limit'] is not None:
ctxt['nfg_log_burst_limit'] = max(
ctxt['nfg_log_burst_limit'],
NFG_LOG_BURST_LIMIT_MIN
)
l3_extension_plugins = neutron_api_settings.get(
'l3_extension_plugins', [])
ctxt['l3_extension_plugins'] = ','.join(l3_extension_plugins)
return ctxt
SHARED_SECRET = "/etc/neutron/secret.txt"
def get_shared_secret():
secret = None
if not os.path.exists(SHARED_SECRET):
secret = str(uuid.uuid4())
write_file(SHARED_SECRET, secret,
perms=0o400)
else:
os.chmod(SHARED_SECRET, 0o400)
with open(SHARED_SECRET, 'r') as secret_file:
secret = secret_file.read().strip()
return secret
class SharedSecretContext(OSContextGenerator):
def __call__(self):
if NeutronAPIContext()()['enable_dvr'] or \
config('enable-local-dhcp-and-metadata'):
ctxt = {
'shared_secret': get_shared_secret(),
}
else:
ctxt = {}
return ctxt
class RemoteRestartContext(OSContextGenerator):
def __init__(self, interfaces=None):
self.interfaces = interfaces or ['neutron-plugin']
def __call__(self):
rids = []
for interface in self.interfaces:
rids.extend(relation_ids(interface))
ctxt = {}
for rid in rids:
for unit in related_units(rid):
remote_data = relation_get(
rid=rid,
unit=unit)
for k, v in remote_data.items():
if k.startswith('restart-trigger'):
restart_key = k.replace('-', '_')
try:
ctxt[restart_key].append(v)
except KeyError:
ctxt[restart_key] = [v]
for restart_key in ctxt.keys():
ctxt[restart_key] = '-'.join(sorted(ctxt[restart_key]))
return ctxt
class APIIdentityServiceContext(context.IdentityServiceContext):
def __init__(self):
super(APIIdentityServiceContext,
self).__init__(rel_name='neutron-plugin-api')
def __call__(self):
ctxt = super(APIIdentityServiceContext, self).__call__()
if not ctxt:
return
for rid in relation_ids('neutron-plugin-api'):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
ctxt['region'] = rdata.get('region')
if ctxt['region']:
return ctxt
return ctxt
| 33.35514 | 79 | 0.612006 |
fb7f9fb3181dee5ddc7af47e73dea54387c79eba | 32 | py | Python | anjay.py | eechzaan/project | f842f5ceb0e7653c51e410413371a08e3b1b0f62 | [
"MIT"
] | null | null | null | anjay.py | eechzaan/project | f842f5ceb0e7653c51e410413371a08e3b1b0f62 | [
"MIT"
] | null | null | null | anjay.py | eechzaan/project | f842f5ceb0e7653c51e410413371a08e3b1b0f62 | [
"MIT"
] | null | null | null |
for i in range(10):
print()
| 6.4 | 19 | 0.5625 |
1919e4dc4a184f40fb2eb92a78e8ef67161a93b0 | 1,516 | py | Python | compilation/threshold-write-image.py | stcastle/shell-detection | cdc49190deae7310db66e56574b6737771821f31 | [
"BSD-3-Clause"
] | 3 | 2018-03-01T01:14:21.000Z | 2020-04-17T08:49:32.000Z | compilation/threshold-write-image.py | SamTCastle/shell-detection | cdc49190deae7310db66e56574b6737771821f31 | [
"BSD-3-Clause"
] | null | null | null | compilation/threshold-write-image.py | SamTCastle/shell-detection | cdc49190deae7310db66e56574b6737771821f31 | [
"BSD-3-Clause"
] | null | null | null | '''
threshold_write_image.py
Experiment with OpenCV for FITS files.
http://docs.opencv.org/trunk/doc/py_tutorials/py_tutorials.html
Author: S.T. Castle
Created: 20150226
'''
import numpy as np
import cv2
import os.path
def threshold(name, orig_img, blockSize, c):
'''
name: original filename of the image
orig_img: The image to be processed.
blockSize: size of the pixel neighborhood for adaptive thresholding.
c: Correction to apply before thresholding.
'''
img = np.ndarray.copy(orig_img) # Copy of original for smoothing.
# Split filename and extension.
spl = os.path.splitext(name)
name_pref = spl[0] # Name prefix.
# Apply a smoothing filter.
# Bilateral blur.
img = cv2.bilateralFilter(img,9,75,75)
name_pref = name_pref + '_smooth=bilat'
# Adaptive mean threshold.
am_name = name_pref + '_adapt-mean-thr' + spl[1]
am_thr = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
cv2.THRESH_BINARY,blockSize,c)
cv2.imwrite(am_name, am_thr)
# Adaptive Gaussian threshold.
ag_name = name_pref + '_adapt-gauss-thr' + spl[1]
ag_thr = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,blockSize,c)
cv2.imwrite(ag_name, ag_thr)
# Otsu's threshold.
ot_name = name_pref + '_otsu-thr' + spl[1]
ret, ot_thr = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imwrite(ot_name, ot_thr)
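# Example usage -- an illustrative sketch only. The file path, blockSize and c values below are
# assumptions; blockSize must be an odd integer greater than 1 for cv2.adaptiveThreshold.
if __name__ == '__main__':
    sample_path = 'example.png'  # hypothetical grayscale input image
    gray = cv2.imread(sample_path, cv2.IMREAD_GRAYSCALE)
    if gray is not None:
        threshold(sample_path, gray, blockSize=11, c=2)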
| 30.32 | 76 | 0.675462 |
468200722d3d4631d4cdf89007406ce8f257b074 | 5,603 | py | Python | tensorflow_datasets/question_answering/ai2_arc.py | ChAnYaNG97/datasets | 0a45e2ea98716d325fc1c5e5494f2575f3bdb908 | [
"Apache-2.0"
] | 1 | 2020-10-11T19:15:49.000Z | 2020-10-11T19:15:49.000Z | tensorflow_datasets/question_answering/ai2_arc.py | ChAnYaNG97/datasets | 0a45e2ea98716d325fc1c5e5494f2575f3bdb908 | [
"Apache-2.0"
] | 1 | 2021-02-23T20:16:05.000Z | 2021-02-23T20:16:05.000Z | tensorflow_datasets/question_answering/ai2_arc.py | ChAnYaNG97/datasets | 0a45e2ea98716d325fc1c5e5494f2575f3bdb908 | [
"Apache-2.0"
] | 1 | 2020-08-03T20:19:12.000Z | 2020-08-03T20:19:12.000Z | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ai2_arc dataset."""
import json
import os
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@article{allenai:arc,
author = {Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and
Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
title = {Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
journal = {arXiv:1803.05457v1},
year = {2018},
}
"""
_DESCRIPTION = """
A new dataset of 7,787 genuine grade-school level, multiple-choice science
questions, assembled to encourage research in advanced question-answering.
The dataset is partitioned into a Challenge Set and an Easy Set, where the
former contains only questions answered incorrectly by both a retrieval-based
algorithm and a word co-occurrence algorithm. We are also including a corpus
of over 14 million science sentences relevant to the task, and an
implementation of three neural baseline models for this dataset.
We pose ARC as a challenge to the community.
"""
_HOMEPAGE = "https://allenai.org/data/arc"
_URL = "https://ai2-datasets.s3-us-west-2.amazonaws.com/arc/ARC-V1-Feb2018.zip"
class Ai2ArcConfig(tfds.core.BuilderConfig):
"""BuilderConfig for Ai2ARC."""
def __init__(self, **kwargs):
"""BuilderConfig for Ai2Arc.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(Ai2ArcConfig, self).__init__(
version=tfds.core.Version("1.0.0"), **kwargs)
class Ai2Arc(tfds.core.GeneratorBasedBuilder):
"""The AI2 ARC dataset."""
BUILDER_CONFIGS = [
Ai2ArcConfig(
name="ARC-Challenge",
description="""\
Challenge Set of 2590 "hard" questions (those that both a retrieval and a co-occurrence method fail to answer correctly)
""",
),
Ai2ArcConfig(
name="ARC-Easy",
description="""\
Easy Set of 5197 questions for the ARC Challenge.
""",
),
]
def _info(self):
# Most questions have four possible answers, but a few have five.
options = ["A", "B", "C", "D", "E"]
return tfds.core.DatasetInfo(
builder=self,
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"id":
tfds.features.Text(),
"question":
tfds.features.Text(),
"choices":
tfds.features.Sequence({
"text": tfds.features.Text(),
"label": tfds.features.ClassLabel(names=options)
}),
"answerKey":
tfds.features.ClassLabel(names=options),
}),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "ARC-V1-Feb2018-2")
base_path = os.path.join(data_dir, self.builder_config.name)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"filepath":
os.path.join(base_path,
self.builder_config.name + "-Train.jsonl")
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
"filepath":
os.path.join(base_path,
self.builder_config.name + "-Dev.jsonl")
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
"filepath":
os.path.join(base_path,
self.builder_config.name + "-Test.jsonl")
},
),
]
def _generate_examples(self, filepath: str):
"""Yields examples. Compatible with huggingface's `nlp` format."""
# Generally labels are in the format "A", "B", "C", "D" but sometimes
# they are in the format "1", "2", "3", "4". We convert the later to the
# former for consistency.
n_to_l = dict(zip("1 2 3 4 5".split(), "A B C D E".split()))
with tf.io.gfile.GFile(filepath) as f:
for row in f:
data = json.loads(row)
answerkey = n_to_l.get(data["answerKey"], data["answerKey"])
id_ = data["id"]
question = data["question"]["stem"]
choices = data["question"]["choices"]
text_choices = [choice["text"] for choice in choices]
label_choices = [
n_to_l.get(choice["label"], choice["label"]) for choice in choices
]
yield id_, {
"id": id_,
"answerKey": answerkey,
"question": question,
"choices": {
"text": text_choices,
"label": label_choices
},
}
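# Illustrative usage sketch. The registered dataset name "ai2_arc" is an assumption derived from
# the builder class name; this block is not part of the original builder.
if __name__ == "__main__":
    ds = tfds.load("ai2_arc/ARC-Easy", split="validation")
    for example in ds.take(1):
        print(example["question"])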
| 34.374233 | 130 | 0.602177 |
1b864ad8e87cd52750a6e0ff60f762519a00f3fa | 2,499 | py | Python | speeches/behaviors/models.py | danesjenovdan/parlaspeeches | 5431a167a483d1d67c380417a9d0b85ccec96249 | [
"Unlicense"
] | null | null | null | speeches/behaviors/models.py | danesjenovdan/parlaspeeches | 5431a167a483d1d67c380417a9d0b85ccec96249 | [
"Unlicense"
] | 3 | 2020-02-12T00:33:20.000Z | 2021-06-10T20:09:24.000Z | speeches/behaviors/models.py | danesjenovdan/parlaspeeches | 5431a167a483d1d67c380417a9d0b85ccec96249 | [
"Unlicense"
] | 1 | 2017-08-26T11:24:19.000Z | 2017-08-26T11:24:19.000Z | from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils.fields import AutoCreatedField, AutoLastModifiedField
from autoslug import AutoSlugField
from datetime import datetime
from taggit.managers import TaggableManager
__author__ = 'guglielmo'
class GenericRelatable(models.Model):
"""
An abstract class that provides the possibility of generic relations
"""
content_type = models.ForeignKey(ContentType,
related_name='%(app_label)s_%(class)s_related')
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class Meta:
abstract = True
class Timestampable(models.Model):
"""
An abstract base class model that provides self-updating
``created`` and ``modified`` fields.
"""
created_at = AutoCreatedField(_('creation time'))
updated_at = AutoLastModifiedField(_('last modification time'))
class Meta:
abstract = True
class Versionable(models.Model):
"""
An abstract base class model that provides versioning fields
``valid_from`` and ``valid_to``
"""
valid_from = models.DateTimeField(
help_text=_('row valid from'),
blank=True,
null=True,
default=None
)
valid_to = models.DateTimeField(
help_text=_('row valid to'),
blank=True,
null=True,
default=None
)
class Meta:
abstract = True
class Permalinkable(models.Model):
"""
An abstract base class model that provides a unique slug,
and the methods necessary to handle the permalink
"""
from django.utils.text import slugify
slug = AutoSlugField(
populate_from=lambda instance: instance.slug_source,
unique=True,
slugify=slugify
)
class Meta:
abstract = True
def get_url_kwargs(self, **kwargs):
kwargs.update(getattr(self, 'url_kwargs', {}))
return kwargs
@models.permalink
def get_absolute_url(self):
url_kwargs = self.get_url_kwargs(slug=self.slug)
return (self.url_name, (), url_kwargs)
class Taggable(models.Model):
tags = TaggableManager()
class Meta:
abstract = True
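# Illustrative sketch of how these mixins are meant to be combined. Permalinkable expects the
# concrete class to provide `slug_source` (used to populate the slug) and `url_name` (used by
# get_absolute_url); the model, field and URL names below are assumptions.
class ExampleArticle(Timestampable, Permalinkable, models.Model):
    title = models.CharField(max_length=200)
    url_name = 'article-detail'
    @property
    def slug_source(self):
        return self.title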
| 27.163043 | 84 | 0.685474 |
7579009cc06014b7ad847f5bf5f07d1a9ad97df6 | 1,168 | py | Python | ddtrace/contrib/dogpile_cache/region.py | mykytarudenko/new-project | e06a912382239739dd3f93b54d545b9506102372 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | ddtrace/contrib/dogpile_cache/region.py | mykytarudenko/new-project | e06a912382239739dd3f93b54d545b9506102372 | [
"Apache-2.0",
"BSD-3-Clause"
] | 9 | 2021-07-26T01:22:38.000Z | 2022-03-21T19:20:53.000Z | ddtrace/contrib/dogpile_cache/region.py | mykytarudenko/new-project | e06a912382239739dd3f93b54d545b9506102372 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | import dogpile
from ddtrace.ext import SpanTypes
from ...constants import SPAN_MEASURED_KEY
from ...pin import Pin
def _wrap_get_create(func, instance, args, kwargs):
pin = Pin.get_from(dogpile.cache)
if not pin or not pin.enabled():
return func(*args, **kwargs)
key = args[0]
with pin.tracer.trace("dogpile.cache", resource="get_or_create", span_type=SpanTypes.CACHE) as span:
span.set_tag(SPAN_MEASURED_KEY)
span.set_tag("key", key)
span.set_tag("region", instance.name)
span.set_tag("backend", instance.actual_backend.__class__.__name__)
return func(*args, **kwargs)
def _wrap_get_create_multi(func, instance, args, kwargs):
pin = Pin.get_from(dogpile.cache)
if not pin or not pin.enabled():
return func(*args, **kwargs)
keys = args[0]
with pin.tracer.trace("dogpile.cache", resource="get_or_create_multi", span_type="cache") as span:
span.set_tag(SPAN_MEASURED_KEY)
span.set_tag("keys", keys)
span.set_tag("region", instance.name)
span.set_tag("backend", instance.actual_backend.__class__.__name__)
return func(*args, **kwargs)
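# How these wrappers are typically attached -- an illustrative sketch only; the real patch
# module may wire things up differently:
#
#   import dogpile.cache
#   from ddtrace import Pin
#   from wrapt import wrap_function_wrapper
#
#   Pin(service="dogpile.cache").onto(dogpile.cache)
#   wrap_function_wrapper("dogpile.cache.region", "CacheRegion.get_or_create", _wrap_get_create)
#   wrap_function_wrapper("dogpile.cache.region", "CacheRegion.get_or_create_multi",
#                         _wrap_get_create_multi)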
| 33.371429 | 104 | 0.683219 |
b74943a2017281116723e5750d9d310fe7ff23bb | 3,078 | py | Python | spiders/spider/spider/settings.py | dingzhaohan/deep_research | 8d4373e32d75a348368d46eca2dd1b26d8d93c4b | [
"Apache-2.0"
] | 2 | 2020-02-20T01:57:20.000Z | 2020-09-12T12:43:46.000Z | spiders/spider/spider/settings.py | dingzhaohan/deep_research | 8d4373e32d75a348368d46eca2dd1b26d8d93c4b | [
"Apache-2.0"
] | null | null | null | spiders/spider/spider/settings.py | dingzhaohan/deep_research | 8d4373e32d75a348368d46eca2dd1b26d8d93c4b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Scrapy settings for spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'spider'
SPIDER_MODULES = ['spider.spiders']
NEWSPIDER_MODULE = 'spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'spider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'spider.middlewares.SpiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'spider.middlewares.SpiderDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'spider.pipelines.SpiderPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 33.824176 | 103 | 0.774204 |
f62487cb8e45be0449175fff4320b71c15ad3da1 | 3,514 | py | Python | libestatecassandra/estate_cassandra.py | mpeuster/estate | 4cb94201e8110f09ac72c54e7d282e8c38aee415 | [
"Apache-2.0"
] | 1 | 2021-04-28T05:13:30.000Z | 2021-04-28T05:13:30.000Z | libestatecassandra/estate_cassandra.py | mpeuster/estate | 4cb94201e8110f09ac72c54e7d282e8c38aee415 | [
"Apache-2.0"
] | null | null | null | libestatecassandra/estate_cassandra.py | mpeuster/estate | 4cb94201e8110f09ac72c54e7d282e8c38aee415 | [
"Apache-2.0"
] | null | null | null | """
libestate prototype using cassandra as backend
idea: test different consistency models
"""
from cassandra import ConsistencyLevel
from cassandra.cqlengine import columns
from cassandra.cqlengine import connection
from cassandra.cqlengine import management
from cassandra.cqlengine import models
from cassandra.cqlengine import CQLEngineException
# define the model
class KeyValueItem(models.Model):
"""
    We use Cassandra's map column type to store the values of one key
    for all instances as well as the latest one.
    The "latest" field has map index -1.
"""
key = columns.Text(primary_key=True)
#instance = columns.Integer(primary_key=True)
#value = columns.Text()
data = columns.Map(columns.Integer, columns.Text)
def __repr__(self):
return 'KeyValueItem.%s' % (self.key)
def __str__(self):
return self.__repr__()
class estate(object):
def __init__(self, instance_id):
self.instance_id = int(instance_id)
# setup cassadnra connection
connection.setup(['127.0.0.1'], "estate1", consistency=ConsistencyLevel.ALL)
# TODO move this to an indipened reset DB script
if self.instance_id == 0:
print "FLUSH DB"
# flush database by first node (for development)
management.drop_keyspace("estate1")
# create needed tables
management.create_keyspace_simple("estate1", replication_factor=1, durable_writes=True)
management.sync_table(KeyValueItem)
print "ES: Initialized estate for instance: %s" % self.instance_id
def set(self, k, s):
print "ES: SET k=%s s=%s" % (str(k), str(s))
# always update map field "latest" (-1) to contain the latest value for this key
KeyValueItem.objects(key=k).update(data__update={-1: str(s), self.instance_id: str(s)})
return True
def get(self, k):
print "ES: GET k=%s" % (str(k))
try:
kvi = KeyValueItem.get(key=k)
val = str(kvi.data.get(self.instance_id))
return val if val != "None" else "ES_NONE"
except CQLEngineException:
return "ES_NONE"
def delete(self, k):
"""
Deletes item of this instance for the given key.
        Since Cassandra maps only support add and update, we set the value to None.
Attention: This will also set the "latest" map field to None.
"""
print "ES: DEL k=%s" % (str(k))
try:
#kvi = KeyValueItem.get(key=k)
#kvi.delete()
KeyValueItem.objects(key=k).update(data__update={-1: None, self.instance_id: None})
return True
except CQLEngineException:
return False
def _get_all_replicas(self, k):
try:
# return all map items, except of the latest field
return [v for i, v in KeyValueItem.get(key=k).data.iteritems() if i >= 0]
except CQLEngineException:
return []
def _get_newest_replica(self, k):
try:
# return all map items, except of the latest field
return KeyValueItem.get(key=k).data.get(-1)
except CQLEngineException:
return "ES_NONE"
def get_global(self, k, red_func):
print "ES: GET_GLOBAL k=%s f=%s" % (str(k), str(red_func))
if red_func is not None: # custom red function
return red_func(self._get_all_replicas(k))
return self._get_newest_replica(k) # return newest replica
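if __name__ == "__main__":
    # Minimal usage sketch; assumes a Cassandra node listening on 127.0.0.1 as configured above.
    es = estate(0)                         # instance 0 also flushes and recreates the keyspace
    es.set("answer", "42")
    print es.get("answer")                 # -> "42"
    print es.get_global("answer", None)    # newest replica across all instances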
| 33.788462 | 99 | 0.631759 |
f59e76b7bf9c99da6263ebfe38308b234796e431 | 420 | py | Python | ML_and_DL/loss_fucntion.py | AshfakYeafi/AI_practice_code | 3d8a0b9382f5903e840ce59218ebb95ca962ab01 | [
"MIT"
] | null | null | null | ML_and_DL/loss_fucntion.py | AshfakYeafi/AI_practice_code | 3d8a0b9382f5903e840ce59218ebb95ca962ab01 | [
"MIT"
] | null | null | null | ML_and_DL/loss_fucntion.py | AshfakYeafi/AI_practice_code | 3d8a0b9382f5903e840ce59218ebb95ca962ab01 | [
"MIT"
] | null | null | null | import numpy as np
y=np.array([1,0,1,1,1,0,0,1,0,1])
y_predict=np.array([1,1,1,0,1,0,0,1,0,0])
# Binary cross-entropy (log loss): -(y*log(p) + (1 - y)*log(1 - p))
ellipsis = 1e-15  # clipping constant (an epsilon) that keeps predictions away from exactly 0 or 1
def log_loss(y, y_predict):
    ellipsis = 1e-15
    # Clip predictions into [ellipsis, 1 - ellipsis] so np.log never receives 0.
    y_predict = np.array([max(i, ellipsis) for i in y_predict])
    y_predict = np.array([min(i, 1 - ellipsis) for i in y_predict])
    # Mean of the per-sample cross-entropy terms.
    return sum(-(y * np.log(y_predict) + (1 - y) * np.log(1 - y_predict))) / len(y)
print(log_loss(y,y_predict))
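# Quick illustration of why the 1e-15 clipping matters: predictions of exactly 0 or 1 on the
# wrong class would otherwise send np.log to -inf; with the clamp the loss stays finite
# (about -log(1e-15), i.e. roughly 34.5, for the worst case below).
y_edge = np.array([1, 0])
y_edge_predict = np.array([0.0, 1.0])
print(log_loss(y_edge, y_edge_predict))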
| 30 | 71 | 0.645238 |
4eec0382de1df8770e61f26998ee6b937f362553 | 2,473 | py | Python | moderngl_window/context/sdl2/keys.py | DavideRuzza/moderngl-window | e9debc6ed4a1899aa83c0da2320e03b0c2922b80 | [
"MIT"
] | 142 | 2019-11-11T23:14:28.000Z | 2022-03-29T08:37:03.000Z | moderngl_window/context/sdl2/keys.py | DavideRuzza/moderngl-window | e9debc6ed4a1899aa83c0da2320e03b0c2922b80 | [
"MIT"
] | 107 | 2019-10-31T20:31:45.000Z | 2022-03-23T15:01:41.000Z | moderngl_window/context/sdl2/keys.py | DavideRuzza/moderngl-window | e9debc6ed4a1899aa83c0da2320e03b0c2922b80 | [
"MIT"
] | 36 | 2019-12-12T16:14:10.000Z | 2022-01-18T22:58:21.000Z | # flake8: noqa E741
import sdl2
from moderngl_window.context.base import BaseKeys
class Keys(BaseKeys):
"""
Namespace mapping SDL2 specific key constants
"""
ACTION_PRESS = sdl2.SDL_KEYDOWN
ACTION_RELEASE = sdl2.SDL_KEYUP
ESCAPE = sdl2.SDLK_ESCAPE
SPACE = sdl2.SDLK_SPACE
ENTER = sdl2.SDLK_RETURN
PAGE_UP = sdl2.SDLK_PAGEUP
PAGE_DOWN = sdl2.SDLK_PAGEDOWN
LEFT = sdl2.SDLK_LEFT
RIGHT = sdl2.SDLK_RIGHT
UP = sdl2.SDLK_UP
DOWN = sdl2.SDLK_DOWN
TAB = sdl2.SDLK_TAB
COMMA = sdl2.SDLK_COMMA
MINUS = sdl2.SDLK_MINUS
PERIOD = sdl2.SDLK_PERIOD
SLASH = sdl2.SDLK_SLASH
SEMICOLON = sdl2.SDLK_SEMICOLON
EQUAL = sdl2.SDLK_EQUALS
LEFT_BRACKET = sdl2.SDLK_LEFTBRACKET
RIGHT_BRACKET = sdl2.SDLK_RIGHTBRACKET
BACKSLASH = sdl2.SDLK_BACKSLASH
BACKSPACE = sdl2.SDLK_BACKSPACE
INSERT = sdl2.SDLK_INSERT
DELETE = sdl2.SDLK_DELETE
HOME = sdl2.SDLK_HOME
END = sdl2.SDLK_END
CAPS_LOCK = sdl2.SDLK_CAPSLOCK
F1 = sdl2.SDLK_F1
F2 = sdl2.SDLK_F2
F3 = sdl2.SDLK_F3
F4 = sdl2.SDLK_F4
F5 = sdl2.SDLK_F5
F6 = sdl2.SDLK_F6
F7 = sdl2.SDLK_F7
F8 = sdl2.SDLK_F8
F9 = sdl2.SDLK_F9
F10 = sdl2.SDLK_F10
F11 = sdl2.SDLK_F11
F12 = sdl2.SDLK_F12
NUMBER_0 = sdl2.SDLK_0
NUMBER_1 = sdl2.SDLK_1
NUMBER_2 = sdl2.SDLK_2
NUMBER_3 = sdl2.SDLK_3
NUMBER_4 = sdl2.SDLK_4
NUMBER_5 = sdl2.SDLK_5
NUMBER_6 = sdl2.SDLK_6
NUMBER_7 = sdl2.SDLK_7
NUMBER_8 = sdl2.SDLK_8
NUMBER_9 = sdl2.SDLK_9
NUMPAD_0 = sdl2.SDLK_KP_0
NUMPAD_1 = sdl2.SDLK_KP_1
NUMPAD_2 = sdl2.SDLK_KP_2
NUMPAD_3 = sdl2.SDLK_KP_3
NUMPAD_4 = sdl2.SDLK_KP_4
NUMPAD_5 = sdl2.SDLK_KP_5
NUMPAD_6 = sdl2.SDLK_KP_6
NUMPAD_7 = sdl2.SDLK_KP_7
NUMPAD_8 = sdl2.SDLK_KP_8
NUMPAD_9 = sdl2.SDLK_KP_9
A = sdl2.SDLK_a
B = sdl2.SDLK_b
C = sdl2.SDLK_c
D = sdl2.SDLK_d
E = sdl2.SDLK_e
F = sdl2.SDLK_f
G = sdl2.SDLK_g
H = sdl2.SDLK_h
I = sdl2.SDLK_i
J = sdl2.SDLK_j
K = sdl2.SDLK_k
L = sdl2.SDLK_l
M = sdl2.SDLK_m
N = sdl2.SDLK_n
O = sdl2.SDLK_o
P = sdl2.SDLK_p
Q = sdl2.SDLK_q
R = sdl2.SDLK_r
S = sdl2.SDLK_s
T = sdl2.SDLK_t
U = sdl2.SDLK_u
V = sdl2.SDLK_v
W = sdl2.SDLK_w
X = sdl2.SDLK_x
Y = sdl2.SDLK_y
Z = sdl2.SDLK_z
| 24.009709 | 50 | 0.627173 |
164c03509ca80665fb3bd5a5d50998bd1fd59ebc | 522 | py | Python | src/core_backend/utils/json_print.py | jhchen3121/wechat_shop | c9d9ad009df1e5bb0eb23ca8d830dd5c15df5328 | [
"Apache-2.0"
] | null | null | null | src/core_backend/utils/json_print.py | jhchen3121/wechat_shop | c9d9ad009df1e5bb0eb23ca8d830dd5c15df5328 | [
"Apache-2.0"
] | 5 | 2021-01-28T21:18:27.000Z | 2022-03-25T19:10:01.000Z | src/core_backend/utils/json_print.py | jhchen3121/wechat_shop | c9d9ad009df1e5bb0eb23ca8d830dd5c15df5328 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
import json
from core_backend.utils.json import CustomerEncoder
from pygments import highlight
from pygments.lexers import JsonLexer
from pygments.formatters import TerminalFormatter
def json_pretty_print(j_obj):
json_str = json.dumps(j_obj, cls=CustomerEncoder, indent=4, ensure_ascii=False, encoding="utf8")
print(highlight(json_str, JsonLexer(), TerminalFormatter()).encode("utf8"))
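if __name__ == "__main__":
    # Minimal sketch: colourised pretty-print of a small structure (the sample data is illustrative).
    json_pretty_print({"name": "core_backend", "values": [1, 2, 3]})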
| 32.625 | 100 | 0.796935 |
44e1ab3156b8fe736839b72898a10a169b47b337 | 2,663 | py | Python | kornia/losses/divergence.py | ChristophReich1996/kornia | 35f955b46e8015da1cb9faa28c6943ec2b09cc2a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/losses/divergence.py | ChristophReich1996/kornia | 35f955b46e8015da1cb9faa28c6943ec2b09cc2a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/losses/divergence.py | ChristophReich1996/kornia | 35f955b46e8015da1cb9faa28c6943ec2b09cc2a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | r"""Losses based on the divergence between probability distributions."""
import torch
import torch.nn.functional as F
def _kl_div_2d(p: torch.Tensor, q: torch.Tensor) -> torch.Tensor:
# D_KL(P || Q)
batch, chans, height, width = p.shape
unsummed_kl = F.kl_div(
q.reshape(batch * chans, height * width).log(),
p.reshape(batch * chans, height * width),
reduction='none',
)
kl_values = unsummed_kl.sum(-1).view(batch, chans)
return kl_values
def _js_div_2d(p: torch.Tensor, q: torch.Tensor) -> torch.Tensor:
# JSD(P || Q)
m = 0.5 * (p + q)
return 0.5 * _kl_div_2d(p, m) + 0.5 * _kl_div_2d(q, m)
# TODO: add this to the main module
def _reduce_loss(losses: torch.Tensor, reduction: str) -> torch.Tensor:
if reduction == 'none':
return losses
return torch.mean(losses) if reduction == 'mean' else torch.sum(losses)
def js_div_loss_2d(input: torch.Tensor, target: torch.Tensor, reduction: str = 'mean'):
r"""Calculates the Jensen-Shannon divergence loss between heatmaps.
Args:
input (torch.Tensor): the input tensor with shape :math:`(B, N, H, W)`.
target (torch.Tensor): the target tensor with shape :math:`(B, N, H, W)`.
reduction (string, optional): Specifies the reduction to apply to the
output: `none` | `mean` | `sum`. `none`: no reduction
will be applied, `mean`: the sum of the output will be divided by
the number of elements in the output, `sum`: the output will be
summed. Default: `mean`.
Examples:
>>> input = torch.full((1, 1, 2, 4), 0.125)
>>> loss = js_div_loss_2d(input, input)
>>> loss.item()
0.0
"""
return _reduce_loss(_js_div_2d(target, input), reduction)
def kl_div_loss_2d(input: torch.Tensor, target: torch.Tensor, reduction: str = 'mean'):
r"""Calculates the Kullback-Leibler divergence loss between heatmaps.
Args:
input (torch.Tensor): the input tensor with shape :math:`(B, N, H, W)`.
target (torch.Tensor): the target tensor with shape :math:`(B, N, H, W)`.
reduction (string, optional): Specifies the reduction to apply to the
output: `none` | `mean` | `sum`. `none`: no reduction
will be applied, `mean`: the sum of the output will be divided by
the number of elements in the output, `sum`: the output will be
summed. Default: `mean`.
Examples:
>>> input = torch.full((1, 1, 2, 4), 0.125)
        >>> loss = kl_div_loss_2d(input, input)
>>> loss.item()
0.0
"""
return _reduce_loss(_kl_div_2d(target, input), reduction)
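if __name__ == "__main__":
    # Minimal sanity-check sketch (not a kornia API): build two random heatmaps whose values
    # sum to 1 over the spatial dimensions, then compare the two divergence losses.
    p = torch.softmax(torch.randn(2, 3, 8, 8).view(2, 3, -1), dim=-1).view(2, 3, 8, 8)
    q = torch.softmax(torch.randn(2, 3, 8, 8).view(2, 3, -1), dim=-1).view(2, 3, 8, 8)
    print(js_div_loss_2d(p, q).item(), kl_div_loss_2d(p, q).item())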
| 35.986486 | 87 | 0.622231 |
44ac336ed558349611b5be390683c1fe716d316f | 1,032 | py | Python | djAidESILV/shopping_cart/models.py | Kulumbaf/AidESILV | 04dad828048edffdd3662b24c415edce22fd3ea3 | [
"MIT"
] | null | null | null | djAidESILV/shopping_cart/models.py | Kulumbaf/AidESILV | 04dad828048edffdd3662b24c415edce22fd3ea3 | [
"MIT"
] | null | null | null | djAidESILV/shopping_cart/models.py | Kulumbaf/AidESILV | 04dad828048edffdd3662b24c415edce22fd3ea3 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
from account.models import UserProfile
from products.models import Product
class OrderItem(models.Model):
product = models.OneToOneField(Product, on_delete=models.SET_NULL, null=True)
is_ordered = models.BooleanField(default=False)
date_added = models.DateTimeField(auto_now=True)
date_ordered = models.DateTimeField(null=True)
def __str__(self):
return self.product.name
class Order(models.Model):
ref_code = models.CharField(max_length=15)
owner = models.ForeignKey(UserProfile, on_delete=models.SET_NULL, null=True)
is_ordered = models.BooleanField(default=False)
items = models.ManyToManyField(OrderItem)
date_ordered = models.DateTimeField(auto_now=True)
def get_cart_items(self):
return self.items.all()
def get_cart_total(self):
return sum([item.product.price for item in self.items.all()])
def __str__(self):
return '{0} - {1}'.format(self.owner, self.ref_code) | 31.272727 | 81 | 0.737403 |
9835c10978719609d7260513131261f1b1a0579d | 16,506 | py | Python | scripts/create_fakes.py | lbanner/osf.io | 1898ef0ff8bd91713e94c60e7463b5f81ac62caa | [
"Apache-2.0"
] | null | null | null | scripts/create_fakes.py | lbanner/osf.io | 1898ef0ff8bd91713e94c60e7463b5f81ac62caa | [
"Apache-2.0"
] | null | null | null | scripts/create_fakes.py | lbanner/osf.io | 1898ef0ff8bd91713e94c60e7463b5f81ac62caa | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Fake data generator.
To use:
1. Install fake-factory.
pip install fake-factory
2. Create your OSF user account
3. Run the script, passing in your username (email).
::
python -m scripts.create_fakes --user fred@cos.io
This will create 3 fake public projects, each with 3 fake contributors (with
you as the creator).
"""
from __future__ import print_function
import sys
import argparse
import logging
from modularodm.query.querydialect import DefaultQueryDialect as Q
from faker import Factory
from framework.auth import Auth
from website.app import init_app
from website import models, security
from framework.auth import utils
from tests.factories import UserFactory, ProjectFactory, NodeFactory
from faker.providers import BaseProvider
class Sciencer(BaseProvider):
# Science term Faker Provider created by @csheldonhess
# https://github.com/csheldonhess/FakeConsumer/blob/master/faker/providers/science.py
word_list = ('abiosis', 'abrade', 'absorption', 'acceleration', 'accumulation',
'acid', 'acidic', 'activist', 'adaptation', 'agonistic', 'agrarian', 'airborne',
'alchemist', 'alignment', 'allele', 'alluvial', 'alveoli', 'ambiparous',
'amphibian', 'amplitude', 'analysis', 'ancestor', 'anodize', 'anomaly',
'anther', 'antigen', 'apiary', 'apparatus', 'application', 'approximation',
'aquatic', 'aquifer', 'arboreal', 'archaeology', 'artery', 'assessment',
'asteroid', 'atmosphere', 'atomic', 'atrophy', 'attenuate', 'aven', 'aviary',
'axis', 'bacteria', 'balance', 'bases', 'biome', 'biosphere', 'black hole',
'blight', 'buoyancy', 'calcium', 'canopy', 'capacity', 'capillary', 'carapace',
'carcinogen', 'catalyst', 'cauldron', 'celestial', 'cells', 'centigrade',
'centimeter', 'centrifugal', 'chemical reaction', 'chemicals', 'chemistry',
'chlorophyll', 'choked', 'chromosome', 'chronic', 'churn', 'classification',
'climate', 'cloud', 'comet', 'composition', 'compound', 'compression',
'condensation', 'conditions', 'conduction', 'conductivity', 'conservation',
'constant', 'constellation', 'continental', 'convection', 'convention', 'cool',
'core', 'cosmic', 'crater', 'creature', 'crepuscular', 'crystals', 'cycle', 'cytoplasm',
'dampness', 'data', 'decay', 'decibel', 'deciduous', 'defoliate', 'density',
'denude', 'dependency', 'deposits', 'depth', 'desiccant', 'detritus',
'development', 'digestible', 'diluted', 'direction', 'disappearance', 'discovery',
'dislodge', 'displace', 'dissection', 'dissolution', 'dissolve', 'distance',
'diurnal', 'diverse', 'doldrums', 'dynamics', 'earthquake', 'eclipse', 'ecology',
'ecosystem', 'electricity', 'elements', 'elevation', 'embryo', 'endangered',
'endocrine', 'energy', 'entropy', 'environment', 'enzyme', 'epidermis', 'epoch',
'equilibrium', 'equine', 'erosion', 'essential', 'estuary', 'ethical', 'evaporation',
'event', 'evidence', 'evolution', 'examination', 'existence', 'expansion',
'experiment', 'exploration ', 'extinction', 'extreme', 'facet', 'fault', 'fauna',
'feldspar', 'fermenting', 'fission', 'fissure', 'flora', 'flourish', 'flowstone',
'foliage', 'food chain', 'forage', 'force', 'forecast', 'forensics', 'formations',
'fossil fuel', 'frequency', 'friction', 'fungi', 'fusion', 'galaxy', 'gastric',
'geo-science', 'geothermal', 'germination', 'gestation', 'global', 'gravitation',
'green', 'greenhouse effect', 'grotto', 'groundwater', 'habitat', 'heat', 'heavens',
'hemisphere', 'hemoglobin', 'herpetologist', 'hormones', 'host', 'humidity', 'hyaline',
'hydrogen', 'hydrology', 'hypothesis', 'ichthyology', 'illumination', 'imagination',
'impact of', 'impulse', 'incandescent', 'indigenous', 'inertia', 'inevitable', 'inherit',
'inquiry', 'insoluble', 'instinct', 'instruments', 'integrity', 'intelligence',
'interacts with', 'interdependence', 'interplanetary', 'invertebrate', 'investigation',
'invisible', 'ions', 'irradiate', 'isobar', 'isotope', 'joule', 'jungle', 'jurassic',
'jutting', 'kilometer', 'kinetics', 'kingdom', 'knot', 'laser', 'latitude', 'lava',
'lethal', 'life', 'lift', 'light', 'limestone', 'lipid', 'lithosphere', 'load',
'lodestone', 'luminous', 'luster', 'magma', 'magnet', 'magnetism', 'mangrove', 'mantle',
'marine', 'marsh', 'mass', 'matter', 'measurements', 'mechanical', 'meiosis', 'meridian',
'metamorphosis', 'meteor', 'microbes', 'microcosm', 'migration', 'millennia', 'minerals',
'modulate', 'moisture', 'molecule', 'molten', 'monograph', 'monolith', 'motion',
'movement', 'mutant', 'mutation', 'mysterious', 'natural', 'navigable', 'navigation',
'negligence', 'nervous system', 'nesting', 'neutrons', 'niche', 'nocturnal',
'nuclear energy', 'numerous', 'nurture', 'obsidian', 'ocean', 'oceanography', 'omnivorous',
'oolites (cave pearls)', 'opaque', 'orbit', 'organ', 'organism', 'ornithology',
'osmosis', 'oxygen', 'paleontology', 'parallax', 'particle', 'penumbra',
'percolate', 'permafrost', 'permutation', 'petrify', 'petrograph', 'phenomena',
'physical property', 'planetary', 'plasma', 'polar', 'pole', 'pollination',
'polymer', 'population', 'precipitation', 'predator', 'prehensile', 'preservation',
'preserve', 'pressure', 'primate', 'pristine', 'probe', 'process', 'propagation',
'properties', 'protected', 'proton', 'pulley', 'qualitative data', 'quantum', 'quark',
'quarry', 'radiation', 'radioactivity', 'rain forest', 'ratio', 'reaction', 'reagent',
'realm', 'redwoods', 'reeds', 'reflection', 'refraction', 'relationships between', 'reptile',
'research', 'resistance', 'resonate', 'rookery', 'rubble', 'runoff', 'salinity', 'sandbar',
'satellite', 'saturation', 'scientific investigation', 'scientist\'s', 'sea floor', 'season',
'sedentary', 'sediment', 'sedimentary', 'seepage', 'seismic', 'sensors', 'shard',
'similarity', 'solar', 'soluble', 'solvent', 'sonic', 'sound', 'source', 'species',
'spectacular', 'spectrum', 'speed', 'sphere', 'spring', 'stage', 'stalactite',
'stalagmites', 'stimulus', 'substance', 'subterranean', 'sulfuric acid', 'surface',
'survival', 'swamp', 'sylvan', 'symbiosis', 'symbol', 'synergy', 'synthesis', 'taiga',
'taxidermy', 'technology', 'tectonics', 'temperate', 'temperature', 'terrestrial',
'thermals', 'thermometer', 'thrust', 'torque', 'toxin', 'trade winds', 'pterodactyl',
'transformation tremors', 'tropical', 'umbra', 'unbelievable', 'underwater', 'unearth',
'unique', 'unite', 'unity', 'universal', 'unpredictable', 'unusual', 'ursine', 'vacuole',
'valuable', 'vapor', 'variable', 'variety', 'vast', 'velocity', 'ventifact', 'verdant',
'vespiary', 'viable', 'vibration', 'virus', 'viscosity', 'visible', 'vista', 'vital',
'vitreous', 'volt', 'volume', 'vulpine', 'wave', 'wax', 'weather', 'westerlies', 'wetlands',
'whitewater', 'xeriscape', 'xylem', 'yield', 'zero-impact', 'zone', 'zygote', 'achieving',
'acquisition of', 'an alternative', 'analysis of', 'approach toward', 'area', 'aspects of',
'assessment of', 'assuming', 'authority', 'available', 'benefit of', 'circumstantial',
'commentary', 'components', 'concept of', 'consistent', 'corresponding', 'criteria',
'data', 'deduction', 'demonstrating', 'derived', 'distribution', 'dominant', 'elements',
'equation', 'estimate', 'evaluation', 'factors', 'features', 'final', 'function',
'initial', 'instance ', 'interpretation of', 'maintaining ', 'method', 'perceived',
'percent', 'period', 'positive', 'potential', 'previous', 'primary', 'principle',
'procedure', 'process', 'range', 'region', 'relevant', 'required', 'research',
'resources', 'response', 'role', 'section', 'select', 'significant ', 'similar',
'source', 'specific', 'strategies', 'structure', 'theory', 'transfer', 'variables',
'corvidae', 'passerine', 'Pica pica', 'Chinchilla lanigera', 'Nymphicus hollandicus',
'Melopsittacus undulatus', )
def science_word(cls):
"""
:example 'Lorem'
"""
return cls.random_element(cls.word_list)
def science_words(cls, nb=3):
"""
Generate an array of random words
:example array('Lorem', 'ipsum', 'dolor')
:param nb how many words to return
"""
return [cls.science_word() for _ in range(0, nb)]
def science_sentence(cls, nb_words=6, variable_nb_words=True):
"""
Generate a random sentence
:example 'Lorem ipsum dolor sit amet.'
:param nb_words around how many words the sentence should contain
:param variable_nb_words set to false if you want exactly $nbWords returned,
otherwise $nbWords may vary by +/-40% with a minimum of 1
"""
if nb_words <= 0:
return ''
if variable_nb_words:
nb_words = cls.randomize_nb_elements(nb_words)
words = cls.science_words(nb_words)
words[0] = words[0].title()
return " ".join(words) + '.'
def science_sentences(cls, nb=3):
"""
Generate an array of sentences
:example array('Lorem ipsum dolor sit amet.', 'Consectetur adipisicing eli.')
:param nb how many sentences to return
:return list
"""
return [cls.science_sentence() for _ in range(0, nb)]
def science_paragraph(cls, nb_sentences=3, variable_nb_sentences=True):
"""
Generate a single paragraph
:example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
:param nb_sentences around how many sentences the paragraph should contain
:param variable_nb_sentences set to false if you want exactly $nbSentences returned,
otherwise $nbSentences may vary by +/-40% with a minimum of 1
:return string
"""
if nb_sentences <= 0:
return ''
if variable_nb_sentences:
nb_sentences = cls.randomize_nb_elements(nb_sentences)
return " ".join(cls.science_sentences(nb_sentences))
def science_paragraphs(cls, nb=3):
"""
Generate an array of paragraphs
:example array($paragraph1, $paragraph2, $paragraph3)
:param nb how many paragraphs to return
:return array
"""
return [cls.science_paragraph() for _ in range(0, nb)]
def science_text(cls, max_nb_chars=200):
"""
Generate a text string.
Depending on the $maxNbChars, returns a string made of words, sentences, or paragraphs.
:example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
:param max_nb_chars Maximum number of characters the text should contain (minimum 5)
:return string
"""
text = []
if max_nb_chars < 5:
raise ValueError('text() can only generate text of at least 5 characters')
if max_nb_chars < 25:
# join words
while not text:
size = 0
# determine how many words are needed to reach the $max_nb_chars once;
while size < max_nb_chars:
word = (' ' if size else '') + cls.science_word()
text.append(word)
size += len(word)
text.pop()
text[0] = text[0][0].upper() + text[0][1:]
last_index = len(text) - 1
text[last_index] += '.'
elif max_nb_chars < 100:
# join sentences
while not text:
size = 0
# determine how many sentences are needed to reach the $max_nb_chars once
while size < max_nb_chars:
sentence = (' ' if size else '') + cls.science_sentence()
text.append(sentence)
size += len(sentence)
text.pop()
else:
# join paragraphs
while not text:
size = 0
# determine how many paragraphs are needed to reach the $max_nb_chars once
while size < max_nb_chars:
paragraph = ('\n' if size else '') + cls.science_paragraph()
text.append(paragraph)
size += len(paragraph)
text.pop()
return "".join(text)
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.ERROR)
fake = Factory.create()
fake.add_provider(Sciencer)
def create_fake_user():
email = fake.email()
name = fake.name()
parsed = utils.impute_names(name)
user = UserFactory.build(username=email, fullname=name,
is_registered=True, is_claimed=True,
verification_key=security.random_string(15),
date_registered=fake.date_time(),
emails=[email],
**parsed
)
user.set_password('faker123')
user.save()
logger.info('Created user: {0} <{1}>'.format(user.fullname, user.username))
return user
def parse_args():
parser = argparse.ArgumentParser(description='Create fake data.')
parser.add_argument('-u', '--user', dest='user', required=True)
parser.add_argument('--nusers', dest='n_users', type=int, default=3)
parser.add_argument('--nprojects', dest='n_projects', type=int, default=3)
parser.add_argument('--ncomponents', dest='n_components', type=int, default=0)
parser.add_argument('-p', '--privacy', dest="privacy", type=str, default='private', choices=['public', 'private'])
parser.add_argument('-n', '--name', dest='name', type=str, default=None)
parser.add_argument('-t', '--tags', dest='n_tags', type=int, default=5)
parser.add_argument('--presentation', dest='presentation_name', type=str, default=None)
return parser.parse_args()
def create_fake_project(creator, n_users, privacy, n_components, name, n_tags, presentation_name):
auth = Auth(user=creator)
project_title = name if name else fake.science_sentence()
project = ProjectFactory.build(title=project_title, description=fake.science_paragraph(), creator=creator)
project.set_privacy(privacy)
for _ in range(n_users):
contrib = create_fake_user()
project.add_contributor(contrib, auth=auth)
for _ in range(n_components):
NodeFactory(project=project, title=fake.science_sentence(), description=fake.science_paragraph(),
creator=creator)
for _ in range(n_tags):
project.add_tag(fake.science_word(), auth=auth)
if presentation_name is not None:
project.add_tag(presentation_name, auth=auth)
project.add_tag('poster', auth=auth)
project.save()
logger.info('Created project: {0}'.format(project.title))
return project
def main():
args = parse_args()
creator = models.User.find(Q('username', 'eq', args.user))[0]
for i in range(args.n_projects):
name = args.name + str(i) if args.name else ''
create_fake_project(creator, args.n_users, args.privacy, args.n_components, name, args.n_tags,
args.presentation_name)
print('Created {n} fake projects.'.format(n=args.n_projects))
sys.exit(0)
if __name__ == '__main__':
app = init_app('website.settings', set_backends=True, routes=True)
main()
| 53.073955 | 118 | 0.591058 |
11ccd1f6ff25eb6ec3298ee5dc66c04fb6232f1c | 3,038 | py | Python | TemaLib/tema/coverage/dummycoverage.py | tema-tut/tema-tg | 9c3f119c8bf5cc565e6a3e8e9e6205037e326d89 | [
"MIT"
] | null | null | null | TemaLib/tema/coverage/dummycoverage.py | tema-tut/tema-tg | 9c3f119c8bf5cc565e6a3e8e9e6205037e326d89 | [
"MIT"
] | null | null | null | TemaLib/tema/coverage/dummycoverage.py | tema-tut/tema-tg | 9c3f119c8bf5cc565e6a3e8e9e6205037e326d89 | [
"MIT"
] | 1 | 2021-03-27T21:27:32.000Z | 2021-03-27T21:27:32.000Z | #!/usr/bin/env python
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Dummy coverage implements the requirement interface but always reports zero
percentage. It can be used for test runs that should run without a
stopping condition.
"""
# tema libraries:
import tema.coverage.coverage as coverage
# python standard
import re
class DummyCoverage(coverage.CoverageStorage, coverage.Requirement):
def __init__(self, reqstr, **kw):
# CoverageStorage constructor is skipped here
# because there should be no need to use this as
# a coverage storage. That is, this coverage should
# not be used with guidance algorithms that calculate
# forward. If it still is, a warning is given
self._warning_given = 0
if reqstr!="":
self.log("WARNING: DummyCoverage got a non-empty coverage requirement: '%s'" % reqstr)
self.log("Initialized")
def setParameter(self, parametername, value ):
print __doc__
raise Exception("Invalid parameter '%s' for dummycoverage." % parametername)
# CoverageStorage interface:
def markExecuted(self,transition):
# Mark execute is quite ok. We just do not care.
pass
def push(self):
        # Push and pop are bad: someone is trying to see if the
        # percentage gets better after a few steps. It won't,
        # because this coverage always reports zero percentage.
        # Therefore, give a warning.
if self._warning_given == 0:
self._warning_given = 1
self.log("WARNING: DummyCoverage is used with too smart guidance algorithm.")
def pop(self):
# see push()
self.push()
# Requirement interface:
def getPercentage(self):
return 0.0
def pickDataValue(self,set_of_possible_values):
# This coverage requirement does not favor any particular data
# values.
return None
CoverageRequirement = DummyCoverage
| 36.60241 | 98 | 0.704411 |
bf23af668967713752b766aaa38ef65787a2a859 | 2,187 | py | Python | backend/test_34087/urls.py | crowdbotics-apps/test-34087 | e04f7ebd844388dae45dbf765e988716c3a0aeb5 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/test_34087/urls.py | crowdbotics-apps/test-34087 | e04f7ebd844388dae45dbf765e988716c3a0aeb5 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/test_34087/urls.py | crowdbotics-apps/test-34087 | e04f7ebd844388dae45dbf765e988716c3a0aeb5 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """test_34087 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Test"
admin.site.site_title = "Test Admin Portal"
admin.site.index_title = "Test Admin"
# swagger
api_info = openapi.Info(
title="Test API",
default_version="v1",
description="API documentation for Test App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
| 34.714286 | 87 | 0.709191 |
58d5048ecfe35af7e845e7b271b9793f82e1e34e | 1,996 | py | Python | cvpods/layers/swap_align2nat.py | hanqiu-hq/cvpods | 597fa669151fdad87c250fa118a9e3a555f4fb5e | [
"Apache-2.0"
] | null | null | null | cvpods/layers/swap_align2nat.py | hanqiu-hq/cvpods | 597fa669151fdad87c250fa118a9e3a555f4fb5e | [
"Apache-2.0"
] | null | null | null | cvpods/layers/swap_align2nat.py | hanqiu-hq/cvpods | 597fa669151fdad87c250fa118a9e3a555f4fb5e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from cvpods import _C
class _SwapAlign2Nat(Function):
@staticmethod
def forward(ctx, X, lambda_val, pad_val):
ctx.lambda_val = lambda_val
ctx.input_shape = X.size()
Y = _C.swap_align2nat_forward(X, lambda_val, pad_val)
return Y
@staticmethod
@once_differentiable
def backward(ctx, gY):
lambda_val = ctx.lambda_val
bs, ch, h, w = ctx.input_shape
gX = _C.swap_align2nat_backward(gY, lambda_val, bs, ch, h, w)
return gX, None, None
swap_align2nat = _SwapAlign2Nat.apply
class SwapAlign2Nat(nn.Module):
"""
The op `SwapAlign2Nat` described in https://arxiv.org/abs/1903.12174.
Given an input tensor that predicts masks of shape (N, C=VxU, H, W),
apply the op, it will return masks of shape (N, V'xU', H', W') where
the unit lengths of (V, U) and (H, W) are swapped, and the mask representation
is transformed from aligned to natural.
Args:
lambda_val (int): the relative unit length ratio between (V, U) and (H, W),
as we always have larger unit lengths for (V, U) than (H, W),
lambda_val is always >= 1.
pad_val (float): padding value for the values falling outside of the input
tensor, default set to -6 as sigmoid(-6) is ~0, indicating
that is no masks outside of the tensor.
"""
def __init__(self, lambda_val, pad_val=-6.0):
super(SwapAlign2Nat, self).__init__()
self.lambda_val = lambda_val
self.pad_val = pad_val
def forward(self, X):
return swap_align2nat(X, self.lambda_val, self.pad_val)
def extra_repr(self):
tmpstr = "lambda_val=" + str(self.lambda_val)
tmpstr += ", pad_val=" + str(self.pad_val)
return tmpstr
| 33.266667 | 87 | 0.64479 |
57fb13d098052cbca253144c458e8a54087f07f8 | 15,734 | py | Python | pyzoo/zoo/ray/raycontext.py | taogeanton2/analytics-zoo | e111f445486d971c6dbc3bfde313cff9842cc163 | [
"Apache-2.0"
] | 1 | 2020-05-18T02:37:10.000Z | 2020-05-18T02:37:10.000Z | pyzoo/zoo/ray/raycontext.py | taogeanton2/analytics-zoo | e111f445486d971c6dbc3bfde313cff9842cc163 | [
"Apache-2.0"
] | null | null | null | pyzoo/zoo/ray/raycontext.py | taogeanton2/analytics-zoo | e111f445486d971c6dbc3bfde313cff9842cc163 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import random
import signal
import multiprocessing
from pyspark import BarrierTaskContext
from zoo.ray.process import session_execute, ProcessMonitor
from zoo.ray.utils import is_local
from zoo.ray.utils import resource_to_bytes
class JVMGuard:
"""
    Registered pids are put into the kill list of the Spark executor.
"""
@staticmethod
def register_pids(pids):
import traceback
try:
from zoo.common.utils import callZooFunc
import zoo
callZooFunc("float",
"jvmGuardRegisterPids",
pids)
except Exception as err:
print(traceback.format_exc())
print("Cannot sucessfully register pid into JVMGuard")
for pid in pids:
os.kill(pid, signal.SIGKILL)
raise err
class RayServiceFuncGenerator(object):
"""
This should be a pickable class.
"""
def _prepare_env(self):
modified_env = os.environ.copy()
cwd = os.getcwd()
modified_env["PATH"] = "{}/{}:{}".format(cwd, "/".join(self.python_loc.split("/")[:-1]),
os.environ["PATH"])
modified_env.pop("MALLOC_ARENA_MAX", None)
modified_env.pop("RAY_BACKEND_LOG_LEVEL", None)
# Unset all MKL setting as Analytics Zoo would give default values when init env.
# Running different programs may need different configurations.
modified_env.pop("intra_op_parallelism_threads", None)
modified_env.pop("inter_op_parallelism_threads", None)
modified_env.pop("OMP_NUM_THREADS", None)
modified_env.pop("KMP_BLOCKTIME", None)
modified_env.pop("KMP_AFFINITY", None)
modified_env.pop("KMP_SETTINGS", None)
if self.env: # Add in env argument if any MKL setting is needed.
modified_env.update(self.env)
if self.verbose:
print("Executing with these environment setting:")
for pair in modified_env.items():
print(pair)
print("The $PATH is: {}".format(modified_env["PATH"]))
return modified_env
def __init__(self, python_loc, redis_port, ray_node_cpu_cores,
password, object_store_memory, verbose=False, env=None,
extra_params=None):
"""object_store_memory: integer in bytes"""
self.env = env
self.python_loc = python_loc
self.redis_port = redis_port
self.password = password
self.ray_node_cpu_cores = ray_node_cpu_cores
self.ray_exec = self._get_ray_exec()
self.object_store_memory = object_store_memory
self.extra_params = extra_params
self.verbose = verbose
# _mxnet_worker and _mxnet_server are resource tags for distributed MXNet training only
# in order to diff worker from server.
# This is useful to allocate workers and servers in the cluster.
# Leave some reserved custom resources free to avoid unknown crash due to resources.
self.labels = \
"""--resources='{"_mxnet_worker": %s, "_mxnet_server": %s, "_reserved": %s}' """ \
% (1, 1, 2)
def gen_stop(self):
def _stop(iter):
command = "{} stop".format(self.ray_exec)
print("Start to end the ray services: {}".format(command))
session_execute(command=command, fail_fast=True)
return iter
return _stop
@staticmethod
def _enrich_command(command, object_store_memory, extra_params):
if object_store_memory:
command = command + "--object-store-memory {} ".format(str(object_store_memory))
if extra_params:
for pair in extra_params.items():
command = command + " --{} {} ".format(pair[0], pair[1])
return command
def _gen_master_command(self):
command = "{} start --head " \
"--include-webui true --redis-port {} " \
"--redis-password {} --num-cpus {} {}". \
format(self.ray_exec, self.redis_port, self.password,
self.ray_node_cpu_cores, self.labels)
return RayServiceFuncGenerator._enrich_command(command=command,
object_store_memory=self.object_store_memory,
extra_params=self.extra_params)
@staticmethod
def _get_raylet_command(redis_address,
ray_exec,
password,
ray_node_cpu_cores,
labels="",
object_store_memory=None,
extra_params=None):
command = "{} start --address {} --redis-password {} --num-cpus {} {} ".format(
ray_exec, redis_address, password, ray_node_cpu_cores, labels)
return RayServiceFuncGenerator._enrich_command(command=command,
object_store_memory=object_store_memory,
extra_params=extra_params)
def _start_ray_node(self, command, tag):
modified_env = self._prepare_env()
print("Starting {} by running: {}".format(tag, command))
process_info = session_execute(command=command, env=modified_env, tag=tag)
JVMGuard.register_pids(process_info.pids)
import ray.services as rservices
process_info.node_ip = rservices.get_node_ip_address()
return process_info
def _get_ray_exec(self):
python_bin_dir = "/".join(self.python_loc.split("/")[:-1])
return "{}/python {}/ray".format(python_bin_dir, python_bin_dir)
def gen_ray_start(self):
def _start_ray_services(iter):
tc = BarrierTaskContext.get()
# The address is sorted by partitionId according to the comments
# Partition 0 is the Master
task_addrs = [taskInfo.address for taskInfo in tc.getTaskInfos()]
print(task_addrs)
master_ip = task_addrs[0].split(":")[0]
print("current address {}".format(task_addrs[tc.partitionId()]))
print("master address {}".format(master_ip))
redis_address = "{}:{}".format(master_ip, self.redis_port)
process_info = None
if tc.partitionId() == 0:
print("partition id is : {}".format(tc.partitionId()))
process_info = self._start_ray_node(command=self._gen_master_command(),
tag="ray-master")
process_info.master_addr = redis_address
tc.barrier()
if tc.partitionId() != 0:
print("partition id is : {}".format(tc.partitionId()))
process_info = self._start_ray_node(
command=RayServiceFuncGenerator._get_raylet_command(
redis_address=redis_address,
ray_exec=self.ray_exec,
password=self.password,
ray_node_cpu_cores=self.ray_node_cpu_cores,
labels=self.labels,
object_store_memory=self.object_store_memory,
extra_params=self.extra_params),
tag="raylet")
yield process_info
return _start_ray_services
class RayContext(object):
def __init__(self, sc, redis_port=None, password="123456", object_store_memory=None,
verbose=False, env=None, extra_params=None):
"""
        RayContext initiates a Ray cluster on top of the configuration of the SparkContext.
        After creating a RayContext, call its init method to set up the cluster.
- For Spark local mode: The total available cores is equal to Spark local cores.
- For Spark cluster mode: The number of raylets is equal to number of executors.
The number of available cores for each raylet is equal to executor cores.
:param sc: An instance of SparkContext.
:param redis_port: redis port for the "head" node.
The value would be randomly picked if not specified.
:param password: Password for the redis. Default to be "123456" if not specified.
:param object_store_memory: The memory size for ray object_store in string.
This can be specified in bytes(b), kilobytes(k), megabytes(m) or gigabytes(g).
For example, 50b, 100k, 250m, 30g.
:param verbose: True for more logs when starting ray. Default is False.
:param env: The environment variable dict for running ray processes. Default is None.
:param extra_params: The key value dict for extra options to launch ray.
For example, extra_params={"temp-dir": "/tmp/ray/"}
"""
assert sc is not None, "sc cannot be None, please create a SparkContext first"
self.sc = sc
self.stopped = False
self.is_local = is_local(sc)
self.verbose = verbose
self.redis_password = password
self.object_store_memory = resource_to_bytes(object_store_memory)
self.ray_processesMonitor = None
self.env = env
self.extra_params = extra_params
if self.is_local:
self.num_ray_nodes = 1
self.ray_node_cpu_cores = self._get_spark_local_cores()
# For Spark local mode, directly call ray.init() and ray.shutdown().
# ray.shutdown() would clear up all the ray related processes.
# Ray Manager is only needed for Spark cluster mode to monitor ray processes.
else:
self.num_ray_nodes = int(self.sc.getConf().get("spark.executor.instances"))
self.ray_node_cpu_cores = int(self.sc.getConf().get("spark.executor.cores"))
self.python_loc = os.environ['PYSPARK_PYTHON']
self.redis_port = random.randint(10000, 65535) if not redis_port else redis_port
self.ray_service = RayServiceFuncGenerator(
python_loc=self.python_loc,
redis_port=self.redis_port,
ray_node_cpu_cores=self.ray_node_cpu_cores,
password=self.redis_password,
object_store_memory=self.object_store_memory,
verbose=self.verbose,
env=self.env,
extra_params=self.extra_params)
self._gather_cluster_ips()
from bigdl.util.common import init_executor_gateway
print("Start to launch the JVM guarding process")
init_executor_gateway(sc)
print("JVM guarding process has been successfully launched")
def _gather_cluster_ips(self):
total_cores = int(self.num_ray_nodes) * int(self.ray_node_cpu_cores)
def info_fn(iter):
tc = BarrierTaskContext.get()
task_addrs = [taskInfo.address.split(":")[0] for taskInfo in tc.getTaskInfos()]
yield task_addrs
tc.barrier()
ips = self.sc.range(0, total_cores,
numSlices=total_cores).barrier().mapPartitions(info_fn).collect()
return ips[0]
def stop(self):
if self.stopped:
print("This instance has been stopped.")
return
import ray
ray.shutdown()
if not self.is_local:
if not self.ray_processesMonitor:
print("Please start the runner first before closing it")
else:
self.ray_processesMonitor.clean_fn()
self.stopped = True
def purge(self):
"""
Invoke ray stop to clean ray processes.
"""
if self.stopped:
print("This instance has been stopped.")
return
if self.is_local:
import ray
ray.shutdown()
else:
self.sc.range(0,
self.num_ray_nodes,
numSlices=self.num_ray_nodes).barrier().mapPartitions(
self.ray_service.gen_stop()).collect()
self.stopped = True
def _get_spark_local_cores(self):
local_symbol = re.match(r"local\[(.*)\]", self.sc.master).group(1)
if local_symbol == "*":
return multiprocessing.cpu_count()
else:
return int(local_symbol)
def init(self, driver_cores=0):
"""
Initiate the ray cluster.
:param driver_cores: The number of cores for the raylet on driver for Spark cluster mode.
Default is 0 and in this case the local driver wouldn't have any ray workload.
"""
self.stopped = False
if self.is_local:
if self.env:
os.environ.update(self.env)
import ray
ray.init(num_cpus=self.ray_node_cpu_cores,
object_store_memory=self.object_store_memory,
resources=self.extra_params)
else:
self._start_cluster()
self._start_driver(num_cores=driver_cores)
def _start_cluster(self):
print("Start to launch ray on cluster")
ray_rdd = self.sc.range(0, self.num_ray_nodes,
numSlices=self.num_ray_nodes)
process_infos = ray_rdd.barrier().mapPartitions(
self.ray_service.gen_ray_start()).collect()
self.ray_processesMonitor = ProcessMonitor(process_infos, self.sc, ray_rdd, self,
verbose=self.verbose)
self.redis_address = self.ray_processesMonitor.master.master_addr
return self
def _start_restricted_worker(self, num_cores, node_ip_address):
extra_param = {"node-ip-address": node_ip_address}
if self.extra_params is not None:
extra_param.update(self.extra_params)
command = RayServiceFuncGenerator._get_raylet_command(
redis_address=self.redis_address,
ray_exec="ray ",
password=self.redis_password,
ray_node_cpu_cores=num_cores,
object_store_memory=self.object_store_memory,
extra_params=extra_param)
print("Executing command: {}".format(command))
process_info = session_execute(command=command, fail_fast=True)
ProcessMonitor.register_shutdown_hook(pgid=process_info.pgid)
def _start_driver(self, num_cores=0):
print("Start to launch ray driver on local")
import ray
if not self.is_local:
import ray.services
node_ip = ray.services.get_node_ip_address(self.redis_address)
self._start_restricted_worker(num_cores=num_cores,
node_ip_address=node_ip)
ray.shutdown()
ray.init(address=self.redis_address,
redis_password=self.ray_service.password,
node_ip_address=node_ip)
else:
ray.shutdown()
ray.init(address=self.redis_address,
redis_password=self.ray_service.password)
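# Illustrative usage sketch (comments only), based on the RayContext class above; the
# SparkContext setup and the memory value are assumptions for illustration, not defaults.
#
#   from pyspark import SparkContext
#   sc = SparkContext(master="local[4]", appName="ray-on-spark-demo")  # assumed setup
#   ray_ctx = RayContext(sc, object_store_memory="2g", verbose=True)
#   ray_ctx.init()   # local mode calls ray.init(); cluster mode starts one raylet per executor
#   # ... submit ray remote tasks here ...
#   ray_ctx.stop()   # shuts ray down (and the process monitor in cluster mode)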
| 43.464088 | 100 | 0.60722 |
fd02b1f55bd7f591a8c2f654b1d53ca75fd4e963 | 820 | py | Python | alipay/aop/api/domain/AlipayOpenXwbtesttomsgapiSyncModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/AlipayOpenXwbtesttomsgapiSyncModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/AlipayOpenXwbtesttomsgapiSyncModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenXwbtesttomsgapiSyncModel(object):
def __init__(self):
self._xwb = None
@property
def xwb(self):
return self._xwb
@xwb.setter
def xwb(self, value):
self._xwb = value
def to_alipay_dict(self):
params = dict()
if self.xwb:
if hasattr(self.xwb, 'to_alipay_dict'):
params['xwb'] = self.xwb.to_alipay_dict()
else:
params['xwb'] = self.xwb
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenXwbtesttomsgapiSyncModel()
if 'xwb' in d:
o.xwb = d['xwb']
return o
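# Illustrative round-trip sketch (comments only); the sample value "hello" is an
# arbitrary placeholder.
#
#   model = AlipayOpenXwbtesttomsgapiSyncModel()
#   model.xwb = "hello"
#   d = model.to_alipay_dict()                                   # {'xwb': 'hello'}
#   restored = AlipayOpenXwbtesttomsgapiSyncModel.from_alipay_dict(d)
#   restored.xwb                                                 # 'hello'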
| 20 | 57 | 0.558537 |
43e7468c37ee21f3be050bab3fc7c2c58568418c | 1,844 | py | Python | mat.py | arcangelo7/Meta-Analyser-Tool | c6fc6bb1870a10efe39634687b388946dd8bb762 | [
"0BSD"
] | null | null | null | mat.py | arcangelo7/Meta-Analyser-Tool | c6fc6bb1870a10efe39634687b388946dd8bb762 | [
"0BSD"
] | null | null | null | mat.py | arcangelo7/Meta-Analyser-Tool | c6fc6bb1870a10efe39634687b388946dd8bb762 | [
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2019,
# Silvio Peroni <silvio.peroni@unibo.it>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
# The following importing line is used to include in the definition of this class
# the particular functions implemented by a group. The 'my_test_group' module specified
# here is just a placeholder, since it defines only the signature of the various
# functions but it returns always None.
from acari import *
class MetaAnalyserTool(object):
def __init__(self, metadata_file_path):
self.data = process_metadata(metadata_file_path)
def get_ids(self, str_value, field_set):
return do_get_ids(self.data, str_value, field_set)
def get_by_id(self, id, field_set):
return do_get_by_id(self.data, id, field_set)
def filter(self, field_value_list):
return do_filter(self.data, field_value_list)
def coauthor_graph(self, author_id, level):
return do_coauthor_graph(self.data, author_id, level)
def author_network(self):
return do_author_network(self.data)
def retrieve_tree_of_venues(self, no_ids):
return do_retrieve_tree_of_venues(self.data, no_ids)
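# Illustrative usage sketch (comments only): the metadata path, query string and field
# names below are placeholders, since the underlying 'acari' functions are defined elsewhere.
#
#   mat = MetaAnalyserTool("metadata.csv")                   # assumed CSV path
#   ids = mat.get_ids("knowledge graph", {"title"})          # assumed field name
#   record = mat.get_by_id("doi:10.1000/xyz123", {"title", "year"})  # assumed id
#   venues = mat.retrieve_tree_of_venues(no_ids=True)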
| 40.977778 | 87 | 0.755423 |
40ee4ed83f923065e39ef73fb28892862e50e4db | 1,264 | py | Python | quantnode/structures.py | quantnode/quantnode-client | 64c849d1f79e1113b7d408a6727284c3ff5b054e | [
"MIT"
] | 1 | 2015-03-07T18:57:11.000Z | 2015-03-07T18:57:11.000Z | quantnode/structures.py | quantnode/quantnode-client | 64c849d1f79e1113b7d408a6727284c3ff5b054e | [
"MIT"
] | null | null | null | quantnode/structures.py | quantnode/quantnode-client | 64c849d1f79e1113b7d408a6727284c3ff5b054e | [
"MIT"
] | null | null | null | class DotDict(dict):
"""
A 'dotted dictionary'
Simple dictionary wrapper that overrides the dot operator with dictionary getter and setter
so that instance['value'] is equivalent to instance.value
"""
def __setattr__(self, k, v):
if k[0] == '_' or k in self.__dict__:
return super(DotDict, self).__setattr__(k, v)
else:
self[k] = v
def __getattr__(self, k):
if k[0] == '_':
raise AttributeError(k)
try:
return self[k]
except KeyError, err:
# return None
return float('nan')
# raise AttributeError(*err.args)
@staticmethod
def ToDotDict(data):
"""
Recurisvely transforms a dict to a dotted dictionary
"""
if isinstance(data, dict):
for k, v in data.iteritems():
if isinstance(v, dict):
data[k] = DotDict(v)
DotDict.ToDotDict(data[k])
elif isinstance(v, list):
data[k] = [DotDict.ToDotDict(i) for i in v]
elif isinstance(data, list):
return [DotDict.ToDotDict(i) for i in data]
else:
return data
return DotDict(data)
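# Minimal usage sketch (comments only); the nested payload is invented purely to show
# dotted access. The module targets Python 2 (iteritems), so the sketch assumes a
# Python 2 interpreter as well.
#
#   raw = {"account": {"id": 7, "positions": [{"symbol": "AAPL", "qty": 10}]}}
#   dd = DotDict.ToDotDict(raw)
#   dd.account.id                    # -> 7
#   dd.account.positions[0].symbol   # -> 'AAPL'
#   dd.account.missing               # -> nan (missing keys fall back to float('nan'))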
| 29.395349 | 95 | 0.528481 |
77e86dadcc855cf0ba381407dc2772546becf579 | 2,470 | py | Python | interview_qns/linked_list.py | shiram/learning-django-rest | 3e6d26212429c423851cef047eea3c7a820e8e58 | [
"Apache-2.0"
] | null | null | null | interview_qns/linked_list.py | shiram/learning-django-rest | 3e6d26212429c423851cef047eea3c7a820e8e58 | [
"Apache-2.0"
] | null | null | null | interview_qns/linked_list.py | shiram/learning-django-rest | 3e6d26212429c423851cef047eea3c7a820e8e58 | [
"Apache-2.0"
] | null | null | null | class Node:
def __init__(self, dataval=None):
self.dataval = dataval
self.nextval = None
class SingleLinkedList:
def __init__(self):
self.headval = None
def traverse(self):
printval = self.headval
list_string = ""
while printval is not None:
list_string += str(printval.dataval) + "->"
printval = printval.nextval
list_string += "(Next Node)"
print(list_string)
def valueExists(self, value):
current_value = self.headval
while current_value is not None:
if (current_value.dataval == value) and (type(current_value.dataval) == type(value)):
return True
current_value = current_value.nextval
return False
def insertAtBegin(self, newdata):
newNode = Node(newdata)
newNode.nextval = self.headval
self.headval = newNode
def insertAtEnd(self, newdata):
newNode = Node(newdata)
if self.headval is None:
self.headval = newNode
return
last = self.headval
while(last.nextval):
last = last.nextval
last.nextval = newNode
def insertInbetween(self, middle_node, newdata):
if middle_node is None:
print("The mentioned node is absent")
return
newNode = Node(newdata)
newNode.nextval = middle_node.nextval
middle_node.nextval = newNode
def removeNode(self, remove_key):
headVal = self.headval
if (headVal is not None):
if (headVal.dataval == remove_key):
self.headval = headVal.nextval
headVal = None
return
while headVal is not None:
if headVal.dataval == remove_key:
break
prev = headVal
headVal = headVal.nextval
if headVal == None:
return
prev.nextval = headVal.nextval
headVal = None
list1 = SingleLinkedList()
list1.headval = Node("Mon")
e2, e3, e4 = Node("Tue"), Node("Wed"), Node("Thur")
list1.headval.nextval = e2
e2.nextval = e3
e3.nextval = e4
list1.insertAtBegin("Sun")
list1.insertAtEnd("Fri")
list1.insertInbetween(e3.nextval, "WhatDay")
list1.insertAtEnd(1)
list1.insertAtBegin(0)
list1.insertAtEnd(False)
list1.removeNode("WhatDay")
list1.traverse()
if list1.valueExists(False):
print("Value Exists")
else:
print("Value Does not Exist")
| 27.752809 | 97 | 0.601215 |
abdce969985bba575a471cbd06f1f9eaccbc27eb | 6,647 | py | Python | .pc/hg-updates.diff/Lib/tkinter/test/test_tkinter/test_variables.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 486 | 2016-05-28T18:51:54.000Z | 2022-03-20T17:30:31.000Z | .pc/hg-updates.diff/Lib/tkinter/test/test_tkinter/test_variables.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 40 | 2016-05-29T00:24:56.000Z | 2020-07-13T11:56:58.000Z | .pc/hg-updates.diff/Lib/tkinter/test/test_tkinter/test_variables.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 74 | 2015-05-29T17:18:53.000Z | 2022-01-15T14:06:44.000Z | import unittest
from tkinter import (Variable, StringVar, IntVar, DoubleVar, BooleanVar, Tcl,
TclError)
class Var(Variable):
_default = "default"
side_effect = False
def set(self, value):
self.side_effect = True
super().set(value)
class TestBase(unittest.TestCase):
def setUp(self):
self.root = Tcl()
def tearDown(self):
del self.root
class TestVariable(TestBase):
def info_exists(self, *args):
return self.root.getboolean(self.root.call("info", "exists", *args))
def test_default(self):
v = Variable(self.root)
self.assertEqual("", v.get())
self.assertRegex(str(v), r"^PY_VAR(\d+)$")
def test_name_and_value(self):
v = Variable(self.root, "sample string", "varname")
self.assertEqual("sample string", v.get())
self.assertEqual("varname", str(v))
def test___del__(self):
self.assertFalse(self.info_exists("varname"))
v = Variable(self.root, "sample string", "varname")
self.assertTrue(self.info_exists("varname"))
del v
self.assertFalse(self.info_exists("varname"))
def test_dont_unset_not_existing(self):
self.assertFalse(self.info_exists("varname"))
v1 = Variable(self.root, name="name")
v2 = Variable(self.root, name="name")
del v1
self.assertFalse(self.info_exists("name"))
# shouldn't raise exception
del v2
self.assertFalse(self.info_exists("name"))
def test___eq__(self):
# values doesn't matter, only class and name are checked
v1 = Variable(self.root, name="abc")
v2 = Variable(self.root, name="abc")
self.assertEqual(v1, v2)
v3 = Variable(self.root, name="abc")
v4 = StringVar(self.root, name="abc")
self.assertNotEqual(v3, v4)
def test_invalid_name(self):
with self.assertRaises(TypeError):
Variable(self.root, name=123)
def test_null_in_name(self):
with self.assertRaises(ValueError):
Variable(self.root, name='var\x00name')
with self.assertRaises(ValueError):
self.root.globalsetvar('var\x00name', "value")
with self.assertRaises(ValueError):
self.root.globalsetvar(b'var\x00name', "value")
with self.assertRaises(ValueError):
self.root.setvar('var\x00name', "value")
with self.assertRaises(ValueError):
self.root.setvar(b'var\x00name', "value")
def test_initialize(self):
v = Var(self.root)
self.assertFalse(v.side_effect)
v.set("value")
self.assertTrue(v.side_effect)
class TestStringVar(TestBase):
def test_default(self):
v = StringVar(self.root)
self.assertEqual("", v.get())
def test_get(self):
v = StringVar(self.root, "abc", "name")
self.assertEqual("abc", v.get())
self.root.globalsetvar("name", "value")
self.assertEqual("value", v.get())
def test_get_null(self):
v = StringVar(self.root, "abc\x00def", "name")
self.assertEqual("abc\x00def", v.get())
self.root.globalsetvar("name", "val\x00ue")
self.assertEqual("val\x00ue", v.get())
class TestIntVar(TestBase):
def test_default(self):
v = IntVar(self.root)
self.assertEqual(0, v.get())
def test_get(self):
v = IntVar(self.root, 123, "name")
self.assertEqual(123, v.get())
self.root.globalsetvar("name", "345")
self.assertEqual(345, v.get())
def test_invalid_value(self):
v = IntVar(self.root, name="name")
self.root.globalsetvar("name", "value")
with self.assertRaises((ValueError, TclError)):
v.get()
self.root.globalsetvar("name", "345.0")
with self.assertRaises((ValueError, TclError)):
v.get()
class TestDoubleVar(TestBase):
def test_default(self):
v = DoubleVar(self.root)
self.assertEqual(0.0, v.get())
def test_get(self):
v = DoubleVar(self.root, 1.23, "name")
self.assertAlmostEqual(1.23, v.get())
self.root.globalsetvar("name", "3.45")
self.assertAlmostEqual(3.45, v.get())
def test_get_from_int(self):
v = DoubleVar(self.root, 1.23, "name")
self.assertAlmostEqual(1.23, v.get())
self.root.globalsetvar("name", "3.45")
self.assertAlmostEqual(3.45, v.get())
self.root.globalsetvar("name", "456")
self.assertAlmostEqual(456, v.get())
def test_invalid_value(self):
v = DoubleVar(self.root, name="name")
self.root.globalsetvar("name", "value")
with self.assertRaises((ValueError, TclError)):
v.get()
class TestBooleanVar(TestBase):
def test_default(self):
v = BooleanVar(self.root)
self.assertIs(v.get(), False)
def test_get(self):
v = BooleanVar(self.root, True, "name")
self.assertIs(v.get(), True)
self.root.globalsetvar("name", "0")
self.assertIs(v.get(), False)
self.root.globalsetvar("name", 42 if self.root.wantobjects() else 1)
self.assertIs(v.get(), True)
self.root.globalsetvar("name", 0)
self.assertIs(v.get(), False)
self.root.globalsetvar("name", "on")
self.assertIs(v.get(), True)
def test_set(self):
true = 1 if self.root.wantobjects() else "1"
false = 0 if self.root.wantobjects() else "0"
v = BooleanVar(self.root, name="name")
v.set(True)
self.assertEqual(self.root.globalgetvar("name"), true)
v.set("0")
self.assertEqual(self.root.globalgetvar("name"), false)
v.set(42)
self.assertEqual(self.root.globalgetvar("name"), true)
v.set(0)
self.assertEqual(self.root.globalgetvar("name"), false)
v.set("on")
self.assertEqual(self.root.globalgetvar("name"), true)
def test_invalid_value_domain(self):
false = 0 if self.root.wantobjects() else "0"
v = BooleanVar(self.root, name="name")
with self.assertRaises(TclError):
v.set("value")
self.assertEqual(self.root.globalgetvar("name"), false)
self.root.globalsetvar("name", "value")
with self.assertRaises(ValueError):
v.get()
self.root.globalsetvar("name", "1.0")
with self.assertRaises(ValueError):
v.get()
tests_gui = (TestVariable, TestStringVar, TestIntVar,
TestDoubleVar, TestBooleanVar)
if __name__ == "__main__":
from test.support import run_unittest
run_unittest(*tests_gui)
| 31.206573 | 77 | 0.606289 |
8053e20f581178fa1653e5a1574177dd5703ab87 | 74 | py | Python | mlsim/__init__.py | brownsarahm/ml-sim | 26c03e65c57a2ffc3784e6e79eaa76100f0bf391 | [
"MIT"
] | 1 | 2020-11-18T21:08:17.000Z | 2020-11-18T21:08:17.000Z | mlsim/__init__.py | brownsarahm/ml-sim | 26c03e65c57a2ffc3784e6e79eaa76100f0bf391 | [
"MIT"
] | 2 | 2020-02-11T21:37:03.000Z | 2020-07-09T01:10:49.000Z | mlsim/__init__.py | brownsarahm/ml-sim | 26c03e65c57a2ffc3784e6e79eaa76100f0bf391 | [
"MIT"
] | null | null | null |
from . import anomaly
from . import bias
__all__ = ['bias','anomaly']
| 9.25 | 28 | 0.662162 |
89c0bf36d7b103b646df6f313f741523feb852e7 | 634 | py | Python | agent0/common/vec_env/__init__.py | zhoubin-me/agent0 | 1184827077e43dfa63e1f24a004fcc6c3e3d5130 | [
"MIT"
] | null | null | null | agent0/common/vec_env/__init__.py | zhoubin-me/agent0 | 1184827077e43dfa63e1f24a004fcc6c3e3d5130 | [
"MIT"
] | null | null | null | agent0/common/vec_env/__init__.py | zhoubin-me/agent0 | 1184827077e43dfa63e1f24a004fcc6c3e3d5130 | [
"MIT"
] | null | null | null | from .dummy_vec_env import DummyVecEnv
from .shmem_vec_env import ShmemVecEnv
from .subproc_vec_env import SubprocVecEnv
from .vec_env import AlreadySteppingError, NotSteppingError, VecEnv, VecEnvWrapper, VecEnvObservationWrapper, \
CloudpickleWrapper
from .vec_frame_stack import VecFrameStack
from .vec_monitor import VecMonitor
from .vec_remove_dict_obs import VecExtractDictObs
__all__ = ['AlreadySteppingError', 'NotSteppingError', 'VecEnv', 'VecEnvWrapper', 'VecEnvObservationWrapper', 'CloudpickleWrapper', 'DummyVecEnv', 'ShmemVecEnv', 'SubprocVecEnv', 'VecFrameStack', 'VecMonitor', 'VecNormalize', 'VecExtractDictObs']
| 57.636364 | 246 | 0.829653 |
465d237f4c57946c16db4d17be14dff3747dbb86 | 2,978 | py | Python | conans/test/functional/deploy_test.py | amatoshka/conan | c2726e8c255adb120b5f7bdee9e3ec0bc90f1d7a | [
"MIT"
] | null | null | null | conans/test/functional/deploy_test.py | amatoshka/conan | c2726e8c255adb120b5f7bdee9e3ec0bc90f1d7a | [
"MIT"
] | 2 | 2018-02-22T21:28:04.000Z | 2018-09-28T13:51:47.000Z | conans/test/functional/deploy_test.py | amatoshka/conan | c2726e8c255adb120b5f7bdee9e3ec0bc90f1d7a | [
"MIT"
] | null | null | null | import os
import unittest
from parameterized.parameterized import parameterized
from conans.model.manifest import FileTreeManifest
from conans.test.utils.test_files import temp_folder
from conans.test.utils.tools import TestClient
from conans.util.files import load, mkdir
class DeployTest(unittest.TestCase):
@parameterized.expand([(True, ), (False, )])
def deploy_test(self, deploy_to_abs):
client = TestClient()
libconanfile = """from conans import ConanFile
from conans.tools import save
class Lib(ConanFile):
exports_sources = "*"
def build(self):
save("mylib.dll", "mydll")
def package(self):
self.copy("*")
def deploy(self):
self.output.info("Lib deploy()")
"""
client.save({"conanfile.py": libconanfile,
"License.md": "lib license",
"otherfile": ""})
client.run("create . Lib/0.1@user/testing")
self.assertNotIn("Lib deploy()", client.out)
if deploy_to_abs:
dll_folder = temp_folder()
mkdir(dll_folder)
else:
dll_folder = ""
conanfile = """from conans import ConanFile
from conans.tools import save
class Pkg(ConanFile):
requires = "Lib/0.1@user/testing"
def build(self):
save("myapp.exe", "myexe")
def package(self):
self.copy("*")
def deploy(self):
self.output.info("Pkg deploy()")
self.copy("*.exe")
self.copy_deps("*.dll", dst="%s")
""" % dll_folder.replace("\\", "/")
client.save({"conanfile.py": conanfile})
client.run("create . Pkg/0.1@user/testing")
self.assertNotIn("deploy()", client.out)
def test_install_in(folder):
client.current_folder = temp_folder()
client.run("install Pkg/0.1@user/testing --install-folder=%s" % folder)
self.assertIn("Pkg/0.1@user/testing deploy(): Copied 1 '.dll' file: mylib.dll",
client.out)
self.assertIn("Pkg/0.1@user/testing deploy(): Copied 1 '.exe' file: myapp.exe",
client.out)
deploy_manifest = FileTreeManifest.loads(load(os.path.join(client.current_folder,
folder,
"deploy_manifest.txt")))
app = os.path.abspath(os.path.join(client.current_folder, folder, "myapp.exe"))
if deploy_to_abs:
lib = os.path.join(dll_folder, "mylib.dll")
else:
lib = os.path.abspath(os.path.join(client.current_folder, folder, "mylib.dll"))
self.assertEqual(sorted([app, lib]),
sorted(deploy_manifest.file_sums.keys()))
self.assertEqual(load(app), "myexe")
self.assertEqual(load(lib), "mydll")
test_install_in("./")
test_install_in("other_install_folder")
| 37.696203 | 95 | 0.572532 |
a4490577576ace8a7ae9268ce013320822db68f4 | 3,146 | py | Python | build/lib/gains/tests/test_density.py | BeckResearchLab/gains | fa3e008cfd129b1a0411368306eaad0b62309ae0 | [
"MIT"
] | 7 | 2018-06-21T16:58:32.000Z | 2019-12-11T19:57:37.000Z | build/lib/gains/tests/test_density.py | BeckResearchLab/gains | fa3e008cfd129b1a0411368306eaad0b62309ae0 | [
"MIT"
] | 5 | 2018-04-04T16:19:27.000Z | 2018-05-23T01:46:18.000Z | build/lib/gains/tests/test_density.py | BeckResearchLab/gains | fa3e008cfd129b1a0411368306eaad0b62309ae0 | [
"MIT"
] | 2 | 2018-07-12T05:05:04.000Z | 2019-12-03T05:39:45.000Z | from __future__ import absolute_import, division, print_function
import gains as genetic
from rdkit.Chem import AllChem as Chem
from rdkit.ML.Descriptors.MoleculeDescriptors import\
MolecularDescriptorCalculator as calculator
import numpy as np
import unittest
import datetime
from math import exp
import random
class GuessIonTests(unittest.TestCase):
geneSet = genetic.generate_geneset()
df = genetic.load_data("saltInfo.csv")
df = df['anion_SMILES'].unique()
ohPickMe = random.sample(range(df.shape[0]), 1)
anion = Chem.MolFromSmiles(df[ohPickMe[0]])
def test_1_density(self):
target = random.sample(range(800, 1500), 1)[0]
self.guess_password(target)
def test_benchmark(self):
genetic.Benchmark.run(self.test_1_density)
def guess_password(self, target):
startTime = datetime.datetime.now()
def fnGetFitness(genes):
return get_fitness(self.anion, genes, target)
def fnDisplay(candidate, mutation):
display(candidate, mutation, startTime)
def fnShowIon(genes, target, mutation_attempts):
show_ion(genes, target, mutation_attempts)
optimalFitness = 0.99
best = genetic.get_best(fnGetFitness, optimalFitness,
self.geneSet, fnDisplay,
fnShowIon, target)
return best
def display(candidate, mutation, startTime):
timeDiff = datetime.datetime.now() - startTime
print("{}\t{}\t{}\t{}".format(candidate.Genes, candidate.Fitness,
mutation, timeDiff))
def get_fitness(anion, genes, target):
cation = Chem.MolFromSmiles(genes)
model = genetic.load_data("density_nn_model.sav", pickleFile=True)
deslist = genetic.load_data("density_nn_model_descriptors.csv")
feature_vector = []
with genetic.suppress_stdout_stderr():
for item in deslist:
if "anion" in item:
feature_vector.append(calculator([item.partition('-')
[0]]).CalcDescriptors(anion)[0])
elif "cation" in item:
feature_vector.append(calculator([item.partition('-')
[0]]).CalcDescriptors(cation)[0])
elif "Temperature_K" in item:
feature_vector.append(298.15)
elif "Pressure_kPa" in item:
feature_vector.append(101.325)
else:
print("unknown descriptor in list: %s" % item)
features_normalized = (feature_vector - deslist.iloc[0].values) /\
deslist.iloc[1].values
prediction = exp(model.predict(np.array(features_normalized).
reshape(1, -1))[0])
error = abs((prediction - target) / target)
return 1 - error
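# Worked example (comment only): with target = 1000 kg/m^3 and a model prediction of
# 1100 kg/m^3, error = |1100 - 1000| / 1000 = 0.1, so get_fitness returns 0.9. The
# search in guess_password stops once the fitness exceeds optimalFitness = 0.99,
# i.e. once the predicted density is within 1% of the target.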
def show_ion(genes, target, mutation_attempts):
mol = Chem.MolFromSmiles(genes)
print("{}\t{}".format("number of atoms: ", mol.GetNumAtoms()))
print("{}\t{}".format("mutation attempts: ", mutation_attempts))
print("within 1%% of target density: %s (kg/m) " % target)
if __name__ == '__main__':
unittest.main()
| 35.75 | 71 | 0.630642 |
a2e21cd55be4823e8cb56fa538e704d8c1c46fbd | 69,468 | py | Python | python/paddle/fluid/executor.py | mamingjie-China/Paddle | 91d2f1e3e6e51142a74a43d0673a8feff056c39b | [
"Apache-2.0"
] | 1 | 2021-12-20T09:44:25.000Z | 2021-12-20T09:44:25.000Z | python/paddle/fluid/executor.py | Janayt/Paddle | 68c6160e639be38c57a7dd831f7b841b33e92676 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/executor.py | Janayt/Paddle | 68c6160e639be38c57a7dd831f7b841b33e92676 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os
import multiprocessing
import sys
import warnings
import numpy as np
from .wrapped_decorator import signature_safe_contextmanager
import six
from .data_feeder import convert_dtype
from .framework import Program, default_main_program, Variable, Operator, convert_np_dtype_to_dtype_
from . import core
from . import compiler
from .. import compat as cpt
from .trainer_factory import TrainerFactory
from .trainer_factory import FetchHandlerMonitor
import copy
__all__ = ['Executor', 'global_scope', 'scope_guard']
g_scope = core.Scope()
InferNativeConfig = core.NativeConfig
InferAnalysisConfig = core.AnalysisConfig
def global_scope():
"""
:api_attr: Static Graph
Get the global/default scope instance. There are a lot of APIs use
:code:`global_scope` as its default value, e.g., :code:`Executor.run`
Returns:
Scope: The global/default scope instance.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
numpy.array(fluid.global_scope().find_var("data").get_tensor())
"""
return g_scope
def _switch_scope(scope):
global g_scope
ex = g_scope
g_scope = scope
return ex
@signature_safe_contextmanager
def scope_guard(scope):
"""
:api_attr: Static Graph
This function switches scope through python `with` statement.
Scope records the mapping between variable names and variables ( :ref:`api_guide_Variable` ),
similar to brackets in programming languages.
If this function is not invoked, all variables and variable names are recorded in the default global scope.
When users need to create variables with the same name,
they need to switch scopes through this function
if they do not want the mapping of variables with the same name to be overwritten.
After switching through the `with` statement,
all variables created in the `with` block will be assigned to a new scope.
Parameters:
scope: The new scope.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
new_scope = fluid.Scope()
with fluid.scope_guard(new_scope):
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
numpy.array(new_scope.find_var("data").get_tensor())
"""
ex = _switch_scope(scope)
try:
yield
finally:
_switch_scope(ex)
def as_numpy(tensor):
"""
    Convert a Tensor to a numpy.ndarray. This only supports Tensors without LoD information.
For higher dimensional sequence data, please use LoDTensor directly.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
new_scope = fluid.Scope()
with fluid.scope_guard(new_scope):
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
tensor = new_scope.find_var("data").get_tensor()
fluid.executor.as_numpy(tensor) # or numpy.array(new_scope.find_var("data").get_tensor())
Args:
tensor(Variable): a instance of Tensor
Returns:
numpy.ndarray
"""
if isinstance(tensor, core.LoDTensorArray):
return [as_numpy(t) for t in tensor]
if isinstance(tensor, list):
return [as_numpy(t) for t in tensor]
assert isinstance(tensor, core.LoDTensor)
lod = tensor.lod()
if len(lod) > 0:
raise RuntimeError("Some of your fetched tensors hold LoD information. \
They can not be completely cast to Python ndarray. \
Please set the parameter 'return_numpy' as 'False' to \
return LoDTensor itself directly.")
if tensor._is_initialized():
return np.array(tensor)
else:
return None
def dtype_is_compatible_with(first, second):
"""
    Returns True if the first dtype is compatible with the second one.
    Currently, the two dtypes are required to be the same.
    Args:
        dtype (np.dtype|VarType|str): The type of data: float32, int64, etc.
    Returns:
        True if the two types are the same.
"""
if not isinstance(first, core.VarDesc.VarType):
first = convert_np_dtype_to_dtype_(first)
if not isinstance(second, core.VarDesc.VarType):
second = convert_np_dtype_to_dtype_(second)
return first == second
def dimension_is_compatible_with(first, second):
"""
Returns True if the two dimensions are compatible.
A dimension is compatible with the other if:
1. The length of the dimensions are same.
2. Each non-negative number of the two dimensions are same.
3. For negative number or 'None' in a dimension, it means unknown so it
is compatible with any number.
Args:
first (list/tuple): integers representing shape. "None" or negative
number means unknown.
second (list/tuple): integers representing shape. "None" or negative
number means unknown.
Returns:
True if the two dimensions are compatible.
"""
dim_len = len(first)
if dim_len != len(second):
return False
for i in range(dim_len):
if first[i] is None or first[i] < 0:
continue
if second[i] is None or second[i] < 0:
continue
if first[i] != second[i]:
return False
return True
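# Illustrative checks (comments only), traced against the loop above:
#
#   dimension_is_compatible_with([None, 3, 28], [-1, 3, 28])  # True: unknown dims match anything
#   dimension_is_compatible_with([8, 3, 28], [8, 3, 32])      # False: 28 != 32
#   dimension_is_compatible_with([8, 3], [8, 3, 1])           # False: different lengths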
def check_feed_shape_type(var, feed, num_places=1):
"""
Returns True if the variable doesn't require feed check or it is compatible
    with the shape and has the same dtype as the fed value.
A dimension is compatible with the other if:
1. The length of the dimensions are same.
2. Each non-negative number of the two dimensions are same.
3. For negative number or 'None' in a dimension, it means unknown so it
is compatible with any number.
Args:
var (Variable): the Variable object
feed (LoDTensor): the fed value, which must be a LoDTensor
num_places: an integer value indicating the number of places.
ParallelExecutor will divide data into devices (CPU/GPU) evenly.
Returns:
True if the shape and dtype of variable is compatible with the feed value
Raises:
ValueError: if the shape or dtype of the variable is not compatible with
the feed value
"""
if var.desc.need_check_feed():
diff_shape = core.diff_tensor_shape(feed, var.desc, num_places)
if diff_shape is not None:
raise ValueError(
'The fed Variable %r should have dimensions = %d, shape = '
'%r, but received fed shape %r on each device' %
(var.name, len(var.shape), var.shape, diff_shape))
if not dtype_is_compatible_with(feed._dtype(), var.dtype):
var_dtype_format = convert_dtype(var.dtype) if isinstance(
var.dtype, core.VarDesc.VarType) else var.dtype
feed_dtype_format = convert_dtype(feed._dtype()) if isinstance(
feed._dtype(), core.VarDesc.VarType) else feed._dtype()
raise ValueError(
'The data type of fed Variable %r must be %r, but received %r' %
(var.name, var_dtype_format, feed_dtype_format))
return True
def has_feed_operators(block, feed_targets, feed_holder_name):
""" Check whether the block already has feed operators.
Return false if the block does not have any feed operators.
If some feed operators have been prepended to the block, check that
the info contained in these feed operators matches the feed_targets
and feed_holder_name. Raise exception when any mismatch is found.
Return true when the block has feed operators with matching info.
Args:
block: a block instance (typically global block of a program)
feed_targets: a dictionary of {feed_target_name: feed_target_data}
feed_holder_name: the name of the variable that holds the data of
all feed targets. The type of this feed_holder variable is
FEED_MINIBATCH, which is essentially vector<LoDTensor>.
Returns:
A boolean value that indicates whether a block has feed operators
that match the info contained in feed_targets and feed_holder_name.
"""
feed_count = 0
for op in block.ops:
if op.desc.type() == 'feed':
feed_count += 1
assert op.desc.input('X')[0] == feed_holder_name
feed_target_name = op.desc.output('Out')[0]
if feed_target_name not in feed_targets:
raise Exception("'feed_targets' does not have {} variable".
format(feed_target_name))
else:
break
if feed_count > 0 and feed_count != len(feed_targets):
raise Exception(
"Feed operators in program desc do not match 'feed_targets'")
return feed_count > 0
def has_fetch_operators(block, fetch_targets, fetch_holder_name):
""" Check whether the block already has fetch operators.
Return false if the block does not have any fetch operators.
If some fetch operators have been appended to the block, check that
the info contained in these fetch operators matches the fetch_targets
and fetch_holder_name. Raise exception when any mismatch is found.
Return true when the block has fetch operators with matching info.
Args:
block: a block instance (typically global block of a program)
fetch_targets: a dictionary of {fetch_target_name: fetch_target_data}
fetch_holder_name: the name of the variable that holds the data of
all fetch targets. The type of this fetch_holder variable is
FETCH_LIST, which is essentially vector<LoDTensor>.
Return:
A boolean value that indicates whether a block has fetch operators
that match the info contained in fetch_targets and fetch_holder_name.
"""
fetch_count = 0
for op in block.ops:
if op.desc.type() == 'fetch':
fetch_count += 1
assert op.desc.output('Out')[0] == fetch_holder_name
fetch_target_name = op.desc.input('X')[0]
if fetch_target_name not in [
var.desc.name() for var in fetch_targets
]:
raise Exception("'fetch_targets' does not have {} variable".
format(fetch_target_name))
idx = op.desc.attr('col')
assert fetch_target_name == fetch_targets[idx].desc.name()
if fetch_count > 0 and fetch_count != len(fetch_targets):
raise Exception(
"Fetch operators in program desc do not match 'fetch_targets'")
return fetch_count > 0
def _fetch_var(name, scope=None, return_numpy=True):
"""
Fetch the value of the variable with the given name from the
given scope.
Args:
name(str): name of the variable. Typically, only persistable variables
can be found in the scope used for running the program.
scope(core.Scope|None): scope object. It should be the scope where
you pass to Executor.run() when running your program.
If None, global_scope() will be used. Default None.
return_numpy(bool): whether convert the tensor to numpy.ndarray.
Default True.
Returns:
LodTensor|numpy.ndarray
"""
assert isinstance(name, six.string_types)
if scope is None:
scope = global_scope()
assert isinstance(scope, core._Scope)
var = scope.find_var(_to_name_str(name))
assert var is not None, (
"Cannot find " + name + " in scope. Perhaps you need to make the"
" variable persistable by using var.persistable = True in your"
" program.")
tensor = var.get_tensor()
if return_numpy:
tensor = as_numpy(tensor)
return tensor
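# Illustrative comment: after a program has been run in fluid.global_scope(), a
# persistable variable can be read back by name. The parameter name below is an
# assumption for illustration only.
#
#   weights = _fetch_var("fc_0.w_0")                        # numpy.ndarray
#   weights_t = _fetch_var("fc_0.w_0", return_numpy=False)  # raw LoDTensor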
def _to_name_str(var):
def _to_str(var):
if isinstance(var, Variable):
return var.desc.name()
elif isinstance(var, str):
return var
elif isinstance(var, six.string_types):
return str(var)
elif isinstance(var, Operator):
return str(id(var))
else:
raise TypeError(str(var) + " should be Variable, Operator or str")
# NOTEz(zhiqiu): The item in fetch_list may be tuple returned by Optimizer.minimize(),
# see comments in _split_optimize_ops_in_fetch_list for more details.
if isinstance(var, tuple):
var = var[0]
if isinstance(var, list):
s = [_to_str(item) for item in var]
return ','.join(s)
else:
return _to_str(var)
def _get_strong_program_cache_key(program, feed, fetch_list):
return str(id(program)) + _get_program_cache_key(feed, fetch_list)
def _get_program_cache_key(feed, fetch_list):
feed_var_names = []
if isinstance(feed, dict):
feed_var_names = list(feed.keys())
elif isinstance(feed, list) or isinstance(feed, tuple):
for i, each in enumerate(feed):
feed_var_names += list(each.keys())
fetch_var_names = list(map(_to_name_str, fetch_list))
return str(feed_var_names + fetch_var_names)
def _as_lodtensor(data, place, dtype=None):
"""
    Convert a numpy.ndarray to a Tensor. This only supports Tensors without LoD information.
For higher dimensional sequence data, please use LoDTensor directly.
Examples:
>>> import paddle.fluid as fluid
>>> place = fluid.CPUPlace()
>>> exe = fluid.executor(place)
>>> data = np.array(size=(100, 200, 300))
>>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data)
>>> ...
Args:
data(numpy.ndarray|list|tuple|scalar): a instance of array, scalar, list or tuple
data(core.Place): the place of created tensor
dtype(core.VarDesc.VarType|str): the expected data type of created tensor
Returns:
LoDTensor
"""
#NOTE(zhiqiu): convert python builtin, like float, int, and list, to numpy ndarray
if not isinstance(data, np.ndarray):
assert dtype is not None, 'The dtype should be given when feed data is not np.ndarray'
dtype = convert_dtype(dtype) if isinstance(
dtype, core.VarDesc.VarType) else dtype
if np.isscalar(data):
data = np.array([data]).astype(dtype)
elif isinstance(data, (list, tuple)):
data = np.array(data)
if data.dtype == np.object:
raise TypeError(
"\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
"this means the input data contains nested lists with different lengths. "
"Please consider using 'fluid.create_lod_tensor' to convert it to a LoD-Tensor."
)
data = data.astype(dtype)
else:
raise TypeError(
"Convert data of type {} to Tensor is not supported".format(
type(data)))
# convert numpy.ndarray to tensor
tensor = core.LoDTensor()
tensor.set(data, place)
return tensor
class FetchHandler(object):
def __init__(self, var_dict=None, period_secs=60):
assert var_dict != None
self.var_dict = var_dict
self.period_secs = period_secs
def handler(self, res_dict):
for key in res_dict:
if type(res_dict[key]) is np.ndarray:
sys.stdout.write("{}[0]: {} ".format(key, res_dict[key][0]))
sys.stdout.write("\n")
@staticmethod
def help():
print("""
class FetchHandlerExample(FetchHandler):
def handler(self, res_dict):
print(res_dict["auc"])
print("auc: {}, {}".format(res_dict["auc"], time.ctime()))
auc = Variable()
var_dict = {"auc": auc}
handler = FetchHandlerExample(var_dict=var_dict)
""")
class Executor(object):
"""
:api_attr: Static Graph
An Executor in Python, supports single/multiple-GPU running,
and single/multiple-CPU running.
Args:
place(fluid.CPUPlace()|fluid.CUDAPlace(n)|None): This parameter represents
which device the executor runs on. When this parameter is None, PaddlePaddle
will set the default device according to its installation version. If Paddle
is CPU version, the default device would be set to `CPUPlace()` . If Paddle is
GPU version, the default device would be set to `CUDAPlace(0)` . Default is None.
Returns:
Executor
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import numpy
import os
# Set place explicitly.
# use_cuda = True
# place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
# exe = fluid.Executor(place)
# If you don't set place, PaddlePaddle sets the default device.
exe = fluid.Executor()
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
data = fluid.data(name='X', shape=[None, 1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
# Run the startup program once and only once.
# Not need to optimize/compile the startup program.
startup_program.random_seed=1
exe.run(startup_program)
# Run the main program directly without compile.
x = numpy.random.random(size=(10, 1)).astype('float32')
loss_data, = exe.run(train_program,
feed={"X": x},
fetch_list=[loss.name])
# Or, compiled the program and run. See `CompiledProgram`
# for more detail.
# NOTE: If you use CPU to run the program or Paddle is
# CPU version, you need to specify the CPU_NUM, otherwise,
# fluid will use all the number of the logic core as
# the CPU_NUM, in that case, the batch size of the input
# should be greater than CPU_NUM, if not, the process will be
# failed by an exception.
# Set place explicitly.
# if not use_cuda:
# os.environ['CPU_NUM'] = str(2)
# If you don't set place and PaddlePaddle is CPU version
os.environ['CPU_NUM'] = str(2)
compiled_prog = compiler.CompiledProgram(
train_program).with_data_parallel(
loss_name=loss.name)
loss_data, = exe.run(compiled_prog,
feed={"X": x},
fetch_list=[loss.name])
"""
def __init__(self, place=None):
if place is None:
if core.is_compiled_with_cuda():
self.place = core.CUDAPlace(0)
else:
self.place = core.CPUPlace()
else:
self.place = place
self.program_caches = dict()
self.ctx_caches = dict()
self.scope_caches = dict()
self.var_caches = dict()
self.pruned_program_caches = dict()
p = core.Place()
p.set_place(self.place)
self._default_executor = core.Executor(p)
self._closed = False
self.pruned_program_scope_caches = dict()
def _get_scope_cache(self, program_cache_key):
return self.scope_caches.get(program_cache_key, None)
def _get_ctx_cache(self, program_cache_key):
return self.ctx_caches.get(program_cache_key, None)
def _get_program_cache(self, program_cache_key):
return self.program_caches.get(program_cache_key, None)
def _add_program_cache(self, program_cache_key, program):
self.program_caches[program_cache_key] = program
def _get_pruned_program_cache(self, program_cache_key):
return self.pruned_program_caches.get(program_cache_key, None)
def _add_pruned_program_cache(self, program_cache_key, program):
self.pruned_program_caches[program_cache_key] = program
def _get_pruned_program_scope_cache(self, program_cache_key):
return self.pruned_program_scope_caches.get(program_cache_key, None)
def _add_pruned_program_scope_cache(self, program_cache_key, program):
self.pruned_program_scope_caches[program_cache_key] = program
def _add_ctx_cache(self, ctx_cache_key, ctx):
self.ctx_caches[ctx_cache_key] = ctx
def _add_scope_cache(self, scope_cache_key, scope):
self.scope_caches[scope_cache_key] = scope
def _add_feed_fetch_ops(self, program, feed, fetch_list, feed_var_name,
fetch_var_name):
tmp_program = program.clone()
global_block = tmp_program.global_block()
if feed_var_name in global_block.vars:
feed_var = global_block.var(feed_var_name)
else:
feed_var = global_block.create_var(
name=feed_var_name,
type=core.VarDesc.VarType.FEED_MINIBATCH,
persistable=True)
if fetch_var_name in global_block.vars:
fetch_var = global_block.var(fetch_var_name)
else:
fetch_var = global_block.create_var(
name=fetch_var_name,
type=core.VarDesc.VarType.FETCH_LIST,
persistable=True)
# prepend feed operators
if not has_feed_operators(global_block, feed, feed_var_name):
for i, name in enumerate(feed):
if global_block.has_var(name):
out = global_block.var(name)
global_block._prepend_op(
type='feed',
inputs={'X': [feed_var]},
outputs={'Out': [out]},
attrs={'col': i})
else:
warnings.warn(
"The variable %s is not found in program. It is not declared or is pruned."
% name)
# append fetch_operators
if not has_fetch_operators(global_block, fetch_list, fetch_var_name):
for i, var in enumerate(fetch_list):
assert isinstance(var, Variable) or isinstance(
var, six.string_types), (
"Wrong type for fetch_list[%s]: %s" % (i, type(var)))
global_block.append_op(
type='fetch',
inputs={'X': [var]},
outputs={'Out': [fetch_var]},
attrs={'col': i})
return tmp_program
def _feed_data(self, program, feed, feed_var_name, scope):
# feed var to framework
global_block = program.global_block()
for op in global_block.ops:
if op.desc.type() == 'feed':
feed_target_name = op.desc.output('Out')[0]
cur_feed = feed[feed_target_name]
var = global_block.var(feed_target_name)
if not isinstance(cur_feed, core.LoDTensor):
cur_feed = _as_lodtensor(cur_feed, self.place, var.dtype)
check_feed_shape_type(var, cur_feed)
idx = op.desc.attr('col')
core.set_feed_variable(scope, cur_feed, feed_var_name, idx)
else:
break
def _fetch_data(self, fetch_list, fetch_var_name, scope):
outs = [
core.get_fetch_variable(scope, fetch_var_name, i)
for i in six.moves.range(len(fetch_list))
]
return outs
def _split_optimize_ops_in_fetch_list(self, fetch_list):
"""
Split optimize_ops from fetch_list, which provided to specify program prunning.
Args:
fetch_list(list): The original fetch_list.
Possible types of fetch_list are:
fetch_list = ['loss']
fetch_list = [[sgd, sgd], 'loss']
fetch_list = [([sgd, sgd], [(param, grad)]), 'loss']
Returns:
optimize_ops(list): The optimize operators splited from fetch_list.
fetch_list(list): The updated fetch_list which does not contain optimize operators.
"""
_optimize_ops = []
_fetch_list = []
def _get_targets(_optimize_ops, _fetch_list, item):
if isinstance(item, Operator):
if item._is_optimize_op():
_optimize_ops.append(item)
else:
raise TypeError(
"The operator in fetch_list is not an optimize_op")
elif isinstance(item, Variable) or isinstance(
item, str) or isinstance(item, six.string_types):
_fetch_list.append(item)
else:
                raise TypeError(
                    "The item in fetch_list should be str, variable or optimize_op, but received %s."
                    % type(item))
for item in fetch_list:
# NOTE(zhiqiu): to support (optimizer_ops, param_and_grads) and optimizer_ops in fetch_list
# we should handle tuple and list in fetch_list.
# TODO(zhiqiu): find a better way to handle that.
if isinstance(item, list):
for i in item:
_get_targets(_optimize_ops, _fetch_list, i)
elif isinstance(item, tuple):
for i in item[0]:
_get_targets(_optimize_ops, _fetch_list, i)
else:
_get_targets(_optimize_ops, _fetch_list, item)
return _fetch_list, _optimize_ops
def _prune_program(self,
program,
feed=None,
fetch_list=None,
optimize_ops=None):
"""
Prune operators and variables which are not needed to generate
:code:`fetch_list` and optimize operators.
Prune operators and variables which are needed
to generate variables to be feeded.
Notes: This is a very low level API. Users should not use this API
directly.
Args:
program(Program): the origin program
feed(list|dict): feed dict or list.
fetch_list(list|Variable): A list of variables need to be fetched
optimize_ops(list[Operator]): A list of optimizer operators
Returns:
Program: A new, pruned program.
"""
compiled = isinstance(program, compiler.CompiledProgram)
if compiled:
if program._program:
origin_program = program._program
else:
warnings.warn(
"The program holds no _program, maybe it is constructed by graph, which can't be pruned yet."
)
return
else:
origin_program = program
feed_names = []
if isinstance(feed, dict):
feed_names = list(feed.keys())
elif isinstance(feed, list) or isinstance(feed, tuple):
for i, each in enumerate(feed):
feed_names += list(each.keys())
# if optimize_ops is [], all optimize ops in the program is used.
if not optimize_ops:
for block in origin_program.blocks:
for op in block.ops:
if op._is_optimize_op():
optimize_ops.append(op)
targets = fetch_list + optimize_ops
pruned_program = origin_program._prune_with_input(feed_names, targets)
if compiled:
# for compiled program, update the underlying program, re-generate graph,
# and reset the flag so it can be compiled again.
program._program = pruned_program
program._graph = core.Graph(pruned_program.desc)
program._compiled = False
else:
program = pruned_program
return program
def _update_feed(self, program, feed):
"""
Update the feed dict, remove the feed item which is pruned in program.
Notes: This is a very low level API. Users should not use this API
directly.
Args:
program(Program): the pruned program.
feed(list|dict): feed dict or list.
Returns:
feed:(list|dict) updated feed.
"""
compiled = isinstance(program, compiler.CompiledProgram)
if compiled:
if program._program:
global_block = program._program.global_block()
else:
warnings.warn(
"The program holds no _program, maybe it is constructed by graph."
)
else:
global_block = program.global_block()
if isinstance(feed, dict):
for feed_name in list(feed.keys()):
if not global_block.has_var(feed_name):
feed.pop(feed_name)
warnings.warn(
"The variable %s is not found in program. It is not declared or is pruned."
% feed_name)
elif isinstance(feed, list) or isinstance(feed, tuple):
for i, each in enumerate(feed):
for feed_name in list(each.keys()):
if not global_block.has_var(feed_name):
each.pop(feed_name)
warnings.warn(
"The variable %s is not found in program. It is not declared or is pruned."
% feed_name)
return feed
'''
TODO(typhoonzero): Define "no longer use" meaning? Can user create
a new Executor for the same program and run?
TODO(panyx0718): Why ParallelExecutor doesn't have close?
'''
def close(self):
"""
Close the executor. This interface is used for distributed training (PServers mode).
This executor can not be used after calling the interface, because
this interface releases resources associated with the current Trainer.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
cpu = fluid.CPUPlace()
exe = fluid.Executor(cpu)
# execute training or testing
exe.close()
"""
if not self._closed:
self._default_executor.close()
self._closed = True
def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name,
return_numpy, return_merged):
exe = program._executor
# TODO(zhenghuihuang): quantization uses Graph in CompiledProgram
# instead of program. We will add support for checking Vars in Graph
need_check_feed = program._program is not None
if need_check_feed:
global_block = program._program.global_block()
if isinstance(feed, dict):
feed_tensor_dict = dict()
for feed_name in feed:
feed_tensor = feed[feed_name]
var = global_block.var(feed_name) if need_check_feed else None
if not isinstance(feed_tensor, core.LoDTensor):
                    # always set to CPU place, since the tensor needs to be split
                    # and this is fast on CPU
feed_tensor = _as_lodtensor(feed[feed_name],
core.CPUPlace(), var.dtype
if var else None)
if need_check_feed:
check_feed_shape_type(var, feed_tensor, exe.device_count())
feed_tensor_dict[feed_name] = feed_tensor
exe.feed_and_split_tensor_into_local_scopes(feed_tensor_dict)
elif isinstance(feed, list) or isinstance(feed, tuple):
res = list()
for i, each in enumerate(feed):
if not isinstance(each, dict):
raise TypeError(
"Each element of feed list should be a dict")
res_dict = dict()
for feed_name in each:
tensor = each[feed_name]
var = global_block.var(
feed_name) if need_check_feed else None
if not isinstance(tensor, core.LoDTensor):
tensor = _as_lodtensor(each[feed_name],
program._places[i], var.dtype
if var else None)
if need_check_feed:
check_feed_shape_type(var, tensor)
res_dict[feed_name] = tensor
res.append(res_dict)
exe.feed_tensors_into_local_scopes(res)
fetch_var_names = list(map(_to_name_str, fetch_list))
tensors = exe.run(fetch_var_names, return_merged)._move_to_list()
return as_numpy(tensors) if return_numpy else tensors
def run(self,
program=None,
feed=None,
fetch_list=None,
feed_var_name='feed',
fetch_var_name='fetch',
scope=None,
return_numpy=True,
use_program_cache=False,
return_merged=True,
use_prune=False):
"""
Run the specified :code:`Program` or :code:`CompiledProgram`. It should be noted that the executor
will execute all the operators in :code:`Program` or :code:`CompiledProgram` without pruning some
operators of the :code:`Program` or :code:`CompiledProgram` according to fetch_list. And you could
specify the scope to store the :code:`Variables` during the executor running if the scope
is not set, the executor will use the global scope, i.e. :code:`fluid.global_scope()`.
Args:
program(Program|CompiledProgram): This parameter represents the :code:`Program` or
                :code:`CompiledProgram` to be executed. If this parameter is not provided
                (i.e. it is None), the program will be set to :code:`fluid.default_main_program()`.
The default is None.
feed(list|dict): This parameter represents the input variables of the model.
If it is single card training, the feed is dict type, and if it is multi-card
training, the parameter feed can be dict or list type variable. If the
parameter type is dict, the data in the feed will be split and sent to
multiple devices (CPU/GPU), that is to say, the input data will be evenly
                sent to different devices, so you should make sure that the number of samples in
                the current mini-batch is greater than the number of places;
if the parameter type is list, those data are copied directly to each device,
so the length of this list should be equal to the number of places.
The default is None.
fetch_list(list): This parameter represents the variables that need to be returned
after the model runs. The default is None.
feed_var_name(str): This parameter represents the name of the input variable of
the feed operator. The default is "feed".
fetch_var_name(str): This parameter represents the name of the output variable of
the fetch operator. The default is "fetch".
scope(Scope): the scope used to run this program, you can switch
it to different scope. default is :code:`fluid.global_scope()`
return_numpy(bool): This parameter indicates whether convert the fetched variables
(the variable specified in the fetch list) to numpy.ndarray. if it is False,
the type of the return value is a list of :code:`LoDTensor`. The default is True.
use_program_cache(bool): This parameter indicates whether the input :code:`Program` is cached.
If the parameter is True, the model may run faster in the following cases:
the input program is :code:`fluid.Program`, and the parameters(program, feed variable name
and fetch_list variable) of this interface remains unchanged during running.
The default is False.
return_merged(bool): This parameter indicates whether fetched variables (the variables
specified in the fetch list) should be merged according to the execution device dimension.
If :code:`return_merged` is False, the type of the return value is a two-dimensional list
of :code:`Tensor` / :code:`LoDTensorArray` ( :code:`return_numpy` is False) or a two-dimensional
list of :code:`numpy.ndarray` ( :code:`return_numpy` is True). If :code:`return_merged` is True,
the type of the return value is an one-dimensional list of :code:`Tensor` / :code:`LoDTensorArray`
( :code:`return_numpy` is False) or an one-dimensional list of :code:`numpy.ndarray`
( :code:`return_numpy` is True). Please see Examples 2 for more details. If the lengths of fetched
results are variant, please set :code:`return_merged` as False, which denotes that the fetched
results will not be merged. The default is True, but it is just for the compatibility, and may
use False as default value in the future version.
            use_prune(bool): This parameter indicates whether the input :code:`Program` will be pruned.
                If the parameter is True, the program will be pruned according to the given feed and fetch_list,
                which means the operators and variables in the program that generate :code:`feed` and are not
                needed to generate :code:`fetch_list` will be pruned. The default is False, which means the
                program will not be pruned and all the operators and variables will be executed during running.
                Note that if the tuple returned from :code:`Optimizer.minimize()` is passed to :code:`fetch_list`,
                :code:`use_prune` will be overridden to True, and the program will be pruned.
Returns:
List: The fetched result list.
NOTES:
1. If it is multi-card running and the feed parameter is dict type, the input data
will be evenly sent to different cards. For example, using two GPUs to run the model,
the input sample number is 3, that is, [0, 1, 2], the sample number on GPU0 is 1,
that is, [0], and the sample number on GPU1 is 2, that is, [1, 2].
If the number of samples is less than the number of devices, the program will
throw an exception, so when running the model, you should make sure that the
number of samples of the last batch of the data set is at least the
number of CPU cores or GPU cards; if it is smaller, it is recommended
that the last batch be discarded.
2. If the number of CPU cores or GPU cards available is greater than 1, the fetch
results are spliced together in dimension 0 for the same variable values
(variables in fetch_list) on different devices.
Examples 1:
.. code-block:: python
import paddle.fluid as fluid
import numpy
# First create the Executor.
place = fluid.CPUPlace() # fluid.CUDAPlace(0)
exe = fluid.Executor(place)
data = fluid.data(name='X', shape=[None, 1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
adam = fluid.optimizer.Adam()
adam.minimize(loss)
i = fluid.layers.zeros(shape=[1], dtype='int64')
array = fluid.layers.array_write(x=loss, i=i)
# Run the startup program once and only once.
exe.run(fluid.default_startup_program())
x = numpy.random.random(size=(10, 1)).astype('float32')
loss_val, array_val = exe.run(feed={'X': x},
fetch_list=[loss.name, array.name])
print(array_val)
# [array([0.02153828], dtype=float32)]
Examples 2:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# First create the Executor.
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
data = fluid.data(name='X', shape=[None, 1], dtype='float32')
class_dim = 2
prediction = fluid.layers.fc(input=data, size=class_dim)
loss = fluid.layers.mean(prediction)
adam = fluid.optimizer.Adam()
adam.minimize(loss)
# Run the startup program once and only once.
exe.run(fluid.default_startup_program())
build_strategy = fluid.BuildStrategy()
binary = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(
loss_name=loss.name, build_strategy=build_strategy)
batch_size = 6
x = np.random.random(size=(batch_size, 1)).astype('float32')
# Set return_merged as False to fetch unmerged results:
unmerged_prediction, = exe.run(binary, feed={'X': x},
fetch_list=[prediction.name],
return_merged=False)
# If the user uses two GPU cards to run this python code, the printed result will be
# (2, 3, class_dim). The first dimension value of the printed result is the number of used
# GPU cards, and the second dimension value is the quotient of batch_size and the
# number of used GPU cards.
print("The unmerged prediction shape: {}".format(np.array(unmerged_prediction).shape))
print(unmerged_prediction)
# Set return_merged as True to fetch merged results:
merged_prediction, = exe.run(binary, feed={'X': x},
fetch_list=[prediction.name],
return_merged=True)
# If the user uses two GPU cards to run this python code, the printed result will be
# (6, class_dim). The first dimension value of the printed result is the batch_size.
print("The merged prediction shape: {}".format(np.array(merged_prediction).shape))
print(merged_prediction)
# Out:
# The unmerged prediction shape: (2, 3, 2)
# [array([[-0.37620035, -0.19752218],
# [-0.3561043 , -0.18697084],
# [-0.24129935, -0.12669306]], dtype=float32), array([[-0.24489994, -0.12858354],
# [-0.49041364, -0.25748932],
# [-0.44331917, -0.23276259]], dtype=float32)]
# The merged prediction shape: (6, 2)
# [[-0.37789783 -0.19921964]
# [-0.3577645 -0.18863106]
# [-0.24274671 -0.12814042]
# [-0.24635398 -0.13003758]
# [-0.49232286 -0.25939852]
# [-0.44514108 -0.2345845 ]]
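Examples 3:
The following is a minimal, illustrative sketch of :code:`use_prune` (it assumes
the same single-place CPU setup as Examples 1):
.. code-block:: python
import paddle.fluid as fluid
import numpy
place = fluid.CPUPlace()
exe = fluid.Executor(place)
data = fluid.data(name='X', shape=[None, 1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
adam = fluid.optimizer.Adam()
adam.minimize(loss)
exe.run(fluid.default_startup_program())
x = numpy.random.random(size=(10, 1)).astype('float32')
# With use_prune=True, only the operators needed to compute the
# fetched loss are executed; unrelated operators are pruned.
loss_val, = exe.run(feed={'X': x},
fetch_list=[loss.name],
use_prune=True)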
"""
try:
return self._run_impl(
program=program,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name,
scope=scope,
return_numpy=return_numpy,
use_program_cache=use_program_cache,
use_prune=use_prune,
return_merged=return_merged)
except Exception as e:
six.reraise(*sys.exc_info())
def _run_impl(self, program, feed, fetch_list, feed_var_name,
fetch_var_name, scope, return_numpy, use_program_cache,
return_merged, use_prune):
if self._closed:
raise RuntimeError("Attempted to use a closed Executor")
use_default_main_program = program is None
if program is None:
program = default_main_program()
if isinstance(program, Program) and \
len(program.global_block().ops) == 0:
if use_default_main_program:
error_info = "Now you are using default_main_program, "\
"but there are no operators in the program to be executed. "\
"Please ensure you create model correctly or you can pass "\
"the Program or the CompiledProgram manually."
else:
error_info = "There are no operators in the program to be executed. "\
"If you pass Program manually, please use fluid.program_guard "\
"to ensure the current Program is being used."
warnings.warn(error_info)
if scope is None:
scope = global_scope()
if fetch_list is not None:
if isinstance(fetch_list, Variable) or isinstance(
fetch_list, str) or isinstance(fetch_list,
six.string_types):
fetch_list = [fetch_list]
assert isinstance(fetch_list, tuple) or isinstance(fetch_list, list), \
"Currently , The fetch_list type only should be list or tuple, \n"\
"but the input type is {}. For more information please refer to \n"\
"the executor.run(...).".format(type(fetch_list))
else:
fetch_list = []
# use_prune can be overridden by putting optimize_ops in fetch_list
_origin_fetch_list = fetch_list
_origin_program = program
fetch_list, optimize_ops = self._split_optimize_ops_in_fetch_list(
fetch_list)
if optimize_ops:
use_prune = True
if use_prune:
cache_key = _get_strong_program_cache_key(program, feed,
_origin_fetch_list)
cached_pruned_program = self._get_pruned_program_cache(cache_key)
if cached_pruned_program is None:
if isinstance(program, compiler.CompiledProgram):
program_scope_cache = self._get_pruned_program_scope_cache(
str(id(_origin_program)))
# copy the original program, so it can be cached.
program = copy.copy(program)
# share the local scopes for same original CompiledProgram.
program._share_vars_from = program_scope_cache
if self._get_pruned_program_scope_cache(
str(id(_origin_program))) is None:
self._add_pruned_program_scope_cache(
str(id(_origin_program)), program)
pruned_program = self._prune_program(program, feed, fetch_list,
optimize_ops)
self._add_pruned_program_cache(cache_key, pruned_program)
else:
pruned_program = cached_pruned_program
feed = self._update_feed(pruned_program, feed)
program = pruned_program
compiled = isinstance(program, compiler.CompiledProgram)
# For backward compatibility, run directly.
if not compiled:
# In distributed training, the compiled program is saved in Program._graph
has_compiled_graph = isinstance(program._graph,
compiler.CompiledProgram)
if has_compiled_graph:
program._graph._compile(scope, self.place)
# _graph in program does not support inference since the _graph is optimized
# through optimizer.minimize function and should not be used as inference graph
# assert not program._graph._is_inference
return self._run_parallel(
program._graph,
scope=scope,
feed=feed,
fetch_list=fetch_list,
fetch_var_name=fetch_var_name,
return_numpy=return_numpy,
return_merged=return_merged)
return self._run_program(
program,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name,
scope=scope,
return_numpy=return_numpy,
use_program_cache=use_program_cache)
program._compile(scope, self.place)
if program._is_inference:
return self._run_inference(program._executor, feed)
else:
return self._run_parallel(
program,
scope=scope,
feed=feed,
fetch_list=fetch_list,
fetch_var_name=fetch_var_name,
return_numpy=return_numpy,
return_merged=return_merged)
def _run_program(self, program, feed, fetch_list, feed_var_name,
fetch_var_name, scope, return_numpy, use_program_cache):
if feed is None:
feed = {}
elif isinstance(feed, (list, tuple)):
assert len(feed) == 1, "Not compiled with data parallel"
feed = feed[0]
if not isinstance(feed, dict):
raise TypeError(
"feed requires dict as its Parameter. But you passed in %s" %
(type(feed)))
assert program is not None, "The program should not be Empty"
if not isinstance(program, Program):
raise TypeError(
"Executor requires Program as its Parameter. But you passed in %s"
% (type(program)))
if use_program_cache:
cache_key = _get_strong_program_cache_key(program, feed, fetch_list)
cached_program = self._get_program_cache(cache_key)
cached_ctx = self._get_ctx_cache(cache_key)
cached_scope = self._get_scope_cache(cache_key)
if cached_program is None:
cached_program = self._add_feed_fetch_ops(
program=program,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name)
self._add_program_cache(cache_key, cached_program)
fetch_list_str = list(map(_to_name_str, fetch_list))
cached_ctx = self._default_executor.prepare(
cached_program.desc, 0, fetch_list_str, False)
# currently, we cache program, vars, sub_scope here
# we suppose that in a life cycle of training, a user
# will not create many programs. So, here the basic
# rule of caching is to cache all unseen (program, var, scope)
# combinations when a user uses use_program_cache.
cached_scope = scope.new_scope()
self._default_executor.create_variables(cached_program.desc,
cached_scope, 0)
self._add_ctx_cache(cache_key, cached_ctx)
self._add_scope_cache(cache_key, cached_scope)
program = cached_program
ctx = cached_ctx
scope = cached_scope
else:
program = self._add_feed_fetch_ops(
program=program,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name)
self._feed_data(program, feed, feed_var_name, scope)
if not use_program_cache:
self._default_executor.run(program.desc, scope, 0, True, True,
fetch_var_name)
else:
self._default_executor.run_prepared_ctx(ctx, scope, False, False,
False)
arr = scope.find_var(fetch_var_name).get_fetch_list()
tensors = arr._move_to_list()
if return_numpy:
return as_numpy(tensors)
else:
return tensors
def _run_inference(self, exe, feed):
return exe.run(feed)
def _dump_debug_info(self, program=None, trainer=None):
with open(str(id(program)) + "_train_desc.prototxt", "w") as fout:
fout.write(str(trainer))
if program._fleet_opt and "fleet_desc" in program._fleet_opt:
with open("fleet_desc.prototxt", "w") as fout:
fout.write(str(program._fleet_opt["fleet_desc"]))
def _adjust_pipeline_resource(self, pipeline_opt, dataset, pipeline_num):
filelist_length = len(dataset.dataset.get_filelist())
if filelist_length < pipeline_num:
pipeline_num = filelist_length
print(
"Pipeline training: setting the pipeline num to %d is enough because there are only %d files"
% (filelist_length, filelist_length))
if filelist_length < pipeline_num * pipeline_opt["concurrency_list"][0]:
print(
"Pipeline training: setting the 1st element in concurrency_list to %d is enough because there are only %d files"
% (filelist_length // pipeline_num, filelist_length))
pipeline_opt["concurrency_list"][
0] = filelist_length // pipeline_num
dataset.set_thread(pipeline_opt["concurrency_list"][0] * pipeline_num)
return pipeline_num
def _prepare_trainer(self,
program=None,
dataset=None,
scope=None,
thread=0,
debug=False,
fetch_list=None,
fetch_info=None,
print_period=100):
is_heter = 0
if program._fleet_opt is not None:
if program._fleet_opt.get("worker_class", "") == "HeterCpuWorker":
is_heter = 1
if program._fleet_opt.get("trainer", "") == "HeterXpuTrainer":
is_heter = 1
if scope is None:
scope = global_scope()
if fetch_list is None:
fetch_list = []
if fetch_info is None:
fetch_info = []
assert len(fetch_list) == len(fetch_info)
compiled = isinstance(program, compiler.CompiledProgram)
if is_heter:
from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fu = FleetUtil()
ret = fu.split_program_by_device(program)
if not compiled:
# TODO: Need a better way to distinguish and specify different execution mode
if program._pipeline_opt:
trainer = TrainerFactory()._create_trainer(
program._pipeline_opt)
else:
trainer = TrainerFactory()._create_trainer(program._fleet_opt)
trainer._set_thread_barrier(program._is_distributed)
trainer._set_program(program)
if is_heter:
trainer._set_heter_info(ret)
else:
if program._pipeline_opt:
trainer = TrainerFactory()._create_trainer(
program.program._pipeline_opt)
else:
trainer = TrainerFactory()._create_trainer(
program.program._fleet_opt)
trainer._set_program(program.program)
if thread <= 0:
if dataset.thread_num <= 0:
raise RuntimeError(
"You should set thread num first, either in Dataset"
"or in Executor.train_from_dataset")
else:
trainer._set_thread(dataset.thread_num)
else:
trainer._set_thread(thread)
trainer._set_debug(debug)
trainer._set_fetch_var_and_info(fetch_list, fetch_info, print_period)
return scope, trainer
def _run_from_dataset(self,
program=None,
dataset=None,
scope=None,
thread=0,
is_infer=False,
debug=False,
fetch_list=None,
fetch_info=None,
print_period=100,
fetch_handler=None):
if program._pipeline_opt is not None:
import paddle
if dataset is not None:
raise RuntimeError("dataset should be None for pipeline mode")
# The following fake dataset is created only so that the
# _prepare_trainer api can be called; it carries no real data.
data_vars = []
for var in program.global_block().vars.values():
if var.is_data:
data_vars.append(var)
dataset = paddle.fluid.DatasetFactory().create_dataset(
'FileInstantDataset')
dataset.set_batch_size(1)
dataset.set_thread(1)
dataset.set_filelist(['None'])
dataset.set_use_var(data_vars)
else:
if dataset is None:
raise RuntimeError("dataset is need and should be initialized")
dataset._prepare_to_run()
scope, trainer = self._prepare_trainer(
program=program,
dataset=dataset,
scope=scope,
thread=thread,
debug=debug,
fetch_list=fetch_list,
fetch_info=fetch_info,
print_period=print_period)
trainer._set_infer(is_infer)
trainer._gen_trainer_desc()
self._dump_debug_info(program=program, trainer=trainer)
dataset._dynamic_adjust_before_train(trainer.proto_desc.thread_num)
trainer_instance = self._default_executor.init_for_dataset(
program.desc, trainer._desc(), scope, dataset.dataset)
if fetch_handler is not None:
scope0 = trainer_instance.get_worker_scope(0)
fetch_monitor = FetchHandlerMonitor(scope0, fetch_handler)
fetch_monitor.start()
self._default_executor.run_from_dataset(trainer_instance)
fetch_monitor.stop()
self._default_executor.release_trainer(trainer_instance)
else:
self._default_executor.run_from_dataset(trainer_instance)
self._default_executor.release_trainer(trainer_instance)
dataset._dynamic_adjust_after_train()
dataset._finish_to_run()
return None
def infer_from_dataset(self,
program=None,
dataset=None,
scope=None,
thread=0,
debug=False,
fetch_list=None,
fetch_info=None,
print_period=100,
fetch_handler=None):
"""
Infer from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.
Given a program, either a program or compiled program, infer_from_dataset will
consume all data samples in dataset. Input scope can be given by users. By default,
scope is global_scope(). The total number of threads run in inference is `thread`.
The thread number used in inference will be the minimum of the thread num in Dataset and
the value of thread in this interface. Debug can be set so that the executor will display
the run time for all operators and the throughput of the current infer task.
The document of infer_from_dataset is almost the same as train_from_dataset,
except that in distributed training, push gradients will be disabled in infer_from_dataset.
infer_from_dataset() can be used for evaluation in multi-thread mode very easily.
Args:
program(Program|CompiledProgram): the program that needs to be run,
if not provided, then default_main_program (not compiled) will be used.
dataset(paddle.fluid.Dataset): dataset created outside this function,
a user should provide a well-defined dataset before calling this function.
Please check the document of Dataset if needed. default is None
scope(Scope): the scope used to run this program, you can switch it to different scope
for each run. default is global_scope
thread(int): number of thread a user wants to run in this function. Default is 0, which
means using thread num of dataset
debug(bool): whether to run infer_from_dataset in debug mode, default is False
fetch_list(Variable List): fetch variable list, each variable will be printed during
training, default is None
fetch_info(String List): print information for each variable, default is None
print_period(int): the number of mini-batches for each print, default is 100
fetch_handler(FetchHandler): a user define class for fetch output.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
exe = fluid.Executor(place)
x = fluid.data(name="x", shape=[None, 10, 10], dtype="int64")
y = fluid.data(name="y", shape=[None, 1], dtype="int64", lod_level=1)
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_use_var([x, y])
dataset.set_thread(1)
filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
dataset.set_filelist(filelist)
exe.run(fluid.default_startup_program())
exe.infer_from_dataset(program=fluid.default_main_program(),
dataset=dataset)
"""
return self._run_from_dataset(program, dataset, scope, thread, True,
debug, fetch_list, fetch_info,
print_period, fetch_handler)
def start_heter_trainer(self,
program=None,
scope=None,
debug=False,
fetch_list=None,
fetch_info=None,
print_period=100,
fetch_handler=None):
return self._start_heter_trainer(program, scope, False, debug,
fetch_list, fetch_info, print_period,
fetch_handler)
def _start_heter_trainer(self,
program=None,
scope=None,
is_infer=False,
debug=False,
fetch_list=None,
fetch_info=None,
print_period=100,
fetch_handler=None):
scope, trainer = self._prepare_trainer(
program=program,
dataset=None,
scope=scope,
thread=1,
debug=debug,
fetch_list=fetch_list,
fetch_info=fetch_info,
print_period=print_period)
trainer._set_infer(is_infer)
trainer._gen_trainer_desc()
self._dump_debug_info(program=program, trainer=trainer)
trainer_instance = self._default_executor.init_for_dataset(
program.desc, trainer._desc(), scope, None)
#if fetch_handler is not None:
# scope0 = trainer_instance.get_worker_scope(0)
# fetch_monitor = FetchHandlerMonitor(scope0, fetch_handler)
# fetch_monitor.start()
# self._default_executor.run_from_dataset(trainer_instance)
# fetch_monitor.stop()
# self._default_executor.release_trainer(trainer_instance)
#else:
self._default_executor.run_from_dataset(trainer_instance)
#self._default_executor.release_trainer(trainer_instance)
return trainer_instance
def train_from_dataset(self,
program=None,
dataset=None,
scope=None,
thread=0,
debug=False,
fetch_list=None,
fetch_info=None,
print_period=100,
fetch_handler=None):
"""
Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.
Given a program, either a program or compiled program, train_from_dataset will
consume all data samples in dataset. Input scope can be given by users. By default,
scope is global_scope(). The total number of threads run in training is `thread`.
The thread number used in training will be the minimum of the thread num in Dataset and
the value of thread in this interface. Debug can be set so that the executor will display
the run time for all operators and the throughput of the current training task.
Note: train_from_dataset will destroy all resources created within executor for each run.
Args:
program(Program|CompiledProgram): the program that needs to be run,
if not provided, then default_main_program (not compiled) will be used.
dataset(paddle.fluid.Dataset): dataset created outside this function,
a user should provide a well-defined dataset before calling this function.
Please check the document of Dataset if needed.
scope(Scope): the scope used to run this program, you can switch it to different scope
for each run. default is global_scope
thread(int): number of thread a user wants to run in this function. Default is 0, which
means using thread num of dataset
debug(bool): whether to run train_from_dataset in debug mode
fetch_list(Variable List): fetch variable list, each variable will be printed
during training
fetch_info(String List): print information for each variable, its length should be equal
to fetch_list
print_period(int): the number of mini-batches for each print, default is 100
fetch_handler(FetchHandler): a user define class for fetch output.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
exe = fluid.Executor(place)
x = fluid.data(name="x", shape=[None, 10, 10], dtype="int64")
y = fluid.data(name="y", shape=[None, 1], dtype="int64", lod_level=1)
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_use_var([x, y])
dataset.set_thread(1)
filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
dataset.set_filelist(filelist)
exe.run(fluid.default_startup_program())
exe.train_from_dataset(program=fluid.default_main_program(),
dataset=dataset)
"""
return self._run_from_dataset(program, dataset, scope, thread, False,
debug, fetch_list, fetch_info,
print_period, fetch_handler)
| 43.121043 | 128 | 0.593223 |
dfa885c2348eb8d5c4c3ee99f858412efa1e82bd | 6,251 | py | Python | tests/conftest.py | oterrier/openapi-python-client | ca8acdbe34b11584143b78afc130684f0690d5bf | [
"MIT"
] | 1 | 2022-03-02T08:15:26.000Z | 2022-03-02T08:15:26.000Z | tests/conftest.py | oterrier/openapi-python-client | ca8acdbe34b11584143b78afc130684f0690d5bf | [
"MIT"
] | null | null | null | tests/conftest.py | oterrier/openapi-python-client | ca8acdbe34b11584143b78afc130684f0690d5bf | [
"MIT"
] | null | null | null | from typing import Any, Callable, Dict
import pytest
from openapi_python_client.parser.properties import (
AnyProperty,
DateProperty,
DateTimeProperty,
EnumProperty,
FileProperty,
IntProperty,
ListProperty,
ModelProperty,
Property,
StringProperty,
UnionProperty,
)
@pytest.fixture
def model_property_factory() -> Callable[..., ModelProperty]:
"""
This fixture surfaces in the test as a function which manufactures ModelProperties with defaults.
You can pass the same params into this as the ModelProperty constructor to override defaults.
"""
from openapi_python_client.parser.properties import Class
def _factory(**kwargs):
kwargs = _common_kwargs(kwargs)
kwargs = {
"description": "",
"class_info": Class(name="MyClass", module_name="my_module"),
"required_properties": [],
"optional_properties": [],
"relative_imports": set(),
"additional_properties": False,
"python_name": "",
**kwargs,
}
return ModelProperty(**kwargs)
return _factory
@pytest.fixture
def enum_property_factory() -> Callable[..., EnumProperty]:
"""
This fixture surfaces in the test as a function which manufactures EnumProperties with defaults.
You can pass the same params into this as the EnumProperty constructor to override defaults.
"""
from openapi_python_client.parser.properties import Class
def _factory(**kwargs):
kwargs = _common_kwargs(kwargs)
kwargs = {
"class_info": Class(name=kwargs["name"], module_name=kwargs["name"]),
"values": {},
"value_type": str,
**kwargs,
}
return EnumProperty(**kwargs)
return _factory
@pytest.fixture
def property_factory() -> Callable[..., Property]:
"""
This fixture surfaces in the test as a function which manufactures Properties with defaults.
You can pass the same params into this as the Property constructor to override defaults.
"""
def _factory(**kwargs):
kwargs = _common_kwargs(kwargs)
return Property(**kwargs)
return _factory
@pytest.fixture
def any_property_factory() -> Callable[..., AnyProperty]:
"""
This fixture surfaces in the test as a function which manufactures AnyProperty with defaults.
You can pass the same params into this as the AnyProperty constructor to override defaults.
"""
def _factory(**kwargs):
kwargs = _common_kwargs(kwargs)
return AnyProperty(**kwargs)
return _factory
@pytest.fixture
def string_property_factory() -> Callable[..., StringProperty]:
"""
This fixture surfaces in the test as a function which manufactures StringProperties with defaults.
You can pass the same params into this as the StringProperty constructor to override defaults.
"""
def _factory(**kwargs):
kwargs = _common_kwargs(kwargs)
return StringProperty(**kwargs)
return _factory
@pytest.fixture
def int_property_factory() -> Callable[..., IntProperty]:
"""
This fixture surfaces in the test as a function which manufactures IntProperties with defaults.
You can pass the same params into this as the IntProperty constructor to override defaults.
"""
def _factory(**kwargs):
kwargs = _common_kwargs(kwargs)
return IntProperty(**kwargs)
return _factory
@pytest.fixture
def date_time_property_factory() -> Callable[..., DateTimeProperty]:
"""
This fixture surfaces in the test as a function which manufactures DateTimeProperties with defaults.
You can pass the same params into this as the DateTimeProperty constructor to override defaults.
"""
def _factory(**kwargs):
kwargs = _common_kwargs(kwargs)
return DateTimeProperty(**kwargs)
return _factory
@pytest.fixture
def date_property_factory() -> Callable[..., DateProperty]:
"""
This fixture surfaces in the test as a function which manufactures DateProperties with defaults.
You can pass the same params into this as the DateProperty constructor to override defaults.
"""
def _factory(**kwargs):
kwargs = _common_kwargs(kwargs)
return DateProperty(**kwargs)
return _factory
@pytest.fixture
def file_property_factory() -> Callable[..., FileProperty]:
"""
This fixture surfaces in the test as a function which manufactures FileProperties with defaults.
You can pass the same params into this as the FileProperty constructor to override defaults.
"""
def _factory(**kwargs):
kwargs = _common_kwargs(kwargs)
return FileProperty(**kwargs)
return _factory
@pytest.fixture
def list_property_factory(string_property_factory) -> Callable[..., ListProperty]:
"""
This fixture surfaces in the test as a function which manufactures ListProperties with defaults.
You can pass the same params into this as the ListProperty constructor to override defaults.
"""
def _factory(**kwargs):
kwargs = _common_kwargs(kwargs)
if "inner_property" not in kwargs:
kwargs["inner_property"] = string_property_factory()
return ListProperty(**kwargs)
return _factory
@pytest.fixture
def union_property_factory(date_time_property_factory, string_property_factory) -> Callable[..., UnionProperty]:
"""
This fixture surfaces in the test as a function which manufactures UnionProperties with defaults.
You can pass the same params into this as the UnionProperty constructor to override defaults.
"""
def _factory(**kwargs):
kwargs = _common_kwargs(kwargs)
if "inner_properties" not in kwargs:
kwargs["inner_properties"] = [date_time_property_factory(), string_property_factory()]
return UnionProperty(**kwargs)
return _factory
def _common_kwargs(kwargs: Dict[str, Any]) -> Dict[str, Any]:
kwargs = {
"name": "test",
"required": True,
"nullable": False,
"default": None,
**kwargs,
}
if not kwargs.get("python_name"):
kwargs["python_name"] = kwargs["name"]
return kwargs
| 28.674312 | 112 | 0.681011 |
32f11132095e12efea852f9cbbc4009e5275c1d6 | 6,496 | py | Python | classes/realsense.py | snavas/PyMote | 9ac51251abbc943fcd36fbb58ff5c3031d375c14 | [
"MIT"
] | 3 | 2021-01-09T13:51:49.000Z | 2022-02-22T20:32:34.000Z | classes/realsense.py | snavas/GECCO | 9ac51251abbc943fcd36fbb58ff5c3031d375c14 | [
"MIT"
] | 11 | 2020-11-20T11:06:34.000Z | 2022-03-12T00:54:17.000Z | classes/realsense.py | snavas/PyMote | 9ac51251abbc943fcd36fbb58ff5c3031d375c14 | [
"MIT"
] | null | null | null | #from device import Device
from classes.device import Device
import pyrealsense2 as rs
import numpy as np
import cv2
class RealSense(Device):
#pipeline = rs.pipeline()
def getcolorintrinsics(self):
return self.color_intr
def getdepthintrinsics(self):
return self.depth_intr
def getirintrinsics(self):
return self.ir_intr
# overriding abstract method
def __init__(self, id, depth, ir):
ctx = rs.context()
devices = ctx.query_devices()
print("<*> Connected devices: ")
print(*devices, sep="\n")
# TODO: Possible improvement to better query devices:
#>> > d = ctx.load_device("C:\\Users\\local_admin\\Documents\\20180212_000327.bag")
#>> > s = d.query_sensors()[0]
#>> > s.get_stream_profiles()[0]
# Configure depth and color streams
self.pipeline = rs.pipeline()
config = rs.config()
if len(devices) > 0:
print("<*> Using device: ", id)
config.enable_device(id)
else:
print("<*> Realsense device not found, loading: ", id)
# Tell config that we will use a recorded device from filem to be used by the pipeline through playback.
rs.config.enable_device_from_file(config, id)
config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)
if depth:
config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
if ir:
config.enable_stream(rs.stream.infrared, 1280, 720)
# Start streaming
profile = self.pipeline.start(config)
self.color_intr = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
if ir:
self.ir_intr = profile.get_stream(rs.stream.infrared).as_video_stream_profile().get_intrinsics()
if depth:
self.depth_intr = profile.get_stream(rs.stream.depth).as_video_stream_profile().get_intrinsics()
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = profile.get_device().first_depth_sensor()
self.depth_scale = depth_sensor.get_depth_scale()
print("<*> Depth Scale is: ", self.depth_scale)
try:
# increase lase power
laser_range = depth_sensor.get_option_range(rs.option.laser_power)
depth_sensor.set_option(rs.option.laser_power, laser_range.max)
# turn off auto-exposure
depth_sensor.set_option(rs.option.enable_auto_exposure, False)
except Exception as e:
pass
# We will be removing the background of objects more than
# clipping_distance_in_meters meters away
clipping_distance_in_meters = 1.15 # 1.15 meters
self.clipping_distance = clipping_distance_in_meters / self.depth_scale
# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
if ir:
align_to = rs.stream.depth
else:
align_to = rs.stream.color
self.align = rs.align(align_to)
for index in range(5):
self.pipeline.wait_for_frames()
def getdepthscale(self):
return self.depth_scale
# overriding abstract method
# Streaming loop
def getstream(self):
# Wait for a coherent pair of frames: depth and color
frames = self.pipeline.wait_for_frames()
# frames.get_depth_frame() is a 640x360 depth image
# depth_frame = frames.get_depth_frame()
# color_frame = frames.get_color_frame()
aligned_frames = self.align.process(frames)
aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image
aligned_color_frame = aligned_frames.get_color_frame()
#if not depth_frame or not color_frame:
# continue
# Convert images to numpy arrays
depth_image = np.asanyarray(aligned_depth_frame.get_data())
color_image = np.asanyarray(aligned_color_frame.get_data())
# Apply colormap on depth image (image must be converted to 8-bit per pixel first)
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
# Stack both images horizontally
# images = np.hstack((color_image, depth_colormap))
return depth_colormap
def getcolorstream(self):
frames = self.pipeline.wait_for_frames()
aligned_color_frame = self.align.process(frames).get_color_frame()
color_image = np.asanyarray(aligned_color_frame.get_data())
return color_image
def getirstream(self):
frames = self.pipeline.wait_for_frames()
aligned_infrared_frame = self.align.process(frames).get_infrared_frame()
infrared_image = np.asanyarray(aligned_infrared_frame.get_data())
return infrared_image
def getdepthstream(self):
frames = self.pipeline.wait_for_frames()
aligned_depth_frame = self.align.process(frames).get_depth_frame()
depth_image = np.asanyarray(aligned_depth_frame.get_data())
#scaled_image = cv2.convertScaleAbs(depth_image, alpha=0.03)
return depth_image
def getdepthcolormap(self):
depth_image = self.getdepthstream()
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
return depth_colormap
def getsegmentedstream(self):
frames = self.pipeline.wait_for_frames()
aligned_frames = self.align.process(frames)
aligned_depth_frame = aligned_frames.get_depth_frame()
color_frame = aligned_frames.get_color_frame()
depth_image = np.asanyarray(aligned_depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
grey_color = 255
depth_image_3d = np.dstack((depth_image, depth_image, depth_image)) # depth image is 1 channel, color is 3 channels
bg_removed = np.where((depth_image_3d > self.clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
#depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
return bg_removed
def stop(self):
self.pipeline.stop()
def restart(self):
self.pipeline.start() | 41.909677 | 124 | 0.664409 |
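# Example usage (an illustrative sketch; the serial number / .bag path passed
# as `id` below is a placeholder):
#
# cam = RealSense("012345678901", depth=True, ir=False)
# color = cam.getcolorstream()          # aligned RGB frame as a numpy array
# segmented = cam.getsegmentedstream()  # background beyond ~1.15 m greyed out
# cam.stop()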
d4425032785310871a42971b84fc330ff517f757 | 874 | py | Python | tests/simple_pages_test.py | EDS435/IS219-Advanced | 06f3c358a781d4d41b13716536e20260228a2e29 | [
"BSD-3-Clause"
] | 1 | 2022-03-04T16:05:47.000Z | 2022-03-04T16:05:47.000Z | tests/simple_pages_test.py | EDS435/IS219-Advanced | 06f3c358a781d4d41b13716536e20260228a2e29 | [
"BSD-3-Clause"
] | 1 | 2022-03-02T20:18:51.000Z | 2022-03-02T20:18:51.000Z | tests/simple_pages_test.py | EDS435/IS219-Advanced | 06f3c358a781d4d41b13716536e20260228a2e29 | [
"BSD-3-Clause"
] | 4 | 2022-02-23T18:37:17.000Z | 2022-02-23T21:17:32.000Z | """This test the homepage"""
def test_request_main_menu_links(client):
"""This makes the index page"""
response = client.get("/")
assert response.status_code == 200
assert b'<li><a href="/page/about">About</a></li>' in response.data
assert b'<li><a href="/page/welcome">Welcome</a></li>' in response.data
def test_request_about(client):
"""This makes the index page"""
response = client.get("/page/about")
assert response.status_code == 200
assert b"About Page" in response.data
def test_request_welcome(client):
"""This makes the index page"""
response = client.get("/page/welcome")
assert response.status_code == 200
assert b"Welcome Page" in response.data
def test_request_page_not_found(client):
"""This makes the index page"""
response = client.get("/page/page5")
assert response.status_code == 404
| 31.214286 | 75 | 0.683066 |
804673849be60263f7484601d158a27e8ad2e092 | 540 | py | Python | ectyper/loggingFunctions.py | phac-nml/ecoli_serotyping | 52293255ca02dd9631d637265ac7d6a5d7bfbb82 | [
"Apache-2.0"
] | 19 | 2017-01-19T18:05:23.000Z | 2021-07-26T22:16:42.000Z | ectyper/loggingFunctions.py | phac-nml/ecoli_serotyping | 52293255ca02dd9631d637265ac7d6a5d7bfbb82 | [
"Apache-2.0"
] | 17 | 2017-07-19T17:26:43.000Z | 2022-01-14T19:19:39.000Z | ectyper/loggingFunctions.py | phac-nml/ecoli_serotyping | 52293255ca02dd9631d637265ac7d6a5d7bfbb82 | [
"Apache-2.0"
] | 6 | 2017-01-19T18:21:17.000Z | 2022-03-07T20:46:12.000Z | import logging
def create_logger():
"""
Create the logger for ectyper
:return: The root logger for the program
"""
log = logging.getLogger('ectyper')
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
log.setLevel(logging.DEBUG)
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setFormatter(formatter)
#console.setLevel(logging.INFO)
log.addHandler(console)
return log
| 20.769231 | 77 | 0.672222 |
f12c8f5a1f9e57fc3e186b8331f1d0a2a45f18bb | 750 | py | Python | python/src/leetcode/2020/20.py | ccampo133/coding-challenges | 618b51d2d6f6946d3756c68b0008a178d80952ae | [
"MIT"
] | null | null | null | python/src/leetcode/2020/20.py | ccampo133/coding-challenges | 618b51d2d6f6946d3756c68b0008a178d80952ae | [
"MIT"
] | null | null | null | python/src/leetcode/2020/20.py | ccampo133/coding-challenges | 618b51d2d6f6946d3756c68b0008a178d80952ae | [
"MIT"
] | null | null | null | class Solution:
def isValid(self, s: str) -> bool:
closed = {
')': '(',
'}': '{',
']': '['
}
stack = []
for c in s:
if c in closed:
if len(stack) == 0:
return False
prev = stack.pop()
if closed[c] != prev:
return False
else:
stack.append(c)
return len(stack) == 0
if __name__ == '__main__':
soln = Solution()
s1 = '()'
s2 = '()[]{}'
s3 = '(]'
s4 = '([)]'
s5 = '{[]}'
assert soln.isValid(s1)
assert soln.isValid(s2)
assert not soln.isValid(s3)
assert not soln.isValid(s4)
assert soln.isValid(s5)
| 22.058824 | 38 | 0.392 |
4d2b25f9cde689a7c1b019eed38b4011963ea06e | 13,845 | py | Python | nets-in-progress/8r-1c-srv6-pm/isis8d.py | x-Ultra/rose-srv6-tutorial | fb1a074d0490234eeec20f491bd39ceb91156994 | [
"Apache-2.0"
] | 8 | 2020-06-28T15:33:48.000Z | 2021-10-15T00:18:49.000Z | nets-in-progress/8r-1c-srv6-pm/isis8d.py | x-Ultra/rose-srv6-tutorial | fb1a074d0490234eeec20f491bd39ceb91156994 | [
"Apache-2.0"
] | 9 | 2020-05-12T22:44:06.000Z | 2022-01-04T02:11:45.000Z | nets-in-progress/8r-1c-srv6-pm/isis8d.py | x-Ultra/rose-srv6-tutorial | fb1a074d0490234eeec20f491bd39ceb91156994 | [
"Apache-2.0"
] | 4 | 2020-06-16T10:32:27.000Z | 2021-11-27T14:34:01.000Z | #!/usr/bin/python
# pylint: disable=missing-module-docstring
# pylint: disable=missing-function-docstring
# pylint: disable=missing-class-docstring
import os
import shutil
import sys
from argparse import ArgumentParser
import python_hosts
from dotenv import load_dotenv
from mininet.cli import CLI
# from mininet.link import Link
from mininet.log import setLogLevel
from mininet.net import Mininet
# from mininet.topo import Topo
from mininet.node import Host, OVSBridge
from mininet.util import dumpNodeConnections
from time import sleep
# BASEDIR = "/home/user/mytests/ospf3routers/nodeconf/"
BASEDIR = os.getcwd() + "/nodeconf/"
OUTPUT_PID_TABLE_FILE = "/tmp/pid_table_file.txt"
PRIVDIR = '/var/priv'
# Path of the file containing the entries (ip-hostname)
# to be added to /etc/hosts
ETC_HOSTS_FILE = './etc-hosts'
# Define whether to add Mininet nodes to /etc/hosts file or not
ADD_ETC_HOSTS = True
# Define whether to start the node managers on the routers or not
START_NODE_MANAGERS = False
# Load environment variables from .env file
load_dotenv()
# Get node manager path
NODE_MANAGER_PATH = os.getenv('NODE_MANAGER_PATH', None)
if NODE_MANAGER_PATH is not None:
NODE_MANAGER_PATH = os.path.join(NODE_MANAGER_PATH,
'srv6_manager.py')
# Get gRPC server port
NODE_MANAGER_GRPC_PORT = os.getenv('NODE_MANAGER_GRPC_PORT', None)
class BaseNode(Host):
def __init__(self, name, *args, **kwargs):
dirs = [PRIVDIR]
Host.__init__(self, name, privateDirs=dirs, *args, **kwargs)
self.dir = "/tmp/%s" % name
self.nets = []
if not os.path.exists(self.dir):
os.makedirs(self.dir)
def config(self, **kwargs):
# pylint: disable=arguments-differ
# Init steps
Host.config(self, **kwargs)
# Iterate over the interfaces
# first = True
for intf in self.intfs.values():
# Remove any configured address
self.cmd('ifconfig %s 0' % intf.name)
# # For the first one, let's configure the mgmt address
# if first:
# first = False
# self.cmd('ip a a %s dev %s' %(kwargs['mgmtip'], intf.name))
# let's write the hostname in /var/mininet/hostname
self.cmd("echo '" + self.name + "' > " + PRIVDIR + "/hostname")
if os.path.isfile(BASEDIR + self.name + "/start.sh"):
self.cmd('source %s' % BASEDIR + self.name + "/start.sh")
def cleanup(self):
def remove_if_exists(filename):
if os.path.exists(filename):
os.remove(filename)
Host.cleanup(self)
# Rm dir
if os.path.exists(self.dir):
shutil.rmtree(self.dir)
remove_if_exists(BASEDIR + self.name + "/zebra.pid")
remove_if_exists(BASEDIR + self.name + "/zebra.log")
remove_if_exists(BASEDIR + self.name + "/zebra.sock")
remove_if_exists(BASEDIR + self.name + "/isis8d.pid")
remove_if_exists(BASEDIR + self.name + "/isis8d.log")
remove_if_exists(BASEDIR + self.name + "/isisd.log")
remove_if_exists(BASEDIR + self.name + "/isisd.pid")
remove_if_exists(OUTPUT_PID_TABLE_FILE)
# if os.path.exists(BASEDIR+self.name+"/zebra.pid"):
# os.remove(BASEDIR+self.name+"/zebra.pid")
# if os.path.exists(BASEDIR+self.name+"/zebra.log"):
# os.remove(BASEDIR+self.name+"/zebra.log")
# if os.path.exists(BASEDIR+self.name+"/zebra.sock"):
# os.remove(BASEDIR+self.name+"/zebra.sock")
# if os.path.exists(BASEDIR+self.name+"/ospfd.pid"):
# os.remove(BASEDIR+self.name+"/ospfd.pid")
# if os.path.exists(BASEDIR+self.name+"/ospfd.log"):
# os.remove(BASEDIR+self.name+"/ospfd.log")
# if os.path.exists(OUTPUT_PID_TABLE_FILE):
# os.remove(OUTPUT_PID_TABLE_FILE)
class Router(BaseNode):
def __init__(self, name, *args, **kwargs):
BaseNode.__init__(self, name, *args, **kwargs)
def config(self, **kwargs):
# pylint: disable=arguments-differ
# Init steps
BaseNode.config(self, **kwargs)
# Start node managers
if START_NODE_MANAGERS:
self.cmd('python %s --grpc-port %s &'
% (NODE_MANAGER_PATH, NODE_MANAGER_GRPC_PORT))
class Switch(OVSBridge):
def __init__(self, name, *args, **kwargs):
# dirs = [PRIVDIR]
OVSBridge.__init__(self, name, *args, **kwargs)
self.dir = "/tmp/%s" % name
self.nets = []
if not os.path.exists(self.dir):
os.makedirs(self.dir)
def config(self, **kwargs):
# pylint: disable=arguments-differ
# Init steps
OVSBridge.config(self, **kwargs)
# Iterate over the interfaces
for intf in self.intfs.values():
# Remove any configured address
self.cmd('ifconfig %s 0' % intf.name)
# # For the first one, let's configure the mgmt address
# if first:
# first = False
# self.cmd('ip a a %s dev %s' %(kwargs['mgmtip'], intf.name))
# let's write the hostname in /var/mininet/hostname
self.cmd("echo '" + self.name + "' > " + PRIVDIR + "/hostname")
if os.path.isfile(BASEDIR + self.name + "/start.sh"):
self.cmd('source %s' % BASEDIR + self.name + "/start.sh")
def cleanup(self):
# def remove_if_exists(filename):
# if os.path.exists(filename):
# os.remove(filename)
OVSBridge.cleanup(self)
# Rm dir
if os.path.exists(self.dir):
shutil.rmtree(self.dir)
# the add_link function creates a link and assigns the interface names
# as node1-node2 and node2-node1
def add_link(my_net, node1, node2):
my_net.addLink(node1, node2, intfName1=node1.name + '-' + node2.name,
intfName2=node2.name + '-' + node1.name)
def create_topo(my_net):
# pylint: disable=invalid-name, too-many-locals, too-many-statements
h11 = my_net.addHost(name='h11', cls=BaseNode)
h12 = my_net.addHost(name='h12', cls=BaseNode)
h13 = my_net.addHost(name='h13', cls=BaseNode)
h31 = my_net.addHost(name='h31', cls=BaseNode)
h32 = my_net.addHost(name='h32', cls=BaseNode)
h33 = my_net.addHost(name='h33', cls=BaseNode)
h51 = my_net.addHost(name='h51', cls=BaseNode)
h52 = my_net.addHost(name='h52', cls=BaseNode)
h53 = my_net.addHost(name='h53', cls=BaseNode)
h81 = my_net.addHost(name='h81', cls=BaseNode)
h82 = my_net.addHost(name='h82', cls=BaseNode)
h83 = my_net.addHost(name='h83', cls=BaseNode)
hdc1 = my_net.addHost(name='hdc1', cls=BaseNode)
hdc2 = my_net.addHost(name='hdc2', cls=BaseNode)
hdc3 = my_net.addHost(name='hdc3', cls=BaseNode)
controller = my_net.addHost(name='controller', cls=BaseNode,
sshd=False, inNamespace=False)
r1 = my_net.addHost(name='r1', cls=Router)
r2 = my_net.addHost(name='r2', cls=Router)
r3 = my_net.addHost(name='r3', cls=Router)
r4 = my_net.addHost(name='r4', cls=Router)
r5 = my_net.addHost(name='r5', cls=Router)
r6 = my_net.addHost(name='r6', cls=Router)
r7 = my_net.addHost(name='r7', cls=Router)
r8 = my_net.addHost(name='r8', cls=Router)
# note that if the interface names are not provided,
# the order of adding link will determine the
# naming of the interfaces (e.g. on r1: r1-eth0, r1-eth1, r1-eth2...)
# it is possible to provide names as follows
# Link(h1, r1, intfName1='h1-eth0', intfName2='r1-eth0')
# the add_link function creates a link and assigns the interface names
# as node1-node2 and node2-node1
# hosts of r1
add_link(my_net, h11, r1)
add_link(my_net, h12, r1)
add_link(my_net, h13, r1)
# r1 - r2
add_link(my_net, r1, r2)
# datacenters of r2
add_link(my_net, hdc1, r2)
# r2 - r3
add_link(my_net, r2, r3)
# r2 - r7
add_link(my_net, r2, r7)
# hosts of r3
add_link(my_net, h31, r3)
add_link(my_net, h32, r3)
add_link(my_net, h33, r3)
# r3 - r4
add_link(my_net, r3, r4)
# r4 - r5
add_link(my_net, r4, r5)
# r4 - r6
add_link(my_net, r4, r6)
# hosts of r5
add_link(my_net, h51, r5)
add_link(my_net, h52, r5)
add_link(my_net, h53, r5)
# datacenters of r5
add_link(my_net, hdc3, r5)
# r5 - r6
add_link(my_net, r5, r6)
# r6 - r7
add_link(my_net, r6, r7)
# r6 - r8
add_link(my_net, r6, r8)
# r7 - r8
add_link(my_net, r7, r8)
# hosts of r8
add_link(my_net, h81, r8)
add_link(my_net, h82, r8)
add_link(my_net, h83, r8)
# datacenters of r8
add_link(my_net, hdc2, r8)
# Create the mgmt switch
sw = my_net.addSwitch(name='sw', cls=Switch, dpid='1')
# Create a link between mgmt switch and controller
add_link(my_net, controller, sw)
# Connect all the routers to the management network
add_link(my_net, r1, sw)
add_link(my_net, r2, sw)
add_link(my_net, r3, sw)
add_link(my_net, r4, sw)
add_link(my_net, r5, sw)
add_link(my_net, r6, sw)
add_link(my_net, r7, sw)
add_link(my_net, r8, sw)
def add_nodes_to_etc_hosts():
# Get /etc/hosts
etc_hosts = python_hosts.hosts.Hosts()
# Import host-ip mapping defined in etc-hosts file
count = etc_hosts.import_file(ETC_HOSTS_FILE)
# Print results
count = count['add_result']['ipv6_count'] + \
count['add_result']['ipv4_count']
print('*** Added %s entries to /etc/hosts\n' % count)
def remove_nodes_from_etc_hosts(net):
print('*** Removing entries from /etc/hosts\n')
# Get /etc/hosts
etc_hosts = python_hosts.hosts.Hosts()
for host in net.hosts:
# Remove all the nodes from /etc/hosts
etc_hosts.remove_all_matching(name=str(host))
# Remove entries related to the management network
# These entries are in the form *.m (e.g. r1.m, controller.m)
# therefore they are not removed during the previous loop
for host in net.hosts:
etc_hosts.remove_all_matching(name='%s.m' % host)
# Write changes to /etc/hosts
etc_hosts.write()
def stop_all():
# Clean Mininet emulation environment
os.system('sudo mn -c')
# Kill all the started daemons
os.system('sudo killall zebra isisd')
def extract_host_pid(dumpline):
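# Pull the PID out of Mininet's node repr, which is assumed to end in
# something like "pid=1234> ": take everything after 'pid=' and drop the
# trailing "> ".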
temp = dumpline[dumpline.find('pid=') + 4:]
return int(temp[:len(temp) - 2])
def simple_test():
global choosed_sender, choosed_reflector
"Create and test a simple network"
# topo = RoutersTopo()
# net = Mininet(topo=topo, build=False, controller=None)
net = Mininet(topo=None, build=False, controller=None)
create_topo(net)
net.build()
net.start()
print("Dumping host connections")
dumpNodeConnections(net.hosts)
# print "Testing network connectivity"
# net.pingAll()
with open(OUTPUT_PID_TABLE_FILE, "w") as file:
for host in net.hosts:
file.write("%s %d\n" % (host, extract_host_pid(repr(host))))
# Add Mininet nodes to /etc/hosts
if ADD_ETC_HOSTS:
add_nodes_to_etc_hosts()
# This waiting time is needed because, after several tests, we noticed that
# each node of the topology needs some time to set itself up correctly
print("Waiting for the components of the topology to set up correctly (40 seconds)")
sleep(40)
CLI(net)
# Remove Mininet nodes from /etc/hosts
if ADD_ETC_HOSTS:
remove_nodes_from_etc_hosts(net)
net.stop()
stop_all()
def parse_arguments():
# Get parser
parser = ArgumentParser(
description='Emulation of a Mininet topology (8 routers running '
'IS-IS, 1 controller out-of-band'
)
parser.add_argument(
'--start-node-managers', dest='start_node_managers',
action='store_true', default=False,
help='Define whether to start node manager on routers or not'
)
parser.add_argument(
'--no-etc-hosts', dest='add_etc_hosts',
action='store_false', default=True,
help='Define whether to add Mininet nodes to /etc/hosts file or not'
)
# Parse input parameters
args = parser.parse_args()
# Return the arguments
return args
def __main():
global ADD_ETC_HOSTS # pylint: disable=global-statement
global START_NODE_MANAGERS # pylint: disable=global-statement
global NODE_MANAGER_GRPC_PORT # pylint: disable=global-statement
global net
global choosed_sender, choosed_reflector
# Parse command-line arguments
args = parse_arguments()
# Define whether to start node manager on routers or not
START_NODE_MANAGERS = args.start_node_managers
if START_NODE_MANAGERS:
if NODE_MANAGER_PATH is None:
print('Error: --start-node-managers requires NODE_MANAGER_PATH '
'variable')
print('NODE_MANAGER_PATH variable not set in .env file\n')
sys.exit(-2)
if not os.path.exists(NODE_MANAGER_PATH):
print('Error: --start-node-managers requires NODE_MANAGER_PATH '
'variable')
print('NODE_MANAGER_PATH defined in .env file '
'points to a non existing folder\n')
sys.exit(-2)
if NODE_MANAGER_GRPC_PORT is None:
print('Error: --start-node-managers requires '
'NODE_MANAGER_GRPC_PORT variable')
print('NODE_MANAGER_GRPC_PORT variable not set in .env file\n')
sys.exit(-2)
# Define whether to add Mininet nodes to /etc/hosts file or not
ADD_ETC_HOSTS = args.add_etc_hosts
# Tell mininet to print useful information
setLogLevel('info')
simple_test()
if __name__ == '__main__':
__main()
| 32.964286 | 84 | 0.635247 |
b1f9a4604433639a386c822a7d132a063e8b45cb | 9,376 | py | Python | xlsxwriter/test/worksheet/test_sparkline11.py | yxwlr995/-Python-Pandas-XlsxWriter | cd28c1b968795b67f3013c49a0e02ffda5898163 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/worksheet/test_sparkline11.py | yxwlr995/-Python-Pandas-XlsxWriter | cd28c1b968795b67f3013c49a0e02ffda5898163 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/worksheet/test_sparkline11.py | yxwlr995/-Python-Pandas-XlsxWriter | cd28c1b968795b67f3013c49a0e02ffda5898163 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, jmcnamara@cpan.org
#
import unittest
from ..compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with no cell data."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.name = 'Sheet1'
worksheet.excel_version = 2010
data = [-2, 2, 3, -1, 0]
worksheet.write_row('A1', data)
worksheet.write_row('A2', data)
worksheet.write_row('A3', data)
worksheet.write_row('A4', [1, 2, 3, 4, 5])
# Set up sparklines.
worksheet.add_sparkline('F1', {'range': 'A1:E1',
'max': 0.5,
'min':-0.5,
'axis': True,
'reverse': True,
'empty_cells': 'zero',
'weight': 0.25,
'high_point': True,
'low_point': True,
'negative_points': True,
'first_point': True,
'last_point': True,
'markers': True,
})
worksheet.add_sparkline('F2', {'range': 'A2:E2',
'max': 'group',
'min': 'group',
'empty_cells': 'connect',
'weight': 2.25,
})
worksheet.add_sparkline('F3', {'range': 'A3:E3',
'max': 'group',
'min': '0',
'show_hidden': True,
'weight': 6,
'date_axis': 'A4:E4',
})
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
<dimension ref="A1:E4"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
<sheetData>
<row r="1" spans="1:5" x14ac:dyDescent="0.25">
<c r="A1">
<v>-2</v>
</c>
<c r="B1">
<v>2</v>
</c>
<c r="C1">
<v>3</v>
</c>
<c r="D1">
<v>-1</v>
</c>
<c r="E1">
<v>0</v>
</c>
</row>
<row r="2" spans="1:5" x14ac:dyDescent="0.25">
<c r="A2">
<v>-2</v>
</c>
<c r="B2">
<v>2</v>
</c>
<c r="C2">
<v>3</v>
</c>
<c r="D2">
<v>-1</v>
</c>
<c r="E2">
<v>0</v>
</c>
</row>
<row r="3" spans="1:5" x14ac:dyDescent="0.25">
<c r="A3">
<v>-2</v>
</c>
<c r="B3">
<v>2</v>
</c>
<c r="C3">
<v>3</v>
</c>
<c r="D3">
<v>-1</v>
</c>
<c r="E3">
<v>0</v>
</c>
</row>
<row r="4" spans="1:5" x14ac:dyDescent="0.25">
<c r="A4">
<v>1</v>
</c>
<c r="B4">
<v>2</v>
</c>
<c r="C4">
<v>3</v>
</c>
<c r="D4">
<v>4</v>
</c>
<c r="E4">
<v>5</v>
</c>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{05C60535-1F16-4fd2-B633-F4F36F0B64E0}">
<x14:sparklineGroups xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:sparklineGroup manualMin="0" lineWeight="6" dateAxis="1" displayEmptyCellsAs="gap" displayHidden="1" minAxisType="custom" maxAxisType="group">
<x14:colorSeries theme="4" tint="-0.499984740745262"/>
<x14:colorNegative theme="5"/>
<x14:colorAxis rgb="FF000000"/>
<x14:colorMarkers theme="4" tint="-0.499984740745262"/>
<x14:colorFirst theme="4" tint="0.39997558519241921"/>
<x14:colorLast theme="4" tint="0.39997558519241921"/>
<x14:colorHigh theme="4"/>
<x14:colorLow theme="4"/>
<xm:f>Sheet1!A4:E4</xm:f>
<x14:sparklines>
<x14:sparkline>
<xm:f>Sheet1!A3:E3</xm:f>
<xm:sqref>F3</xm:sqref>
</x14:sparkline>
</x14:sparklines>
</x14:sparklineGroup>
<x14:sparklineGroup lineWeight="2.25" displayEmptyCellsAs="span" minAxisType="group" maxAxisType="group">
<x14:colorSeries theme="4" tint="-0.499984740745262"/>
<x14:colorNegative theme="5"/>
<x14:colorAxis rgb="FF000000"/>
<x14:colorMarkers theme="4" tint="-0.499984740745262"/>
<x14:colorFirst theme="4" tint="0.39997558519241921"/>
<x14:colorLast theme="4" tint="0.39997558519241921"/>
<x14:colorHigh theme="4"/>
<x14:colorLow theme="4"/>
<x14:sparklines>
<x14:sparkline>
<xm:f>Sheet1!A2:E2</xm:f>
<xm:sqref>F2</xm:sqref>
</x14:sparkline>
</x14:sparklines>
</x14:sparklineGroup>
<x14:sparklineGroup manualMax="0.5" manualMin="-0.5" lineWeight="0.25" markers="1" high="1" low="1" first="1" last="1" negative="1" displayXAxis="1" minAxisType="custom" maxAxisType="custom" rightToLeft="1">
<x14:colorSeries theme="4" tint="-0.499984740745262"/>
<x14:colorNegative theme="5"/>
<x14:colorAxis rgb="FF000000"/>
<x14:colorMarkers theme="4" tint="-0.499984740745262"/>
<x14:colorFirst theme="4" tint="0.39997558519241921"/>
<x14:colorLast theme="4" tint="0.39997558519241921"/>
<x14:colorHigh theme="4"/>
<x14:colorLow theme="4"/>
<x14:sparklines>
<x14:sparkline>
<xm:f>Sheet1!A1:E1</xm:f>
<xm:sqref>F1</xm:sqref>
</x14:sparkline>
</x14:sparklines>
</x14:sparklineGroup>
</x14:sparklineGroups>
</ext>
</extLst>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
if __name__ == '__main__':
unittest.main()
| 44.018779 | 337 | 0.360602 |
fbdf99fa9089d23f97064fb031bacc1f156ee4d0 | 5,784 | py | Python | nf_ea_com_bnop_source/b_code/migrations/nf_ea_com_to_bnop/migrators/ea_subtyping_connectors_migrator.py | boro-alpha/nf_ea_com_bnop | 21a500bc84304b2709a5a58dbea1d69cfc048f7e | [
"MIT"
] | null | null | null | nf_ea_com_bnop_source/b_code/migrations/nf_ea_com_to_bnop/migrators/ea_subtyping_connectors_migrator.py | boro-alpha/nf_ea_com_bnop | 21a500bc84304b2709a5a58dbea1d69cfc048f7e | [
"MIT"
] | null | null | null | nf_ea_com_bnop_source/b_code/migrations/nf_ea_com_to_bnop/migrators/ea_subtyping_connectors_migrator.py | boro-alpha/nf_ea_com_bnop | 21a500bc84304b2709a5a58dbea1d69cfc048f7e | [
"MIT"
] | null | null | null | from bnop_source.b_code.bnop_facades import BnopFacades
from bnop_source.b_code.core.object_model.bnop_repositories import BnopRepositories
from bnop_source.b_code.core.object_model.objects.bnop_objects import BnopObjects
from boro_common_source.ckids.boro_object_ckids import BoroObjectCkIds
from nf_common_source.code.constants.standard_constants import DEFAULT_NULL_VALUE
from nf_common_source.code.nf.types.nf_column_types import NfColumnTypes
from nf_common_source.code.services.dataframe_service.dataframe_mergers import inner_merge_dataframes
from nf_ea_common_tools_source.b_code.nf_ea_common.common_knowledge.ea_connector_types import EaConnectorTypes
from nf_ea_common_tools_source.b_code.services.general.nf_ea.com.common_knowledge.collection_types.nf_ea_com_collection_types import NfEaComCollectionTypes
from nf_ea_common_tools_source.b_code.services.general.nf_ea.com.common_knowledge.column_types.nf_ea_com_column_types import NfEaComColumnTypes
from nf_ea_common_tools_source.b_code.services.general.nf_ea.com.nf_ea_com_universes import NfEaComUniverses
SUBTYPE_UML_NAMES_COLUMN = \
'subtype_uml_names'
SUPERTYPE_UML_NAMES_COLUMN = \
'supertype_uml_names'
def migrate_ea_connectors_in_scope_of_subtyping_pattern(
nf_ea_com_universe: NfEaComUniverses,
bnop_repository: BnopRepositories):
subtyping_ea_connectors = \
__get_subtyping_connectors(
nf_ea_com_universe=nf_ea_com_universe)
__migrate_subtyping_connectors(
ea_connectors=subtyping_ea_connectors,
bnop_repository=bnop_repository)
def __get_subtyping_connectors(
nf_ea_com_universe: NfEaComUniverses) \
-> list:
ea_connectors = \
nf_ea_com_universe.nf_ea_com_registry.dictionary_of_collections[NfEaComCollectionTypes.EA_CONNECTORS]
ea_classifiers = \
nf_ea_com_universe.nf_ea_com_registry.dictionary_of_collections[NfEaComCollectionTypes.EA_CLASSIFIERS]
subtyping_ea_connectors = \
ea_connectors[ea_connectors[
NfEaComColumnTypes.CONNECTORS_ELEMENT_TYPE_NAME.column_name] == EaConnectorTypes.GENERALIZATION.type_name]
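    # Join the classifier (UML) names onto each Generalization connector so every row carries its supertype and subtype names.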
subtyping_ea_connectors_with_uml_names_dataframe = \
inner_merge_dataframes(
master_dataframe=subtyping_ea_connectors,
master_dataframe_key_columns=[
NfEaComColumnTypes.ELEMENTS_CLIENT_PLACE2_END_CONNECTORS.column_name],
merge_suffixes=['', '_type_uml_names'],
foreign_key_dataframe=ea_classifiers,
foreign_key_dataframe_fk_columns=[NfColumnTypes.NF_UUIDS.column_name],
foreign_key_dataframe_other_column_rename_dictionary=
{
NfEaComColumnTypes.EXPLICIT_OBJECTS_EA_OBJECT_NAME.column_name: SUPERTYPE_UML_NAMES_COLUMN
})
subtyping_ea_connectors_with_uml_names_dataframe = \
inner_merge_dataframes(
master_dataframe=subtyping_ea_connectors_with_uml_names_dataframe,
master_dataframe_key_columns=[
NfEaComColumnTypes.ELEMENTS_SUPPLIER_PLACE1_END_CONNECTORS.column_name],
merge_suffixes=['', '_instance_uml_names'],
foreign_key_dataframe=ea_classifiers,
foreign_key_dataframe_fk_columns=[NfColumnTypes.NF_UUIDS.column_name],
foreign_key_dataframe_other_column_rename_dictionary=
{
NfEaComColumnTypes.EXPLICIT_OBJECTS_EA_OBJECT_NAME.column_name: SUBTYPE_UML_NAMES_COLUMN
})
subtyping_ea_connectors_with_uml_names_dataframe.fillna(
value=DEFAULT_NULL_VALUE,
inplace=True)
typing_ea_connectors_with_uml_names = \
subtyping_ea_connectors_with_uml_names_dataframe.to_dict(
orient='records')
return \
typing_ea_connectors_with_uml_names
def __migrate_subtyping_connectors(
ea_connectors: list,
bnop_repository: BnopRepositories):
for ea_connector in ea_connectors:
__migrate_subtyping_connector(
ea_connector=ea_connector,
bnop_repository=bnop_repository)
def __migrate_subtyping_connector(
bnop_repository: BnopRepositories,
ea_connector: dict):
subtyping_tuple_nf_uuid = \
ea_connector[NfColumnTypes.NF_UUIDS.column_name]
subtype_nf_uuid = \
ea_connector[NfEaComColumnTypes.ELEMENTS_SUPPLIER_PLACE1_END_CONNECTORS.column_name]
subtype_uml_name = \
ea_connector[SUBTYPE_UML_NAMES_COLUMN]
supertype_nf_uuid = \
ea_connector[NfEaComColumnTypes.ELEMENTS_CLIENT_PLACE2_END_CONNECTORS.column_name]
supertype_uml_name = \
ea_connector[SUPERTYPE_UML_NAMES_COLUMN]
if subtype_nf_uuid in BnopObjects.registry_keyed_on_uuid:
bnop_subtype = \
BnopObjects.registry_keyed_on_uuid[subtype_nf_uuid]
else:
bnop_subtype = \
BnopFacades.create_bnop_object(
object_uuid=subtype_nf_uuid,
owning_repository_uuid=bnop_repository.uuid,
presentation_name=subtype_uml_name)
if supertype_nf_uuid in BnopObjects.registry_keyed_on_uuid:
bnop_supertype = \
BnopObjects.registry_keyed_on_uuid[supertype_nf_uuid]
else:
bnop_supertype = \
BnopFacades.create_bnop_type(
type_uuid=supertype_nf_uuid,
owning_repository_uuid=bnop_repository.uuid,
presentation_name=supertype_uml_name)
BnopFacades.create_bnop_tuple_from_two_placed_objects(
tuple_uuid=subtyping_tuple_nf_uuid,
placed1_object=bnop_supertype,
placed2_object=bnop_subtype,
immutable_minor_composition_couple_type_boro_object_ckid=BoroObjectCkIds.SuperSubTypes,
owning_repository_uuid=bnop_repository.uuid)
| 42.529412 | 155 | 0.763658 |
e4098159c8d77581a4206fd7e90ebd21515f728a | 7,127 | py | Python | utils.py | hengshuangliu/Practice-of-JSRG | 90a13004d5e7e37bab0ea3f58d53b9250d97adc8 | [
"Apache-2.0"
] | null | null | null | utils.py | hengshuangliu/Practice-of-JSRG | 90a13004d5e7e37bab0ea3f58d53b9250d97adc8 | [
"Apache-2.0"
] | null | null | null | utils.py | hengshuangliu/Practice-of-JSRG | 90a13004d5e7e37bab0ea3f58d53b9250d97adc8 | [
"Apache-2.0"
] | null | null | null | #! usr/bin/env python
"""
Created on Fri May 20 09:09:27 2016
merge images and extract a part of them randomly.
@author: shuang
"""
#import tensorflow as tf
import numpy as np
import os
import random
import shutil
import PIL.Image
from cStringIO import StringIO
from IPython.display import clear_output, Image, display
import time
#---------------------------------------Configure---------------------------------------------------------
#local parameters
debug=False
# 1 for merge: merge files from the two directories into the destination directory.
# 2 for random_extract:
# 3 for displayArray:
RUN_FUNCTION=2
# merge function arguments.
IMAGE_DIR1='old.data/pic0529/pic_y2'
IMAGE_DIR2='old.data/pic0529/pic_y3'
MERGE_DIR='old.data/pic0529/pic_y'
# random_extract parameters.
SOURCE_DIR='data/multi_pic/pic_no/pic'
EXTRACT_DIR='data/multi_pic/pic_no/pic_19'
EXTRACT_NUM=250
SUFFIX_LIST=[] # if string_list is empty, no file limited. for example, suffix=['.txt']
IF_CP=False # if IF_CP=True, copy file, otherwise move file.
#--------------------------------------Functions---------------------------------------------------------
def merge(merge_dir=MERGE_DIR, image_dir1=IMAGE_DIR1, image_dir2=IMAGE_DIR2):
"""
    Merge files from the two directories into the destination directory.
Args:
merge_dir: destination directory,string.
image_dir1: string.
image_dir2: string.
Returns: bool, True for success, and False for fail.
"""
print 'current work directory:',os.getcwd()
dir1_filename_list=[]
dir2_filename_list=[]
try:
if not os.path.exists(image_dir1):
raise ValueError('%s is not exist.'%image_dir1)
if not os.path.exists(image_dir2):
raise ValueError('%s is not exist.'%image_dir2)
dir1_filename_list=os.listdir(image_dir1)
dir2_filename_list=os.listdir(image_dir2)
same_filename=[]
if len(dir1_filename_list)==0:
raise ValueError('%s is empty.'%image_dir1)
if len(dir2_filename_list)==0:
raise ValueError('%s is empty'%image_dir2)
for filename in dir1_filename_list:
if filename in dir2_filename_list:
same_filename.append(filename)
if not os.path.exists(merge_dir):
print 'merge_dir:',merge_dir,' is not exist.'
os.mkdir(merge_dir)
print 'merge_dir:',merge_dir,'is created.'
if len(same_filename)>0:
print 'those file have same name in %s and %s'%(image_dir1,image_dir2)
print same_filename
if_rename=raw_input('rename them or give up merge them? (r=rename,g=give up):')
if if_rename=='r':
for f in dir1_filename_list:
shutil.copy(os.path.join(image_dir1,f),merge_dir)
if f in same_filename:
os.rename(os.path.join(merge_dir,f), os.path.join(merge_dir,'(1)'+f))
for f2 in dir2_filename_list:
shutil.copy(os.path.join(image_dir2,f2),merge_dir)
if f2 in same_filename:
os.rename(os.path.join(merge_dir,f2), os.path.join(merge_dir,'(2)'+f2))
elif if_rename=='g':
for f3 in dir1_filename_list:
if f3 not in same_filename:
shutil.copy(os.path.join(image_dir1,f3),merge_dir)
for f4 in dir2_filename_list:
if f4 not in same_filename:
shutil.copy(os.path.join(image_dir2,f4),merge_dir)
else:
raise ValueError('Error input: r=rename,g=give up')
else:
for f5 in dir1_filename_list:
shutil.copy(os.path.join(image_dir1,f5),merge_dir)
for f6 in dir2_filename_list:
shutil.copy(os.path.join(image_dir2,f6),merge_dir)
except ValueError as e:
print e
return False
print 'merge success.'
return True
def random_extract(src_dir=SOURCE_DIR, dest_dir=EXTRACT_DIR, num=EXTRACT_NUM,
                   suffix_list=SUFFIX_LIST, if_copy=IF_CP):
"""
randomly extract some files in source directory.
Args:
src_dir: source directory,string.
dest_dir: destnation diretory, if not exist, creat it. string.
num: numbers of file you want to copy or move,integer.
suffix_list: suffix for your wanted file, string list.
if_copy: if True, copy file form src_dir to dst_dir.
Returns:
"""
print 'current work directory:',os.getcwd()
filename_list=[]
try:
if not os.path.exists(src_dir):
raise ValueError('SOURCE_DIR:%s is not exist.'%SOURCE_DIR)
else:
file_list=os.listdir(src_dir)
if len(suffix_list)==0:
filename_list=file_list
else:
if len(file_list)==0:
print 'no file in ',src_dir
return False
else:
for filename in file_list:
if os.path.splitext(filename)[1] in suffix_list:
filename_list.append(filename)
# random copy or cut files.
if len(filename_list) <= num:
raise ValueError('extract numbers error:%d should be less than files in %s'%(num,src_dir))
else:
if not os.path.exists(dest_dir):
print 'dest_dir:',dest_dir,' is not exist.'
os.mkdir(dest_dir)
print 'dest_dir:',dest_dir,'is created.'
random.shuffle(filename_list)
for i in range(num):
if if_copy:
shutil.copy(os.path.join(src_dir,filename_list[i]), dest_dir)
else:
shutil.move(os.path.join(src_dir,filename_list[i]), dest_dir)
except ValueError as e:
print e
return False
print 'great work'
return True
def displayArray(a, fmt='jpeg', rng=[0,255]):
"""Display an array as a picture.
Args:
a: object with array interface.
"""
a = (a - rng[0])/float(rng[1] - rng[0])*255
a = np.uint8(np.clip(a, 0, 255))
f = StringIO()
PIL.Image.fromarray(a).save(f, fmt)
display(Image(data=f.getvalue()))
time.sleep(5)
    clear_output()
return True
def test_display():
# Initial Conditions -- some rain drops hit a pond
N=500
data = np.zeros([N, N], dtype="float32")
# Some rain drops hit a pond at random points
for n in range(40):
a,b = np.random.randint(0, N, 2)
data[a,b] = np.random.uniform()
displayArray(data,rng=[-0.1,0.1])
print 'great work'
return True
def main():
if debug:
print 'debuging'
test_display()
else:
if RUN_FUNCTION==1:
merge()
elif RUN_FUNCTION==2:
random_extract()
elif RUN_FUNCTION==3:
            test_display()  # displayArray() requires an array argument; run the demo instead
else:
print 'RUN_FUNCTION setup error:'
if __name__=='__main__':
main() | 36.362245 | 115 | 0.580609 |
24f2ea84b32f8d7b9aae3334f00cb8f687cfe5b7 | 7,521 | py | Python | deploy-aks/azext_aks_deploy/dev/aks/up.py | atbagga/cli-extension-aks-up | 0e28e4120a2eecac825a3d4fd11539ec922b1895 | [
"MIT"
] | null | null | null | deploy-aks/azext_aks_deploy/dev/aks/up.py | atbagga/cli-extension-aks-up | 0e28e4120a2eecac825a3d4fd11539ec922b1895 | [
"MIT"
] | 10 | 2019-12-19T08:18:59.000Z | 2020-01-28T09:54:47.000Z | deploy-aks/azext_aks_deploy/dev/aks/up.py | atbagga/cli-extension-aks-up | 0e28e4120a2eecac825a3d4fd11539ec922b1895 | [
"MIT"
] | 4 | 2019-12-09T06:03:04.000Z | 2020-01-02T10:40:39.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.log import get_logger
from knack.util import CLIError
from azext_aks_deploy.dev.common.git import resolve_repository
from azext_aks_deploy.dev.common.github_api_helper import (Files, get_work_flow_check_runID,
push_files_to_repository,
get_languages_for_repo,
get_github_pat_token,
get_default_branch,
check_file_exists)
from azext_aks_deploy.dev.common.github_workflow_helper import poll_workflow_status, get_new_workflow_yaml_name
from azext_aks_deploy.dev.common.github_azure_secrets import get_azure_credentials
from azext_aks_deploy.dev.common.kubectl import get_deployment_IP_port
from azext_aks_deploy.dev.common.const import (CHECKIN_MESSAGE_AKS, APP_NAME_DEFAULT, APP_NAME_PLACEHOLDER,
ACR_PLACEHOLDER, RG_PLACEHOLDER, PORT_NUMBER_DEFAULT,
CLUSTER_PLACEHOLDER, RELEASE_PLACEHOLDER, RELEASE_NAME)
from azext_aks_deploy.dev.aks.docker_helm_template import get_docker_templates, get_helm_charts
logger = get_logger(__name__)
aks_token_prefix = "AksAppUpCLIExt_"
# pylint: disable=too-many-statements
def aks_deploy(aks_cluster=None, acr=None, repository=None, port=None, branch_name=None,
skip_secrets_generation=False, do_not_wait=False):
"""Build and Deploy to AKS via GitHub actions
:param aks_cluster: Name of the cluster to select for deployment.
:type aks_cluster: str
:param acr: Name of the Azure Container Registry to be used for pushing the image.
:type acr: str
:param repository: GitHub repository URL e.g. https://github.com/azure/azure-cli.
:type repository: str
:param port: Port on which your application runs. Default is 8080
    :type port: str
    :param branch_name: New branch name to be created to check in files and raise a PR
    :type branch_name: str
    :param skip_secrets_generation: Skip generation of Azure credentials.
    :type skip_secrets_generation: bool
    :param do_not_wait: Do not wait for workflow completion.
    :type do_not_wait: bool
"""
repo_name, repository = resolve_repository(repository)
get_github_pat_token(token_prefix=aks_token_prefix + repo_name, display_warning=True)
logger.warning('Setting up your workflow.')
languages = get_languages_for_repo(repo_name)
if not languages:
raise CLIError('Language detection failed for this repository.')
language = choose_supported_language(languages)
if language:
logger.warning('%s repository detected.', language)
else:
logger.debug('Languages detected : %s', languages)
raise CLIError('The languages in this repository are not yet supported from up command.')
from azext_aks_deploy.dev.common.azure_cli_resources import (get_aks_details,
get_acr_details,
configure_aks_credentials)
cluster_details = get_aks_details(aks_cluster)
logger.debug(cluster_details)
acr_details = get_acr_details(acr)
logger.debug(acr_details)
print('')
files = []
if port is None:
port = PORT_NUMBER_DEFAULT
if 'Dockerfile' not in languages.keys():
# check in docker file and docker ignore
docker_files = get_docker_templates(language, port)
if docker_files:
files = files + docker_files
else:
logger.warning('Using the Dockerfile found in the repository %s', repo_name)
if 'Smarty' not in languages.keys():
# check in helm charts
helm_charts = get_helm_charts(language, acr_details, port)
if helm_charts:
files = files + helm_charts
# create azure service principal and display json on the screen for user to configure it as Github secrets
if not skip_secrets_generation:
get_azure_credentials()
print('')
workflow_files = get_yaml_template_for_repo(cluster_details, acr_details, repo_name)
if workflow_files:
files = files + workflow_files
# File checkin
for file_name in files:
logger.debug("Checkin file path: %s", file_name.path)
logger.debug("Checkin file content: %s", file_name.content)
default_branch = get_default_branch(repo_name)
workflow_commit_sha = push_files_to_repository(
repo_name=repo_name, default_branch=default_branch, files=files,
branch_name=branch_name, message=CHECKIN_MESSAGE_AKS)
if workflow_commit_sha:
print('Creating workflow...')
check_run_id = get_work_flow_check_runID(repo_name, workflow_commit_sha)
workflow_url = 'https://github.com/{repo_id}/runs/{checkID}'.format(repo_id=repo_name,
checkID=check_run_id)
print('GitHub Action workflow has been created - {}'.format(workflow_url))
if not do_not_wait:
poll_workflow_status(repo_name, check_run_id)
configure_aks_credentials(cluster_details['name'], cluster_details['resourceGroup'])
deployment_ip, port = get_deployment_IP_port(RELEASE_NAME, language)
print('Your app is deployed at: http://{ip}:{port}'.format(ip=deployment_ip, port=port))
def get_yaml_template_for_repo(cluster_details, acr_details, repo_name):
files_to_return = []
github_workflow_path = '.github/workflows/'
# Read template file
yaml_file_name = 'main.yml'
workflow_yaml = github_workflow_path + yaml_file_name
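    # If a main.yml workflow already exists in the repo, fall back to a freshly generated yaml name instead of overwriting it.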
if check_file_exists(repo_name, workflow_yaml):
yaml_file_name = get_new_workflow_yaml_name()
workflow_yaml = github_workflow_path + yaml_file_name
from azext_aks_deploy.dev.resources.resourcefiles import DEPLOY_TO_AKS_TEMPLATE
files_to_return.append(Files(path=workflow_yaml,
content=DEPLOY_TO_AKS_TEMPLATE
.replace(APP_NAME_PLACEHOLDER, APP_NAME_DEFAULT)
.replace(ACR_PLACEHOLDER, acr_details['name'])
.replace(CLUSTER_PLACEHOLDER, cluster_details['name'])
.replace(RELEASE_PLACEHOLDER, RELEASE_NAME)
.replace(RG_PLACEHOLDER, cluster_details['resourceGroup'])))
return files_to_return
def choose_supported_language(languages):
# check if one of top three languages are supported or not
list_languages = list(languages.keys())
first_language = list_languages[0]
if first_language in ('JavaScript', 'Java', 'Python'):
return first_language
if len(list_languages) >= 1 and list_languages[1] in ('JavaScript', 'Java', 'Python'):
return list_languages[1]
if len(list_languages) >= 2 and list_languages[2] in ('JavaScript', 'Java', 'Python'):
return list_languages[2]
return None
| 49.807947 | 111 | 0.653636 |
c25277d771538e45826dca99de727dec34e39427 | 1,102 | py | Python | stweet/file_reader/read_from_file.py | enginbozaba/stweet-twitter-api | 060250e00a01ae53c2ca12954719b5efc918e132 | [
"MIT"
] | null | null | null | stweet/file_reader/read_from_file.py | enginbozaba/stweet-twitter-api | 060250e00a01ae53c2ca12954719b5efc918e132 | [
"MIT"
] | null | null | null | stweet/file_reader/read_from_file.py | enginbozaba/stweet-twitter-api | 060250e00a01ae53c2ca12954719b5efc918e132 | [
"MIT"
] | null | null | null | """Methods to read tweets from files."""
import json
from typing import List
import pandas as pd
from ..model.tweet import Tweet
def read_from_csv(file_path: str) -> List[Tweet]:
"""Method to read tweets from csv file."""
df = pd.read_csv(file_path, dtype={
'quoted_status_id_str': str,
'in_reply_to_status_id_str': str,
'in_reply_to_user_id_str': str
})
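    # The ID columns are read as strings so that 64-bit tweet IDs are not mangled by float conversion.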
df.quoted_status_id_str.fillna('', inplace=True)
df.quoted_status_short_url.fillna('', inplace=True)
df.quoted_status_expand_url.fillna('', inplace=True)
df.in_reply_to_status_id_str.fillna('', inplace=True)
df.in_reply_to_user_id_str.fillna('', inplace=True)
df.hashtags.fillna('', inplace=True)
df.urls.fillna('', inplace=True)
df.mentions.fillna('', inplace=True)
return [Tweet.create_tweet_from_flat_dict(row) for _, row in df.iterrows()]
def read_from_json_lines(file_path: str) -> List[Tweet]:
"""Method to read tweets from csv file."""
file = open(file_path, 'r')
return [Tweet.create_tweet_from_dict(json.loads(line)) for line in file.readlines()]
| 33.393939 | 88 | 0.702359 |
fe2db0c6670d327cca552e693aecbce693d02c3e | 15,227 | py | Python | utils/handle_modes.py | Djack1010/sciba_malwareImg2smali | 2bbe1a90b16abdb99a7ac3a3d31cbb84808d8e68 | [
"MIT"
] | null | null | null | utils/handle_modes.py | Djack1010/sciba_malwareImg2smali | 2bbe1a90b16abdb99a7ac3a3d31cbb84808d8e68 | [
"MIT"
] | null | null | null | utils/handle_modes.py | Djack1010/sciba_malwareImg2smali | 2bbe1a90b16abdb99a7ac3a3d31cbb84808d8e68 | [
"MIT"
] | 1 | 2021-04-21T02:33:20.000Z | 2021-04-21T02:33:20.000Z | import tensorflow as tf
from utils.generic_utils import print_log
import utils.config as config
import os
import datetime
import time
from utils.analyzing_data import multiclass_analysis
import pickle
import cv2
import numpy as np
def get_label(file_path):
# convert the path to a list of path components
parts = tf.strings.split(file_path, os.path.sep)
# The second to last is the class-directory
# cast to float32 for one_hot encode (otherwise TRUE/FALSE tensor)
return tf.cast(parts[-2] == config.CLASS_NAMES, tf.float32)
def decode_img(img):
# convert the compressed string to a 3D uint8 tensor
img = tf.image.decode_png(img, channels=config.CHANNELS) # tf.image.decode_jpeg(img, channels=CHANNELS)
# Use `convert_image_dtype` to convert to floats in the [0,1] range.
return tf.image.convert_image_dtype(img, tf.float32)
def process_path(file_path, model_input='images', data_type='images'):
label = get_label(file_path)
# load the raw data from the file as a string
img = tf.io.read_file(file_path)
if data_type == 'images':
img = decode_img(img)
# resize the image to the desired size.
img = tf.image.resize(img, [config.IMG_DIM, config.IMG_DIM])
if model_input == 'vectors':
# flatten the data to vector
img = tf.reshape(img, [-1])
return img, label
def process_path_vector(file_paths):
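    # Eagerly decode each image with OpenCV into a flattened grayscale vector paired with its one-hot label.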
vectors = []
labels = []
for fp in file_paths:
label = get_label(fp).numpy()
vector = cv2.cvtColor(cv2.imread(fp.numpy().decode("utf-8")), cv2.COLOR_BGR2GRAY).flatten()
vectors.append(vector)
labels.append(label)
return tf.data.Dataset.from_tensor_slices((np.array(vectors), np.array(labels)))
def prepare_for_training(ds, batch_size, cache=True, shuffle_buffer_size=1000, loop=False):
"""
cache: If isinstance(cache, str), then represents the name of a
directory on the filesystem to use for caching elements in this Dataset.
Otherwise, the dataset will be cached in memory.
"""
# IF it is a small dataset, only load it once and keep it in memory.
# OTHERWISE use `.cache(filename)` to cache preprocessing work for datasets that don't fit in memory.
if cache:
if isinstance(cache, str):
ds = ds.cache(cache)
else:
ds = ds.cache()
ds = ds.shuffle(buffer_size=shuffle_buffer_size)
# Repeat forever
if loop:
ds = ds.repeat()
ds = ds.batch(batch_size)
# `prefetch` lets the dataset fetch batches in the background while the model
# is training.
ds = ds.prefetch(buffer_size=config.AUTOTUNE)
return ds
def prepare_ds(caching, my_set, cache_name, batch_train):
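    # Wraps prepare_for_training; when caching is enabled, the tf.data cache is backed by a fresh .tfcache file on disk.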
if caching:
# delete previous cache files and store for this execution
caching_file_base = config.main_path + "temp/"
for f in os.listdir(caching_file_base):
if "{}.tfcache".format(cache_name) in f:
os.remove(caching_file_base + f)
set_ds = prepare_for_training(my_set, batch_size=batch_train,
cache=caching_file_base + "{}.tfcache".format(cache_name))
else:
set_ds = prepare_for_training(my_set, batch_size=batch_train)
return set_ds
def get_ds(name_ds, ds_info):
# Load filepaths
file_paths = ds_info[name_ds]
# Create tf.Dataset from filepaths
file_paths_ds = tf.data.Dataset.from_tensor_slices(file_paths).shuffle(len(file_paths))
return file_paths_ds
def initialization(arguments, class_info, ds_info, model_class):
# GLOBAL SETTINGS
config.AUTOTUNE = tf.data.experimental.AUTOTUNE
config.CLASS_NAMES = class_info['class_names']
config.BATCH_SIZE = arguments.batch_size
config.DATA_REQ = model_class.input_type
print("LOADING AND PRE-PROCESSING DATA")
try:
# STATS
size_train, size_val, size_test = class_info['train_size'], class_info['val_size'], class_info['test_size']
class_names, nclasses = class_info['class_names'], class_info['n_classes']
# Print information on log
# EXECUTION Info
mode_info = "load_model = {}".format(arguments.load_model) if arguments.load_model is not None \
else "mode = {}".format(arguments.mode)
print_log("INFO EXECUTION:"
"\n{}\nmodel = {}\ndataset = {}"
"\noutput_model = {}\nepochs = {}\nbatch_size = {}\ncaching = {}"
"\nmodel_input_type = {}"
"\n----------------"
.format(mode_info, arguments.model, arguments.dataset,
arguments.output_model, arguments.epochs, arguments.batch_size, arguments.caching,
config.DATA_REQ))
# DATA Info
print_log("INFO DATA:"
"\nnum_classes = {}\nclass_names= {}\nSize train-val-test= {}-{}-{}"
"\ndata_type = {}\nsize_{}"
.format(nclasses, class_names, size_train, size_val, size_test,
ds_info['ds_type'], "img = {}x{}".format(config.IMG_DIM, config.CHANNELS)
if config.DATA_REQ == "images" else "vec = {}".format(config.VECTOR_DIM)))
for ds_class in class_names:
print_log("{} : {}-{}-{} -> {}".format(ds_class, class_info['info'][ds_class]['TRAIN'],
class_info['info'][ds_class]['VAL'],
class_info['info'][ds_class]['TEST'],
class_info['info'][ds_class]['TOT']))
print_log("----------------")
except KeyError as e:
print("KeyError: {}".format(e))
print("POSSIBLE FIX: run 'python main.py -m DATA -d {}'".format(arguments.dataset))
exit()
def train_val(arguments, model, ds_info):
# Create tf.Dataset from ds_info e filepaths
train_paths_ds, val_paths_ds = get_ds('train_paths', ds_info), get_ds('val_paths', ds_info)
# -------------- TRAINING and VALIDATION part --------------------
# Use Dataset.map to create a dataset of image, label pairs
# Set `num_parallel_calls` so multiple images are loaded/processed in parallel.
lab_train_ds = train_paths_ds.map(lambda x: process_path(x, model_input=config.DATA_REQ,
data_type=ds_info['ds_type']),
num_parallel_calls=config.AUTOTUNE)
lab_val_ds = val_paths_ds.map(lambda x: process_path(x, model_input=config.DATA_REQ,
data_type=ds_info['ds_type']),
num_parallel_calls=config.AUTOTUNE)
# Caching dataset in memory for big dataset (IF arguments.caching is set)
train_ds, val_ds = prepare_ds(arguments.caching, lab_train_ds, "train", arguments.batch_size),\
prepare_ds(arguments.caching, lab_val_ds, "val", arguments.batch_size)
print_log('Start Training for {} epochs '.format(arguments.epochs), print_on_screen=True)
# Initialize callbacks for Tensorboard
log_fit = config.main_path + "results/tensorboard/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback_fit = tf.keras.callbacks.TensorBoard(log_dir=log_fit, histogram_freq=1)
train_results = model.fit(x=train_ds, batch_size=arguments.batch_size, epochs=arguments.epochs,
validation_data=val_ds, callbacks=[tensorboard_callback_fit])
print_log("\ttrain_loss:{} \n\ttrain_acc:{} \n\ttrain_prec:{} \n\ttrain_rec:{} \n"
"\tval_loss:{} \n\tval_acc:{} \n\tval_prec:{} \n\tval_rec:{}"
              .format(train_results.history['loss'], train_results.history['acc'], train_results.history['prec'],
                      train_results.history['rec'],
train_results.history['val_loss'], train_results.history['val_acc'],
train_results.history['val_prec'], train_results.history['val_rec']))
del train_ds, val_ds
def train_test(arguments, model, class_info, ds_info):
# Create tf.Dataset from ds_info e filepaths
final_training_paths_ds = get_ds('final_training_paths', ds_info)
# -------------- FINAL TRAINING and TEST part --------------------
# Use Dataset.map to create a dataset of image, label pairs
# Set `num_parallel_calls` so multiple images are loaded/processed in parallel.
lab_final_train_ds = final_training_paths_ds.map(lambda x: process_path(x, model_input=config.DATA_REQ,
data_type=ds_info['ds_type']),
num_parallel_calls=config.AUTOTUNE)
# NB The batch_size for testing is set to 1 to make easier the calculation of the performance results
fin_train_ds = prepare_ds(arguments.caching, lab_final_train_ds, "fin_tr", arguments.batch_size)
# Train the model over the entire total_training set and then test
print_log('Start Final Training for {} epochs '.format(arguments.epochs), print_on_screen=True)
start_training = time.perf_counter()
final_train_results = model.fit(x=fin_train_ds, batch_size=arguments.batch_size, epochs=arguments.epochs)
end_training = time.perf_counter()
print_log("\ttrain_loss:{} \n\ttrain_acc:{} \n\ttrain_prec:{} \n\ttrain_rec:{} \n"
              .format(final_train_results.history['loss'], final_train_results.history['acc'],
                      final_train_results.history['prec'], final_train_results.history['rec']))
print_log("FINAL TRAINING TIME: {} ".format(str(datetime.timedelta(seconds=end_training - start_training))))
del fin_train_ds
# Test the trained model over the test set
test(arguments, model, class_info, ds_info)
def test(arguments, model, class_info, ds_info):
# Create tf.Dataset from ds_info e filepaths
test_paths_ds = get_ds('test_paths', ds_info)
# -------------- TEST part --------------------
# Use Dataset.map to create a dataset of image, label pairs
# Set `num_parallel_calls` so multiple images are loaded/processed in parallel.
lab_test_ds = test_paths_ds.map(lambda x: process_path(x, model_input=config.DATA_REQ,
data_type=ds_info['ds_type']),
num_parallel_calls=config.AUTOTUNE)
# NB The batch size for test is set to 1 to make easier the calculation of the performance results
test_ds = prepare_ds(arguments.caching, lab_test_ds, "test", 1)
# Test the trained model over the test set
print_log('Start Test', print_on_screen=True)
results = model.evaluate(test_ds)
print_log("\ttest loss:{} \n\ttest accuracy:{}".format(results[0], results[1]), print_on_screen=True)
print_log("\tPrec:{} \n\tRecall:{}".format(results[2], results[3]), print_on_screen=True)
try:
# F-measure calculated as (2 * Prec * Recall)/(Prec + Recall)
print_log("\tF-Measure:{} \n\tAUC:{}"
.format((2 * results[2] * results[3]) / (results[2] + results[3]), results[4]), print_on_screen=True)
except ZeroDivisionError:
print_log("\tF-Measure:{} \n\tAUC:{}"
.format("Error", results[4]), print_on_screen=True)
# TODO: split evaluation and prediction in two phases -> at the moment, the test set is first used by model.evaluate
# to get cumulative information, and then is again used by model.predict to get per class information, thus, the
# test process is repeated two times!
print("Calculating performances per class, it may take a while...")
cm, results_classes, to_print = multiclass_analysis(model, test_ds, class_info['class_names'],
save_fig=config.main_path + "results/figures/CM_{}"
.format(config.timeExec))
print_log("Results per classes", print_on_screen=True)
print_log(to_print, print_on_screen=True)
del test_ds
def save_model(arguments, model):
model_path = config.main_path + 'models_saved/{}_m{}' \
.format(arguments.output_model, arguments.model)
# save model and architecture to single file
tf.keras.models.save_model(model, model_path, overwrite=False)
with open(model_path + '.info', 'wb') \
as filehandle:
store_data = {"CLASS_NAMES": config.CLASS_NAMES, "CHANNELS": config.CHANNELS, "IMG_DIM": config.IMG_DIM}
pickle.dump(store_data, filehandle)
print_log("Model, Weights and Info saved to 'models_saved/{}_m{}[.info]'"
.format(arguments.output_model, arguments.model),
print_on_screen=True)
def load_model(arguments, required_img, required_chan, required_numClasses):
"""
    The required_img and required_chan args are None when the model is loaded with no specific request on the image
    size. That is the case for apply_gradcam, where we just want to test the model, while in main.py we can also pass
    an --image_size argument and then check whether the loaded model fits these requirements.
"""
print("LOADING MODEL")
model_path = config.main_path + 'models_saved/{}_m{}'\
.format(arguments.load_model, arguments.model)
if not os.path.isdir(model_path):
print("Model not found in {}, exiting...".format(model_path))
exit()
model = tf.keras.models.load_model(model_path, compile=False)
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['acc', tf.keras.metrics.Precision(name="prec"),
tf.keras.metrics.Recall(name="rec"), tf.keras.metrics.AUC(name='auc')])
with open(model_path + ".info", 'rb') \
as filehandle:
stored_data = pickle.load(filehandle)
class_names = stored_data["CLASS_NAMES"]
channel = stored_data["CHANNELS"]
img_dim = stored_data["IMG_DIM"]
if (required_img is not None and img_dim != required_img) or \
(required_chan is not None and channel != required_chan) or \
(required_numClasses is not None and len(class_names) != required_numClasses):
print("IMG_DIM, CHANNELS and/or number of output classes DIFFERS from the required! Exiting...")
print("Asking img size with {}x{} on a {}-class classification problem, but model {}x{} and outputs on "
"{}-class, exiting...".format(required_img, required_chan, required_numClasses, img_dim, channel,
len(class_names)))
exit()
return model
def save_weights(arguments, model):
print("SAVING WEIGHTS")
model.save_weights(config.main_path + 'models_saved/{}_m{}_weights'.format(arguments.output_model, arguments.model))
def load_weights(arguments, model):
print("LOADING WEIGHTS")
model.load_weights(config.main_path + 'models_saved/{}_m{}_weights'.format(arguments.load_model, arguments.model))
return model
| 46.42378 | 120 | 0.637749 |
0a2649be8d41ac38c2172f78249af3222274b850 | 808 | py | Python | cpt2wgt.py | kevincao91/Tools | 545901c682c20cd06256156dadc75b8e4e7df88c | [
"MIT"
] | null | null | null | cpt2wgt.py | kevincao91/Tools | 545901c682c20cd06256156dadc75b8e4e7df88c | [
"MIT"
] | null | null | null | cpt2wgt.py | kevincao91/Tools | 545901c682c20cd06256156dadc75b8e4e7df88c | [
"MIT"
] | null | null | null | import pickle
import os,sys
cptPath=sys.argv[1]
wgtPath=cptPath
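# Filter the checkpoint's 'blobs' dict (dropping fc1000/momentum entries) and write the result back to the same path.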
with open(cptPath,'rb') as f:
data = pickle.load(f,encoding='latin1')
keys = data['blobs'].keys()
# needs = ['conv','res','fpn',]
not_needs = ['fc1000','momentum']
output_dic={'blobs':{}}
print('filtered out:')
for key in keys:
keep = True
# for need in needs:
# if key.startswith(need):
# keep=True
for not_need in not_needs:
if not_need in key:
keep=False
break
if keep:
# if 'score' in key:
# print(key)
output_dic['blobs'][key] = data['blobs'][key]
#print(key)
else:
print(' - '+key)
#print(output_dic['blobs'].keys())
with open(wgtPath,'wb') as f:
pickle.dump(output_dic,f,protocol=0)
| 26.064516 | 54 | 0.555693 |
9b7b6c73bab3ea09a06d02c91d959d9f4990dd59 | 3,357 | py | Python | utils/http_helper.py | BlockVigil/ethvigil-cli | 80247260a6b20224f6528edbfee5bdee19baf842 | [
"MIT"
] | null | null | null | utils/http_helper.py | BlockVigil/ethvigil-cli | 80247260a6b20224f6528edbfee5bdee19baf842 | [
"MIT"
] | 9 | 2019-08-06T10:16:04.000Z | 2020-07-23T17:16:27.000Z | utils/http_helper.py | BlockVigil/ethvigil-cli | 80247260a6b20224f6528edbfee5bdee19baf842 | [
"MIT"
] | 1 | 2019-08-07T19:55:58.000Z | 2019-08-07T19:55:58.000Z | import tenacity
import requests
from .exceptions import *
import logging
ev_logger = logging.getLogger('EVCore')
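# Both wrappers below retry failed HTTP calls with random exponential backoff for up to 60 seconds, then re-raise.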
@tenacity.retry(
stop=tenacity.stop_after_delay(60),
wait=tenacity.wait_random_exponential(multiplier=1, max=60),
reraise=True
)
def get(url):
r = requests.get(url)
return r
@tenacity.retry(
stop=tenacity.stop_after_delay(60),
wait=tenacity.wait_random_exponential(multiplier=1, max=60),
reraise=True
)
def post(url, json_params, headers):
r = requests.post(url=url, json=json_params, headers=headers)
return r
def make_http_call(request_type, url, params={}, headers={}):
response = None
request_details = {'requestType': request_type, 'url': url, 'params': params, 'headers': headers}
ev_logger.debug('HTTPRequest')
ev_logger.debug(request_details)
if request_type == 'get':
try:
response = get(url)
except (
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
requests.exceptions.ConnectTimeout
) as e:
raise EVConnectionError("Error connecting to EthVigil API %s" % url, e)
except Exception as e:
raise EVBaseException(e.__str__())
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
request_details.update({'response': {'code': response.status_code, 'text': response.text}})
ev_logger.debug(request_details)
raise EVHTTPError(
request_url=url,
request_body='',
status_code=response.status_code,
response_body=response.text
)
elif request_type == 'post':
try:
response = post(url=url, json_params=params, headers=headers)
except (
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
requests.exceptions.ConnectTimeout
) as e:
raise EVConnectionError("Error connecting to EthVigil API %s" % url, e)
except Exception as e:
raise EVBaseException(e.__str__())
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
request_details.update({'response': {'code': response.status_code, 'text': response.text}})
ev_logger.debug(request_details)
raise EVHTTPError(
request_url=url,
request_body=params,
status_code=response.status_code,
response_body=response.text
)
if not(request_type == 'get' and 'swagger' in url):
return_status = response.status_code
return_content = response.text
request_details.update({'response': {'text': return_content, 'status': return_status}})
ev_logger.debug('HTTPResponse')
ev_logger.debug(request_details)
response = response.json()
api_success = response.get('success', False)
# ignoring GET returns for OpenAPI spec. Does not carry a 'success' field
if not api_success and request_type == 'get' and 'openapi' not in response:
raise EVAPIError(request_url=url, request_body=params, status_code=return_status,
response_body=return_content)
return response
| 36.48913 | 103 | 0.629729 |
88c21a27f8a95bdaec77a347bbb4a9982284c536 | 2,366 | py | Python | dependencies/scons-config/build/lib/sconsconfig/packages/libcellml.py | maierbn/opendihu | 577650e2f6b36a7306766b0f4176f8124458cbf0 | [
"MIT"
] | 17 | 2018-11-25T19:29:34.000Z | 2021-09-20T04:46:22.000Z | dependencies/scons-config/build/lib/sconsconfig/packages/libcellml.py | maierbn/opendihu | 577650e2f6b36a7306766b0f4176f8124458cbf0 | [
"MIT"
] | 1 | 2020-11-12T15:15:58.000Z | 2020-12-29T15:29:24.000Z | dependencies/scons-config/build/lib/sconsconfig/packages/libcellml.py | maierbn/opendihu | 577650e2f6b36a7306766b0f4176f8124458cbf0 | [
"MIT"
] | 4 | 2018-10-17T12:18:10.000Z | 2021-05-28T13:24:20.000Z | import sys, os, multiprocessing
from .Package import Package
class libcellml(Package):
def __init__(self, **kwargs):
defaults = {
'download_url': 'https://github.com/cellml/libcellml/archive/develop.zip',
}
defaults.update(kwargs)
super(libcellml, self).__init__(**defaults)
self.ext = '.cpp'
#self.sub_dirs = [
# ('include/mysql', 'lib'),
# ('include/mysql', 'lib64'),
#]
#self.headers = ['mysql.h']
#self.libs = ['mysqlclient']
#self.extra_libs = ['lapack', 'blas']
self.check_text = r'''
#include <iostream>
#include <cstdlib>
#include <libcellml>
#include <memory>
int main(int argc, char* argv[]) {
const std::string e =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
"<model xmlns=\"http://www.cellml.org/cellml/2.0#\">"
"<units name=\"valid_name\"/>"
"</model>";
libcellml::Model m;
    libcellml::UnitsPtr u = std::make_shared<libcellml::Units>();
u->setName("valid_name");
m.addUnits(u);
libcellml::Printer printer;
const std::string a = printer.printModel(m);
return EXIT_SUCCESS;
}
'''
# get number of available processors
p = multiprocessing.cpu_count()
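        # NOTE: 'p' is currently not passed to make below; append -j to the make call to parallelize the build.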
# Setup the build handler.
self.set_build_handler([
"mkdir -p ${PREFIX}/../build ",
"cd ${PREFIX}/../build && cmake \
-DBUILD_TYPE=Release \
-DINSTALL_PREFIX=${PREFIX} \
-DLIBXML2_LIBRARIES=../../libxml2/install/lib/libxml2.a \
-DLIBXML2_INCLUDE_DIR=../../libxml2/install/include \
-DLIBCELLML_BUILD_SHARED=Off \
-DLIBCELLML_COVERAGE=Off \
-DLIBCELLML_MEMCHECK=Off \
-DLIBCELLML_UNIT_TESTS=Off \
${SOURCE_DIR} && make && make install",
#"ln -s ${PREFIX}/include/libcellml/module/libcellml ${PREFIX}/include/libcellml"
])
self.number_output_lines = 84
self.libs = ["cellml"]
self.headers = ["libcellml/model.h", "libcellml"]
def check(self, ctx):
env = ctx.env
ctx.Message('Checking for libcellml ... ')
self.check_options(env)
res = super(libcellml, self).check(ctx)
self.check_required(res[0], ctx)
ctx.Result(res[0])
return res[0]
| 30.333333 | 91 | 0.567202 |
ef5fff25de33604927224c0ac96fb42363731cec | 3,108 | py | Python | python/pyserial-3.0/serial/__init__.py | gotnone/hwa | 4648cf6072a06552d22cbf6498b35f3e24ce38d5 | [
"BSD-3-Clause"
] | 25 | 2015-08-05T12:36:24.000Z | 2021-03-26T01:51:58.000Z | python/pyserial-3.0/serial/__init__.py | gotnone/hwa | 4648cf6072a06552d22cbf6498b35f3e24ce38d5 | [
"BSD-3-Clause"
] | 3 | 2021-06-08T21:06:32.000Z | 2022-01-13T02:22:38.000Z | python/pyserial-3.0/serial/__init__.py | gotnone/hwa | 4648cf6072a06552d22cbf6498b35f3e24ce38d5 | [
"BSD-3-Clause"
] | 4 | 2016-09-18T08:58:35.000Z | 2020-07-16T11:43:29.000Z | #!/usr/bin/env python
#
# This is a wrapper module for different platform implementations
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2001-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import importlib
import sys
from serial.serialutil import *
#~ SerialBase, SerialException, to_bytes, iterbytes
VERSION = '3.0'
if sys.platform == 'cli':
from serial.serialcli import Serial
else:
import os
# chose an implementation, depending on os
if os.name == 'nt': # sys.platform == 'win32':
from serial.serialwin32 import Serial
elif os.name == 'posix':
from serial.serialposix import Serial, PosixPollSerial, VTIMESerial
elif os.name == 'java':
from serial.serialjava import Serial
else:
raise ImportError("Sorry: no implementation for your platform ('%s') available" % (os.name,))
protocol_handler_packages = [
'serial.urlhandler',
]
def serial_for_url(url, *args, **kwargs):
"""\
Get an instance of the Serial class, depending on port/url. The port is not
opened when the keyword parameter 'do_not_open' is true, by default it
is. All other parameters are directly passed to the __init__ method when
the port is instantiated.
The list of package names that is searched for protocol handlers is kept in
``protocol_handler_packages``.
e.g. we want to support a URL ``foobar://``. A module
``my_handlers.protocol_foobar`` is provided by the user. Then
``protocol_handler_packages.append("my_handlers")`` would extend the search
path so that ``serial_for_url("foobar://"))`` would work.
"""
# check and remove extra parameter to not confuse the Serial class
do_open = not kwargs.pop('do_not_open', False)
# the default is to use the native implementation
klass = Serial
try:
url_lowercase = url.lower()
except AttributeError:
# it's not a string, use default
pass
else:
# if it is an URL, try to import the handler module from the list of possible packages
if '://' in url_lowercase:
protocol = url_lowercase.split('://', 1)[0]
module_name = '.protocol_%s' % (protocol,)
for package_name in protocol_handler_packages:
try:
package = importlib.import_module(package_name)
handler_module = importlib.import_module(module_name, package_name)
except ImportError:
continue
else:
if hasattr(handler_module, 'serial_class_for_url'):
url, klass = handler_module.serial_class_for_url(url)
else:
klass = handler_module.Serial
break
else:
raise ValueError('invalid URL, protocol %r not known' % (protocol,))
# instantiate and open when desired
instance = klass(None, *args, **kwargs)
instance.port = url
if do_open:
instance.open()
return instance
| 35.724138 | 101 | 0.638996 |
e358c78ad1e4865876af38c4f113de09d02e9c46 | 8,923 | py | Python | contrib/linearize/linearize-data.py | gaoncoin/gaon | 93f94f7d268c0f9abd0b7ef82791945349fcb40c | [
"MIT"
] | null | null | null | contrib/linearize/linearize-data.py | gaoncoin/gaon | 93f94f7d268c0f9abd0b7ef82791945349fcb40c | [
"MIT"
] | null | null | null | contrib/linearize/linearize-data.py | gaoncoin/gaon | 93f94f7d268c0f9abd0b7ef82791945349fcb40c | [
"MIT"
] | 4 | 2018-02-27T04:30:24.000Z | 2018-04-01T14:59:24.000Z | #!/usr/bin/python
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import json
import struct
import re
import os
import os.path
import base64
import httplib
import sys
import hashlib
import gaon_hash
import datetime
import time
from collections import namedtuple
settings = {}
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
def calc_hdr_hash(blk_hdr):
#hash1 = hashlib.sha256()
#hash1.update(blk_hdr)
#hash1_o = hash1.digest()
#hash2 = hashlib.sha256()
#hash2.update(hash1_o)
#hash2_o = hash2.digest()
#return hash2_o
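	# This script indexes blocks by the coin-specific PoW hash (gaon_hash) instead of double-SHA256.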
pow_hash = gaon_hash.getPoWHash(blk_hdr)
return pow_hash
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
return hash_str
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
self.inFn = 0
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
				os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if self.timestampSplit and (blkDate > self.lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + hash_str)
lastDate = blkDate
if outF:
outF.close()
if setFileTime:
os.utime(outFname, (int(time.time()), highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
if not self.outF:
if self.fileOutput:
outFname = self.settings['output_file']
else:
outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
print("Output file " + outFname)
self.outF = open(outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return os.path.join(self.settings['input'], "blk%05d.dat" % fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file " + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0] == "\0")):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
print("Invalid magic: " + inMagic.encode('hex'))
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
hash_str = calc_hash_str(blk_hdr)
			if hash_str not in self.blkmap:
print("Skipping unknown block " + hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'netmagic' not in settings:
settings['netmagic'] = 'cee2caff'
if 'genesis' not in settings:
settings['genesis'] = '00000bafbc94add76cb75e2ec92894837288a481e5c005f6563d91623bf8bc2c'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000L * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
settings['max_out_sz'] = long(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = settings['netmagic'].decode('hex')
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
if not settings['genesis'] in blkmap:
print("Genesis block not found in hashlist")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
| 29.160131 | 108 | 0.68923 |
5d601d77d2480cfe7db743c10b4d226218e601aa | 9,628 | py | Python | ssd.pytorch/eval_coco2.py | transcendentsky/detection_models | 185f4bcccd5ab2c2f8edac37c76a9ccc47f73883 | [
"Apache-2.0"
] | null | null | null | ssd.pytorch/eval_coco2.py | transcendentsky/detection_models | 185f4bcccd5ab2c2f8edac37c76a9ccc47f73883 | [
"Apache-2.0"
] | null | null | null | ssd.pytorch/eval_coco2.py | transcendentsky/detection_models | 185f4bcccd5ab2c2f8edac37c76a9ccc47f73883 | [
"Apache-2.0"
] | null | null | null | """Adapted from:
@longcw faster_rcnn_pytorch: https://github.com/longcw/faster_rcnn_pytorch
@rbgirshick py-faster-rcnn https://github.com/rbgirshick/py-faster-rcnn
Licensed under The MIT License [see LICENSE for details]
"""
from __future__ import print_function
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from data import VOC_ROOT, VOCAnnotationTransform, VOCDetection, BaseTransform
from data import VOC_CLASSES as labelmap
from data.coco_test import *
import torch.utils.data as data
from ssd import build_ssd
import sys
import os
import time
import argparse
import numpy as np
import pickle
import cv2
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
COCOroot = os.path.join("/media/trans/mnt", "data/coco/")
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Evaluation')
parser.add_argument('--trained_model',
default='weights/ssd300_mAP_77.43_v2.pth', type=str,
help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
help='File path to save results')
parser.add_argument('--confidence_threshold', default=0.01, type=float,
help='Detection confidence threshold')
parser.add_argument('--top_k', default=5, type=int,
help='Further restrict the number of predictions to parse')
parser.add_argument('--cuda', default=True, type=str2bool,
help='Use cuda to train model')
parser.add_argument('--voc_root', default=VOC_ROOT,
help='Location of VOC root directory')
parser.add_argument('--cleanup', default=True, type=str2bool,
help='Cleanup and remove results files following eval')
args = parser.parse_args()
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't using \
CUDA. Run with --cuda for optimal eval speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
annopath = os.path.join(args.voc_root, 'VOC2007', 'Annotations', '%s.xml')
imgpath = os.path.join(args.voc_root, 'VOC2007', 'JPEGImages', '%s.jpg')
imgsetpath = os.path.join(args.voc_root, 'VOC2007', 'ImageSets',
'Main', '{:s}.txt')
YEAR = '2007'
devkit_path = args.voc_root + 'VOC' + YEAR
dataset_mean = (104, 117, 123)
set_type = 'test'
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
def tic(self):
# using time.time instead of time.clock because time time.clock
# does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
return self.average_time
else:
return self.diff
def parse_rec(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,
int(bbox.find('ymin').text) - 1,
int(bbox.find('xmax').text) - 1,
int(bbox.find('ymax').text) - 1]
objects.append(obj_struct)
return objects
def get_output_dir(name, phase):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
filedir = os.path.join(name, phase)
if not os.path.exists(filedir):
os.makedirs(filedir)
return filedir
def test_net(save_folder, net, cuda, dataset, transform, top_k,
im_size=300, thresh=0.05):
num_images = len(dataset)
print("[DEBUG] length: ", num_images)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in range(num_images)]
for _ in range(len(labelmap) + 1)]
# timers
_t = {'im_detect': Timer(), 'misc': Timer(), 'total':Timer() }
output_dir = get_output_dir('ssd300_coco_120000', set_type)
det_file = os.path.join(output_dir, 'detections.pkl')
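    # NOTE: the detection loop below is disabled by the hard-coded 'if False:' toggle, so only the final evaluation runs.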
if False:
_t['total'].tic()
for i in range(num_images):
# print("[DEBUG] print i = ", i)
im, gt, h, w = dataset.__getitem__(i)
x = Variable(im.unsqueeze(0))
if args.cuda:
x = x.cuda()
# print("______________________\n", x.size())
_t['im_detect'].tic()
detections = net(x).data
detect_time = _t['im_detect'].toc(average=False)
# skip j = 0, because it's the background class
for j in range(1, detections.size(1)):
dets = detections[0, j, :]
mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
dets = torch.masked_select(dets, mask).view(-1, 5)
if dets.size(0) == 0:
continue
boxes = dets[:, 1:]
boxes[:, 0] *= w
boxes[:, 2] *= w
boxes[:, 1] *= h
boxes[:, 3] *= h
scores = dets[:, 0].cpu().numpy()
cls_dets = np.hstack((boxes.cpu().numpy(),
scores[:, np.newaxis])).astype(np.float32,
copy=False)
all_boxes[j][i] = cls_dets
print('im_detect: {:d}/{:d} {:.3f}s'.format(i + 1,
num_images, detect_time), end='\r')
total_time = _t['total'].toc()
print("Total time: ", total_time, "\t ms: ", total_time / float(num_images))
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
dataset.evaluate_detections(all_boxes, output_dir)
#
# def evaluate_detections(box_list, output_dir, dataset):
# write_voc_results_file(box_list, dataset)
# do_python_eval(output_dir)
def main(trained_model):
# load net
net = build_ssd('test', 300, 80)
# print(net)
net = net.cuda() # initialize SSD
net.load_state_dict(torch.load(trained_model))
# resume_ckpt(trained_model,net)
net.eval()
print('Finished loading model!')
# load data
# dataset = VOCDetection(args.voc_root, [('2007', set_type)],
# BaseTransform(300, dataset_mean),
# VOCAnnotationTransform())
dataset = COCODetection(root=COCOroot,
image_sets=[('2014', 'minival')],
preproc=BaseTransform(300, dataset_mean))
if args.cuda:
net = net.cuda()
cudnn.benchmark = True
# evaluation
test_net(args.save_folder, net, args.cuda, dataset,
BaseTransform(net.size, dataset_mean), args.top_k, 300,
thresh=args.confidence_threshold)
def resume_ckpt(trained_model, net):
checkpoint = torch.load(trained_model)
# print(list(checkpoint.items())[0][0])
if 'module.' in list(checkpoint.items())[0][0]:
pretrained_dict = {'.'.join(k.split('.')[1:]): v for k, v in list(checkpoint.items())}
checkpoint = pretrained_dict
for k, v in checkpoint.items():
if 'vgg.0' in k:
print(k, v)
if __name__ == "__main__":
for i in range(10):
pth = "results/DataParallel/mixupCOCO/1002/ssd300_COCO_" + str(i + 150) + ".pth"
print(pth)
# modelname = 'weights/lm/ssd300_VOC_0.pth'
# modelname = 'weights/ssd300_mAP_77.43_v2.pth'
# modelname = 'weights/mixup/ssd300_VOC_' + str(i+23) + '0.pth'
iii = i + 150
modelname = "results/DataParallel/mixup005/1002/ssd300_VOC_" + str(iii) + ".pth"
print("----------------------------------\n"
" EVAL modelname: {}\n"
"----------------------------------\n".format(modelname))
main(modelname)
# AP for aeroplane = 0.8207
# AP for bicycle = 0.8568
# AP for bird = 0.7546
# AP for boat = 0.6952
# AP for bottle = 0.5019
# AP for bus = 0.8479
# AP for car = 0.8584
# AP for cat = 0.8734
# AP for chair = 0.6136
# AP for cow = 0.8243
# AP for diningtable = 0.7906
# AP for dog = 0.8566
# AP for horse = 0.8714
# AP for motorbike = 0.8403
# AP for person = 0.7895
# AP for pottedplant = 0.5069
# AP for sheep = 0.7767
# AP for sofa = 0.7894
# AP for train = 0.8623
# AP for tvmonitor = 0.7670
# Mean AP = 0.7749
| 34.758123 | 94 | 0.590258 |
782be4731e2a9e3da151a60d04d74ec089c10767 | 252 | py | Python | manage.py | anehx/anonboard-backend | 564a1af702054980bb1bb6e6864fe205c4396388 | ["MIT"] | null | null | null | manage.py | anehx/anonboard-backend | 564a1af702054980bb1bb6e6864fe205c4396388 | ["MIT"] | null | null | null | manage.py | anehx/anonboard-backend | 564a1af702054980bb1bb6e6864fe205c4396388 | ["MIT"] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "anonboard.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 22.909091 | 73 | 0.77381 |
7b20d84941f2f4b436ebe47f1222d3cdb6362270 | 2,352 | py | Python | test/test_app_error.py | smortex/puppetboard | 35486e8e49725e48bfb76707f5f1f2d5537eb190 | ["Apache-2.0"] | null | null | null | test/test_app_error.py | smortex/puppetboard | 35486e8e49725e48bfb76707f5f1f2d5537eb190 | ["Apache-2.0"] | null | null | null | test/test_app_error.py | smortex/puppetboard | 35486e8e49725e48bfb76707f5f1f2d5537eb190 | ["Apache-2.0"] | 1 | 2019-10-30T13:07:20.000Z | 2019-10-30T13:07:20.000Z |
import pytest
from flask import Flask, current_app
from puppetboard import app
from bs4 import BeautifulSoup
@pytest.fixture
def mock_puppetdb_environments(mocker):
environemnts = [
{'name': 'production'},
{'name': 'staging'}
]
return mocker.patch.object(app.puppetdb, 'environments',
return_value=environemnts)
def test_error_no_content():
result = app.no_content(None)
assert result[0] == ''
assert result[1] == 204
def test_error_bad_request(mock_puppetdb_environments):
with app.app.test_request_context():
(output, error_code) = app.bad_request(None)
soup = BeautifulSoup(output, 'html.parser')
assert 'The request sent to PuppetDB was invalid' in soup.p.text
assert error_code == 400
def test_error_forbidden(mock_puppetdb_environments):
with app.app.test_request_context():
(output, error_code) = app.forbidden(None)
soup = BeautifulSoup(output, 'html.parser')
long_string = "%s %s" % ('What you were looking for has',
'been disabled by the administrator')
assert long_string in soup.p.text
assert error_code == 403
def test_error_not_found(mock_puppetdb_environments):
with app.app.test_request_context():
(output, error_code) = app.not_found(None)
soup = BeautifulSoup(output, 'html.parser')
long_string = "%s %s" % ('What you were looking for could not',
'be found in PuppetDB.')
assert long_string in soup.p.text
assert error_code == 404
def test_error_precond(mock_puppetdb_environments):
with app.app.test_request_context():
(output, error_code) = app.precond_failed(None)
soup = BeautifulSoup(output, 'html.parser')
long_string = "%s %s" % ('You\'ve configured Puppetboard with an API',
'version that does not support this feature.')
assert long_string in soup.p.text
assert error_code == 412
def test_error_server(mock_puppetdb_environments):
with app.app.test_request_context():
(output, error_code) = app.server_error(None)
soup = BeautifulSoup(output, 'html.parser')
assert 'Internal Server Error' in soup.h2.text
assert error_code == 500
| 31.783784 | 79 | 0.64966 |
57847e4fa1b843acc42778fa93c94d98a4690880 | 5,158 | py | Python | powerline/segments/shell.py | elventear/powerline | cddfc364c1bf269f721dfdbd765cde2649a3410d | ["MIT"] | 3 | 2016-08-31T23:09:58.000Z | 2016-08-31T23:10:00.000Z | powerline/segments/shell.py | elventear/powerline | cddfc364c1bf269f721dfdbd765cde2649a3410d | ["MIT"] | null | null | null | powerline/segments/shell.py | elventear/powerline | cddfc364c1bf269f721dfdbd765cde2649a3410d | ["MIT"] | 1 | 2015-01-09T21:02:50.000Z | 2015-01-09T21:02:50.000Z |
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
from powerline.theme import requires_segment_info
from powerline.segments import with_docstring
from powerline.segments.common.env import CwdSegment
from powerline.lib.unicode import out_u
@requires_segment_info
def jobnum(pl, segment_info, show_zero=False):
'''Return the number of jobs.
:param bool show_zero:
If False (default) shows nothing if there are no jobs. Otherwise shows
zero for no jobs.
'''
jobnum = segment_info['args'].jobnum
if jobnum is None or (not show_zero and jobnum == 0):
return None
else:
return str(jobnum)
@requires_segment_info
def last_status(pl, segment_info):
'''Return last exit code.
Highlight groups used: ``exit_fail``
'''
if not segment_info['args'].last_exit_code:
return None
return [{'contents': str(segment_info['args'].last_exit_code), 'highlight_groups': ['exit_fail']}]
@requires_segment_info
def last_pipe_status(pl, segment_info):
'''Return last pipe status.
Highlight groups used: ``exit_fail``, ``exit_success``
'''
last_pipe_status = segment_info['args'].last_pipe_status
if any(last_pipe_status):
return [
{
'contents': str(status),
'highlight_groups': ['exit_fail' if status else 'exit_success'],
'draw_inner_divider': True
}
for status in last_pipe_status
]
else:
return None
@requires_segment_info
def mode(pl, segment_info, override={'vicmd': 'COMMND', 'viins': 'INSERT'}, default=None):
'''Return the current mode.
:param dict override:
dict for overriding mode strings.
:param str default:
If current mode is equal to this string then this segment will not get
displayed. If not specified the value is taken from
``$POWERLINE_DEFAULT_MODE`` variable. This variable is set by zsh
bindings for any mode that does not start from ``vi``.
'''
mode = segment_info.get('mode', None)
if not mode:
pl.debug('No mode specified')
return None
default = default or segment_info.get('default_mode', None)
if mode == default:
return None
try:
return override[mode]
except KeyError:
# Note: with zsh line editor you can emulate as much modes as you wish.
# Thus having unknown mode is not an error: maybe just some developer
# added support for his own zle widgets. As there is no built-in mode()
# function like in VimL and mode is likely be defined by our code or by
# somebody knowing what he is doing there is absolutely no need in
# keeping translations dictionary.
return mode.upper()
@requires_segment_info
def continuation(pl, segment_info, omit_cmdsubst=True, right_align=False, renames={}):
'''Display parser state.
:param bool omit_cmdsubst:
Do not display cmdsubst parser state if it is the last one.
:param bool right_align:
Align to the right.
:param dict renames:
Rename states: ``{old_name : new_name}``. If ``new_name`` is ``None``
then given state is not displayed.
Highlight groups used: ``continuation``, ``continuation:current``.
'''
if not segment_info.get('parser_state'):
return [{
'contents': '',
'width': 'auto',
'highlight_groups': ['continuation:current', 'continuation'],
}]
ret = []
for state in segment_info['parser_state'].split():
state = renames.get(state, state)
if state:
ret.append({
'contents': state,
'highlight_groups': ['continuation'],
'draw_inner_divider': True,
})
if omit_cmdsubst and ret[-1]['contents'] == 'cmdsubst':
ret.pop(-1)
if not ret:
ret.append({
'contents': ''
})
if right_align:
ret[0].update(width='auto', align='r')
ret[-1]['highlight_groups'] = ['continuation:current', 'continuation']
else:
ret[-1].update(width='auto', align='l', highlight_groups=['continuation:current', 'continuation'])
return ret
@requires_segment_info
class ShellCwdSegment(CwdSegment):
def get_shortened_path(self, pl, segment_info, use_shortened_path=True, **kwargs):
if use_shortened_path:
try:
return out_u(segment_info['shortened_path'])
except KeyError:
pass
return super(ShellCwdSegment, self).get_shortened_path(pl, segment_info, **kwargs)
cwd = with_docstring(ShellCwdSegment(),
'''Return the current working directory.
Returns a segment list to create a breadcrumb-like effect.
:param int dir_shorten_len:
shorten parent directory names to this length (e.g.
:file:`/long/path/to/powerline` → :file:`/l/p/t/powerline`)
:param int dir_limit_depth:
limit directory depth to this number (e.g.
:file:`/long/path/to/powerline` → :file:`⋯/to/powerline`)
:param bool use_path_separator:
Use path separator in place of soft divider.
:param bool use_shortened_path:
Use path from shortened_path ``--renderer-arg`` argument. If this argument
is present ``shorten_home`` argument is ignored.
:param bool shorten_home:
Shorten home directory to ``~``.
:param str ellipsis:
Specifies what to use in place of omitted directories. Use None to not
show this subsegment at all.
Divider highlight group used: ``cwd:divider``.
Highlight groups used: ``cwd:current_folder`` or ``cwd``. It is recommended to define all highlight groups.
''')
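# Minimal configuration sketch (assumed, not taken from this repository) showing
# how the segment above is typically referenced from a powerline shell theme: the
# dotted path selects the segment and the documented parameters go through "args".
#
#   {
#       "function": "powerline.segments.shell.cwd",
#       "args": {"dir_limit_depth": 3, "dir_shorten_len": 1}
#   }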
| 29.988372 | 107 | 0.72722 |
4df7001d03e7bdc5d8c76b4ef7271f1792d0d534 | 8,491 | py | Python | services/web/server/src/simcore_service_webserver/version_control_handlers_snapshots.py | Surfict/osparc-simcore | 1e0b89574ec17ecb089674f9e5daa83d624430c8 | ["MIT"] | null | null | null | services/web/server/src/simcore_service_webserver/version_control_handlers_snapshots.py | Surfict/osparc-simcore | 1e0b89574ec17ecb089674f9e5daa83d624430c8 | ["MIT"] | 16 | 2021-10-04T20:31:52.000Z | 2022-03-14T04:31:25.000Z | services/web/server/src/simcore_service_webserver/version_control_handlers_snapshots.py | Surfict/osparc-simcore | 1e0b89574ec17ecb089674f9e5daa83d624430c8 | ["MIT"] | null | null | null |
import logging
import warnings
from datetime import datetime
from typing import List, Optional
from uuid import UUID
from aiohttp import web
from pydantic.decorator import validate_arguments
from ._meta import api_version_prefix as vtag
from .constants import RQT_USERID_KEY
from .login.decorators import login_required
from .projects import projects_api
from .security_decorators import permission_required
from .utils_aiohttp import rename_routes_as_handler_function, view_routes
from .version_control_core_snapshots import ProjectDict, take_snapshot
from .version_control_db_snapshots import ProjectsRepository, SnapshotsRepository
from .version_control_handlers_base import (
create_url_for_function,
enveloped_response,
handle_request_errors,
)
from .version_control_models_snapshots import Snapshot, SnapshotItem, SnapshotPatch
logger = logging.getLogger(__name__)
warnings.warn(
    "version_control_*_snapshots.py modules are the first generation of vc."
    "It is just temporarily kept functional until it gets fully replaced",
DeprecationWarning,
)
# FIXME: access rights using same approach as in access_layer.py in storage.
# A user can only check snapshots (subresource) of its project (parent resource)
# API ROUTES HANDLERS ---------------------------------------------------------
routes = web.RouteTableDef()
@routes.post(
f"/{vtag}/projects/{{project_id}}/snapshots", name="create_project_snapshot_handler"
)
@login_required
@permission_required("project.create")
@handle_request_errors
async def create_project_snapshot_handler(request: web.Request):
snapshots_repo = SnapshotsRepository(request)
projects_repo = ProjectsRepository(request)
user_id = request[RQT_USERID_KEY]
url_for = create_url_for_function(request)
@validate_arguments
async def _create_snapshot(
project_id: UUID,
snapshot_label: Optional[str] = None,
) -> Snapshot:
# fetch parent's project
parent: ProjectDict = await projects_api.get_project_for_user(
request.app,
f"{project_id}",
user_id,
include_templates=False,
include_state=False,
)
# fetch snapshot if any
parent_uuid: UUID = UUID(parent["uuid"])
snapshot_timestamp: datetime = parent["lastChangeDate"]
snapshot_orm = await snapshots_repo.get(
parent_uuid=parent_uuid, created_at=snapshot_timestamp
)
# FIXME: if exists but different name?
if not snapshot_orm:
# take a snapshot of the parent project and commit to db
project: ProjectDict
snapshot: Snapshot
project, snapshot = await take_snapshot(
parent,
snapshot_label=snapshot_label,
)
# FIXME: Atomic?? project and snapshot shall be created in the same transaction!!
            # FIXME: project returned might already exist, then return same snapshot
await projects_repo.create(project)
snapshot_orm = await snapshots_repo.create(snapshot)
return Snapshot.from_orm(snapshot_orm)
snapshot = await _create_snapshot(
project_id=request.match_info["project_id"], # type: ignore
snapshot_label=request.query.get("snapshot_label"),
)
data = SnapshotItem.from_snapshot(snapshot, url_for, prefix=__name__)
return enveloped_response(data, status_cls=web.HTTPCreated)
@routes.get(f"/{vtag}/projects/{{project_id}}/snapshots")
@login_required
@permission_required("project.read")
@handle_request_errors
async def list_project_snapshots_handler(request: web.Request):
"""
Lists references on project snapshots
"""
snapshots_repo = SnapshotsRepository(request)
url_for = create_url_for_function(request)
@validate_arguments
async def _list_snapshots(project_id: UUID) -> List[Snapshot]:
# project_id is param-project?
# TODO: add pagination
        # TODO: needs optimization - the snapshots of a project will grow with time!
#
snapshots_orm = await snapshots_repo.list_all(project_id)
# snapshots:
# - ordered (iterations!)
# - have a parent project with all the parametrization
return [Snapshot.from_orm(obj) for obj in snapshots_orm]
snapshots: List[Snapshot] = await _list_snapshots(
project_id=request.match_info["project_id"], # type: ignore
)
# TODO: async for snapshot in await list_snapshot is the same?
data = [
SnapshotItem.from_snapshot(snp, url_for, prefix=__name__) for snp in snapshots
]
return enveloped_response(data)
@routes.get(
f"/{vtag}/projects/{{project_id}}/snapshots/{{snapshot_id}}",
)
@login_required
@permission_required("project.read")
@handle_request_errors
async def get_project_snapshot_handler(request: web.Request):
snapshots_repo = SnapshotsRepository(request)
url_for = create_url_for_function(request)
@validate_arguments
async def _get_snapshot(project_id: UUID, snapshot_id: str) -> Snapshot:
snapshot_orm = await snapshots_repo.get_by_id(project_id, int(snapshot_id))
if not snapshot_orm:
raise web.HTTPNotFound(
reason=f"snapshot {snapshot_id} for project {project_id} not found"
)
return Snapshot.from_orm(snapshot_orm)
snapshot = await _get_snapshot(
project_id=request.match_info["project_id"], # type: ignore
snapshot_id=request.match_info["snapshot_id"],
)
data = SnapshotItem.from_snapshot(snapshot, url_for, prefix=__name__)
return enveloped_response(data)
@routes.delete(
f"/{vtag}/projects/{{project_id}}/snapshots/{{snapshot_id}}",
name="delete_project_snapshot_handler",
)
@login_required
@permission_required("project.delete")
@handle_request_errors
async def delete_project_snapshot_handler(request: web.Request) -> None:
snapshots_repo = SnapshotsRepository(request)
@validate_arguments
async def _delete_snapshot(project_id: UUID, snapshot_id: int):
# - Deletes first the associated project (both data and document)
# when the latter deletes the project from the database, postgres will
# finally delete
# - Since projects_api.delete_project is a fire&forget and might take time,
snapshot_uuid = await snapshots_repo.mark_as_deleted(
project_id, int(snapshot_id)
)
if not snapshot_uuid:
raise web.HTTPNotFound(
reason=f"snapshot {snapshot_id} for project {project_id} not found"
)
assert snapshots_repo.user_id is not None
await projects_api.delete_project(
request.app, f"{snapshot_uuid}", snapshots_repo.user_id
)
await _delete_snapshot(
project_id=request.match_info["project_id"], # type: ignore
snapshot_id=request.match_info["snapshot_id"], # type: ignore
)
raise web.HTTPNoContent()
@routes.patch(
f"/{vtag}/projects/{{project_id}}/snapshots/{{snapshot_id}}",
name="patch_project_snapshot_handler",
)
@login_required
@permission_required("project.update")
@handle_request_errors
async def patch_project_snapshot_handler(request: web.Request):
snapshots_repo = SnapshotsRepository(request)
url_for = create_url_for_function(request)
@validate_arguments
async def _update_snapshot(
project_id: UUID, snapshot_id: int, update: SnapshotPatch
):
snapshot_orm = await snapshots_repo.update_name(
project_id, snapshot_id, name=update.label
)
if not snapshot_orm:
raise web.HTTPNotFound(
reason=f"snapshot {snapshot_id} for project {project_id} not found"
)
return Snapshot.from_orm(snapshot_orm)
snapshot = await _update_snapshot(
project_id=request.match_info["project_id"], # type: ignore
snapshot_id=request.match_info["snapshot_id"], # type: ignore
update=SnapshotPatch.parse_obj(await request.json()),
# TODO: skip_return_updated
)
data = SnapshotItem.from_snapshot(snapshot, url_for, prefix=__name__)
return enveloped_response(data)
# WARNING: changes in handlers naming will have an effect
# since they are in sync with operation_id (checked in tests)
rename_routes_as_handler_function(routes, prefix=__name__)
logger.debug("Routes collected in %s:\n %s", __name__, view_routes(routes))
| 34.237903 | 93 | 0.706866 |
0523ec4e3ab636964e30ad70ef9e82ff01344a21 | 35,843 | py | Python | dogechia/full_node/full_node_store.py | hagbardcelene/doge-chia | 72bdf0a7b20a579fe4645f0cb132955e181e1c44 | ["Apache-2.0"] | 27 | 2021-07-06T16:33:50.000Z | 2022-02-19T21:11:25.000Z | dogechia/full_node/full_node_store.py | hagbardcelene/doge-chia | 72bdf0a7b20a579fe4645f0cb132955e181e1c44 | ["Apache-2.0"] | 15 | 2021-07-07T02:32:59.000Z | 2021-10-15T21:19:51.000Z | dogechia/full_node/full_node_store.py | hagbardcelene/doge-chia | 72bdf0a7b20a579fe4645f0cb132955e181e1c44 | ["Apache-2.0"] | 12 | 2021-07-08T15:36:20.000Z | 2022-03-15T08:34:01.000Z |
import asyncio
import dataclasses
import logging
import time
from typing import Dict, List, Optional, Set, Tuple
from dogechia.consensus.block_record import BlockRecord
from dogechia.consensus.blockchain_interface import BlockchainInterface
from dogechia.consensus.constants import ConsensusConstants
from dogechia.consensus.difficulty_adjustment import can_finish_sub_and_full_epoch
from dogechia.consensus.make_sub_epoch_summary import next_sub_epoch_summary
from dogechia.consensus.multiprocess_validation import PreValidationResult
from dogechia.consensus.pot_iterations import calculate_sp_interval_iters
from dogechia.full_node.signage_point import SignagePoint
from dogechia.protocols import timelord_protocol
from dogechia.server.outbound_message import Message
from dogechia.types.blockchain_format.classgroup import ClassgroupElement
from dogechia.types.blockchain_format.sized_bytes import bytes32
from dogechia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from dogechia.types.blockchain_format.vdf import VDFInfo
from dogechia.types.end_of_slot_bundle import EndOfSubSlotBundle
from dogechia.types.full_block import FullBlock
from dogechia.types.generator_types import CompressorArg
from dogechia.types.unfinished_block import UnfinishedBlock
from dogechia.util.ints import uint8, uint32, uint64, uint128
from dogechia.util.lru_cache import LRUCache
log = logging.getLogger(__name__)
class FullNodeStore:
constants: ConsensusConstants
# Blocks which we have created, but don't have plot signatures yet, so not yet "unfinished blocks"
candidate_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock]]
candidate_backup_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock]]
# Header hashes of unfinished blocks that we have seen recently
seen_unfinished_blocks: set
# Unfinished blocks, keyed from reward hash
unfinished_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock, PreValidationResult]]
# Finished slots and sps from the peak's slot onwards
# We store all 32 SPs for each slot, starting as 32 Nones and filling them as we go
# Also stores the total iters at the end of slot
# For the first sub-slot, EndOfSlotBundle is None
finished_sub_slots: List[Tuple[Optional[EndOfSubSlotBundle], List[Optional[SignagePoint]], uint128]]
# These caches maintain objects which depend on infused blocks in the reward chain, that we
# might receive before the blocks themselves. The dict keys are the reward chain challenge hashes.
# End of slots which depend on infusions that we don't have
future_eos_cache: Dict[bytes32, List[EndOfSubSlotBundle]]
# Signage points which depend on infusions that we don't have
future_sp_cache: Dict[bytes32, List[Tuple[uint8, SignagePoint]]]
# Infusion point VDFs which depend on infusions that we don't have
future_ip_cache: Dict[bytes32, List[timelord_protocol.NewInfusionPointVDF]]
# This stores the time that each key was added to the future cache, so we can clear old keys
future_cache_key_times: Dict[bytes32, int]
# These recent caches are for pooling support
recent_signage_points: LRUCache
recent_eos: LRUCache
# Partial hashes of unfinished blocks we are requesting
requesting_unfinished_blocks: Set[bytes32]
previous_generator: Optional[CompressorArg]
pending_tx_request: Dict[bytes32, bytes32] # tx_id: peer_id
peers_with_tx: Dict[bytes32, Set[bytes32]] # tx_id: Set[peer_ids}
tx_fetch_tasks: Dict[bytes32, asyncio.Task] # Task id: task
serialized_wp_message: Optional[Message]
serialized_wp_message_tip: Optional[bytes32]
def __init__(self, constants: ConsensusConstants):
self.candidate_blocks = {}
self.candidate_backup_blocks = {}
self.seen_unfinished_blocks = set()
self.unfinished_blocks = {}
self.finished_sub_slots = []
self.future_eos_cache = {}
self.future_sp_cache = {}
self.future_ip_cache = {}
self.recent_signage_points = LRUCache(500)
self.recent_eos = LRUCache(50)
self.requesting_unfinished_blocks = set()
self.previous_generator = None
self.future_cache_key_times = {}
self.constants = constants
self.clear_slots()
self.initialize_genesis_sub_slot()
self.pending_tx_request = {}
self.peers_with_tx = {}
self.tx_fetch_tasks = {}
self.serialized_wp_message = None
self.serialized_wp_message_tip = None
def add_candidate_block(
self, quality_string: bytes32, height: uint32, unfinished_block: UnfinishedBlock, backup: bool = False
):
if backup:
self.candidate_backup_blocks[quality_string] = (height, unfinished_block)
else:
self.candidate_blocks[quality_string] = (height, unfinished_block)
def get_candidate_block(
self, quality_string: bytes32, backup: bool = False
) -> Optional[Tuple[uint32, UnfinishedBlock]]:
if backup:
return self.candidate_backup_blocks.get(quality_string, None)
else:
return self.candidate_blocks.get(quality_string, None)
def clear_candidate_blocks_below(self, height: uint32) -> None:
del_keys = []
for key, value in self.candidate_blocks.items():
if value[0] < height:
del_keys.append(key)
for key in del_keys:
try:
del self.candidate_blocks[key]
except KeyError:
pass
del_keys = []
for key, value in self.candidate_backup_blocks.items():
if value[0] < height:
del_keys.append(key)
for key in del_keys:
try:
del self.candidate_backup_blocks[key]
except KeyError:
pass
def seen_unfinished_block(self, object_hash: bytes32) -> bool:
if object_hash in self.seen_unfinished_blocks:
return True
self.seen_unfinished_blocks.add(object_hash)
return False
def clear_seen_unfinished_blocks(self) -> None:
self.seen_unfinished_blocks.clear()
def add_unfinished_block(
self, height: uint32, unfinished_block: UnfinishedBlock, result: PreValidationResult
) -> None:
self.unfinished_blocks[unfinished_block.partial_hash] = (height, unfinished_block, result)
def get_unfinished_block(self, unfinished_reward_hash: bytes32) -> Optional[UnfinishedBlock]:
result = self.unfinished_blocks.get(unfinished_reward_hash, None)
if result is None:
return None
return result[1]
def get_unfinished_block_result(self, unfinished_reward_hash: bytes32) -> Optional[PreValidationResult]:
result = self.unfinished_blocks.get(unfinished_reward_hash, None)
if result is None:
return None
return result[2]
def get_unfinished_blocks(self) -> Dict[bytes32, Tuple[uint32, UnfinishedBlock, PreValidationResult]]:
return self.unfinished_blocks
def clear_unfinished_blocks_below(self, height: uint32) -> None:
del_keys: List[bytes32] = []
for partial_reward_hash, (unf_height, unfinished_block, _) in self.unfinished_blocks.items():
if unf_height < height:
del_keys.append(partial_reward_hash)
for del_key in del_keys:
del self.unfinished_blocks[del_key]
def remove_unfinished_block(self, partial_reward_hash: bytes32):
if partial_reward_hash in self.unfinished_blocks:
del self.unfinished_blocks[partial_reward_hash]
def add_to_future_ip(self, infusion_point: timelord_protocol.NewInfusionPointVDF):
ch: bytes32 = infusion_point.reward_chain_ip_vdf.challenge
if ch not in self.future_ip_cache:
self.future_ip_cache[ch] = []
self.future_ip_cache[ch].append(infusion_point)
def in_future_sp_cache(self, signage_point: SignagePoint, index: uint8) -> bool:
if signage_point.rc_vdf is None:
return False
if signage_point.rc_vdf.challenge not in self.future_sp_cache:
return False
for cache_index, cache_sp in self.future_sp_cache[signage_point.rc_vdf.challenge]:
if cache_index == index and cache_sp.rc_vdf == signage_point.rc_vdf:
return True
return False
def add_to_future_sp(self, signage_point: SignagePoint, index: uint8):
# We are missing a block here
if (
signage_point.cc_vdf is None
or signage_point.rc_vdf is None
or signage_point.cc_proof is None
or signage_point.rc_proof is None
):
return None
if signage_point.rc_vdf.challenge not in self.future_sp_cache:
self.future_sp_cache[signage_point.rc_vdf.challenge] = []
if self.in_future_sp_cache(signage_point, index):
return None
self.future_cache_key_times[signage_point.rc_vdf.challenge] = int(time.time())
self.future_sp_cache[signage_point.rc_vdf.challenge].append((index, signage_point))
log.info(f"Don't have rc hash {signage_point.rc_vdf.challenge}. caching signage point {index}.")
def get_future_ip(self, rc_challenge_hash: bytes32) -> List[timelord_protocol.NewInfusionPointVDF]:
return self.future_ip_cache.get(rc_challenge_hash, [])
def clear_old_cache_entries(self) -> None:
current_time: int = int(time.time())
remove_keys: List[bytes32] = []
for rc_hash, time_added in self.future_cache_key_times.items():
if current_time - time_added > 3600:
remove_keys.append(rc_hash)
for k in remove_keys:
self.future_cache_key_times.pop(k, None)
self.future_ip_cache.pop(k, [])
self.future_eos_cache.pop(k, [])
self.future_sp_cache.pop(k, [])
def clear_slots(self):
self.finished_sub_slots.clear()
def get_sub_slot(self, challenge_hash: bytes32) -> Optional[Tuple[EndOfSubSlotBundle, int, uint128]]:
assert len(self.finished_sub_slots) >= 1
for index, (sub_slot, _, total_iters) in enumerate(self.finished_sub_slots):
if sub_slot is not None and sub_slot.challenge_chain.get_hash() == challenge_hash:
return sub_slot, index, total_iters
return None
def initialize_genesis_sub_slot(self):
self.clear_slots()
self.finished_sub_slots = [(None, [None] * self.constants.NUM_SPS_SUB_SLOT, uint128(0))]
def new_finished_sub_slot(
self,
eos: EndOfSubSlotBundle,
blocks: BlockchainInterface,
peak: Optional[BlockRecord],
peak_full_block: Optional[FullBlock],
) -> Optional[List[timelord_protocol.NewInfusionPointVDF]]:
"""
Returns false if not added. Returns a list if added. The list contains all infusion points that depended
on this sub slot
"""
assert len(self.finished_sub_slots) >= 1
assert (peak is None) == (peak_full_block is None)
last_slot, _, last_slot_iters = self.finished_sub_slots[-1]
cc_challenge: bytes32 = (
last_slot.challenge_chain.get_hash() if last_slot is not None else self.constants.GENESIS_CHALLENGE
)
rc_challenge: bytes32 = (
last_slot.reward_chain.get_hash() if last_slot is not None else self.constants.GENESIS_CHALLENGE
)
icc_challenge: Optional[bytes32] = None
icc_iters: Optional[uint64] = None
# Skip if already present
for slot, _, _ in self.finished_sub_slots:
if slot == eos:
return []
if eos.challenge_chain.challenge_chain_end_of_slot_vdf.challenge != cc_challenge:
# This slot does not append to our next slot
            # This prevents other peers from appending fake VDFs to our cache
return None
if peak is None:
sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING
else:
sub_slot_iters = peak.sub_slot_iters
total_iters = uint128(last_slot_iters + sub_slot_iters)
if peak is not None and peak.total_iters > last_slot_iters:
# Peak is in this slot
rc_challenge = eos.reward_chain.end_of_slot_vdf.challenge
cc_start_element = peak.challenge_vdf_output
iters = uint64(total_iters - peak.total_iters)
if peak.reward_infusion_new_challenge != rc_challenge:
# We don't have this challenge hash yet
if rc_challenge not in self.future_eos_cache:
self.future_eos_cache[rc_challenge] = []
self.future_eos_cache[rc_challenge].append(eos)
self.future_cache_key_times[rc_challenge] = int(time.time())
log.info(f"Don't have challenge hash {rc_challenge}, caching EOS")
return None
if peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
icc_start_element = None
elif peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
icc_start_element = ClassgroupElement.get_default_element()
else:
icc_start_element = peak.infused_challenge_vdf_output
if peak.deficit < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
curr = peak
while not curr.first_in_sub_slot and not curr.is_challenge_block(self.constants):
curr = blocks.block_record(curr.prev_hash)
if curr.is_challenge_block(self.constants):
icc_challenge = curr.challenge_block_info_hash
icc_iters = uint64(total_iters - curr.total_iters)
else:
assert curr.finished_infused_challenge_slot_hashes is not None
icc_challenge = curr.finished_infused_challenge_slot_hashes[-1]
icc_iters = sub_slot_iters
assert icc_challenge is not None
if can_finish_sub_and_full_epoch(
self.constants,
blocks,
peak.height,
peak.prev_hash,
peak.deficit,
peak.sub_epoch_summary_included is not None,
)[0]:
assert peak_full_block is not None
ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
self.constants, blocks, peak.required_iters, peak_full_block, True
)
if ses is not None:
if eos.challenge_chain.subepoch_summary_hash != ses.get_hash():
log.warning(f"SES not correct {ses.get_hash(), eos.challenge_chain}")
return None
else:
if eos.challenge_chain.subepoch_summary_hash is not None:
log.warning("SES not correct, should be None")
return None
else:
# This is on an empty slot
cc_start_element = ClassgroupElement.get_default_element()
icc_start_element = ClassgroupElement.get_default_element()
iters = sub_slot_iters
icc_iters = sub_slot_iters
# The icc should only be present if the previous slot had an icc too, and not deficit 0 (just finished slot)
icc_challenge = (
last_slot.infused_challenge_chain.get_hash()
if last_slot is not None
and last_slot.infused_challenge_chain is not None
and last_slot.reward_chain.deficit != self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
else None
)
# Validate cc VDF
partial_cc_vdf_info = VDFInfo(
cc_challenge,
iters,
eos.challenge_chain.challenge_chain_end_of_slot_vdf.output,
)
# The EOS will have the whole sub-slot iters, but the proof is only the delta, from the last peak
if eos.challenge_chain.challenge_chain_end_of_slot_vdf != dataclasses.replace(
partial_cc_vdf_info,
number_of_iterations=sub_slot_iters,
):
return None
if (
not eos.proofs.challenge_chain_slot_proof.normalized_to_identity
and not eos.proofs.challenge_chain_slot_proof.is_valid(
self.constants,
cc_start_element,
partial_cc_vdf_info,
)
):
return None
if (
eos.proofs.challenge_chain_slot_proof.normalized_to_identity
and not eos.proofs.challenge_chain_slot_proof.is_valid(
self.constants,
ClassgroupElement.get_default_element(),
eos.challenge_chain.challenge_chain_end_of_slot_vdf,
)
):
return None
# Validate reward chain VDF
if not eos.proofs.reward_chain_slot_proof.is_valid(
self.constants,
ClassgroupElement.get_default_element(),
eos.reward_chain.end_of_slot_vdf,
VDFInfo(rc_challenge, iters, eos.reward_chain.end_of_slot_vdf.output),
):
return None
if icc_challenge is not None:
assert icc_start_element is not None
assert icc_iters is not None
assert eos.infused_challenge_chain is not None
assert eos.infused_challenge_chain is not None
assert eos.proofs.infused_challenge_chain_slot_proof is not None
partial_icc_vdf_info = VDFInfo(
icc_challenge,
iters,
eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
)
# The EOS will have the whole sub-slot iters, but the proof is only the delta, from the last peak
if eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf != dataclasses.replace(
partial_icc_vdf_info,
number_of_iterations=icc_iters,
):
return None
if (
not eos.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
and not eos.proofs.infused_challenge_chain_slot_proof.is_valid(
self.constants, icc_start_element, partial_icc_vdf_info
)
):
return None
if (
eos.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
and not eos.proofs.infused_challenge_chain_slot_proof.is_valid(
self.constants,
ClassgroupElement.get_default_element(),
eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
)
):
return None
else:
# This is the first sub slot and it's empty, therefore there is no ICC
if eos.infused_challenge_chain is not None or eos.proofs.infused_challenge_chain_slot_proof is not None:
return None
self.finished_sub_slots.append((eos, [None] * self.constants.NUM_SPS_SUB_SLOT, total_iters))
new_cc_hash = eos.challenge_chain.get_hash()
self.recent_eos.put(new_cc_hash, (eos, time.time()))
new_ips: List[timelord_protocol.NewInfusionPointVDF] = []
for ip in self.future_ip_cache.get(eos.reward_chain.get_hash(), []):
new_ips.append(ip)
return new_ips
def new_signage_point(
self,
index: uint8,
blocks: BlockchainInterface,
peak: Optional[BlockRecord],
next_sub_slot_iters: uint64,
signage_point: SignagePoint,
skip_vdf_validation=False,
) -> bool:
"""
Returns true if sp successfully added
"""
assert len(self.finished_sub_slots) >= 1
if peak is None or peak.height < 2:
sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING
else:
sub_slot_iters = peak.sub_slot_iters
# If we don't have this slot, return False
if index == 0 or index >= self.constants.NUM_SPS_SUB_SLOT:
return False
assert (
signage_point.cc_vdf is not None
and signage_point.cc_proof is not None
and signage_point.rc_vdf is not None
and signage_point.rc_proof is not None
)
for sub_slot, sp_arr, start_ss_total_iters in self.finished_sub_slots:
if sub_slot is None:
assert start_ss_total_iters == 0
ss_challenge_hash = self.constants.GENESIS_CHALLENGE
ss_reward_hash = self.constants.GENESIS_CHALLENGE
else:
ss_challenge_hash = sub_slot.challenge_chain.get_hash()
ss_reward_hash = sub_slot.reward_chain.get_hash()
if ss_challenge_hash == signage_point.cc_vdf.challenge:
# If we do have this slot, find the Prev block from SP and validate SP
if peak is not None and start_ss_total_iters > peak.total_iters:
# We are in a future sub slot from the peak, so maybe there is a new SSI
checkpoint_size: uint64 = uint64(next_sub_slot_iters // self.constants.NUM_SPS_SUB_SLOT)
delta_iters: uint64 = uint64(checkpoint_size * index)
future_sub_slot: bool = True
else:
# We are not in a future sub slot from the peak, so there is no new SSI
checkpoint_size = uint64(sub_slot_iters // self.constants.NUM_SPS_SUB_SLOT)
delta_iters = uint64(checkpoint_size * index)
future_sub_slot = False
sp_total_iters = start_ss_total_iters + delta_iters
curr = peak
if peak is None or future_sub_slot:
check_from_start_of_ss = True
else:
check_from_start_of_ss = False
while (
curr is not None
and curr.total_iters > start_ss_total_iters
and curr.total_iters > sp_total_iters
):
if curr.first_in_sub_slot:
                        # Did not find a block whose iters are before our sp_total_iters, in this ss
check_from_start_of_ss = True
break
curr = blocks.block_record(curr.prev_hash)
if check_from_start_of_ss:
# Check VDFs from start of sub slot
cc_vdf_info_expected = VDFInfo(
ss_challenge_hash,
delta_iters,
signage_point.cc_vdf.output,
)
rc_vdf_info_expected = VDFInfo(
ss_reward_hash,
delta_iters,
signage_point.rc_vdf.output,
)
else:
# Check VDFs from curr
assert curr is not None
cc_vdf_info_expected = VDFInfo(
ss_challenge_hash,
uint64(sp_total_iters - curr.total_iters),
signage_point.cc_vdf.output,
)
rc_vdf_info_expected = VDFInfo(
curr.reward_infusion_new_challenge,
uint64(sp_total_iters - curr.total_iters),
signage_point.rc_vdf.output,
)
if not signage_point.cc_vdf == dataclasses.replace(
cc_vdf_info_expected, number_of_iterations=delta_iters
):
self.add_to_future_sp(signage_point, index)
return False
if check_from_start_of_ss:
start_ele = ClassgroupElement.get_default_element()
else:
assert curr is not None
start_ele = curr.challenge_vdf_output
if not skip_vdf_validation:
if not signage_point.cc_proof.normalized_to_identity and not signage_point.cc_proof.is_valid(
self.constants,
start_ele,
cc_vdf_info_expected,
):
self.add_to_future_sp(signage_point, index)
return False
if signage_point.cc_proof.normalized_to_identity and not signage_point.cc_proof.is_valid(
self.constants,
ClassgroupElement.get_default_element(),
signage_point.cc_vdf,
):
self.add_to_future_sp(signage_point, index)
return False
if rc_vdf_info_expected.challenge != signage_point.rc_vdf.challenge:
# This signage point is probably outdated
self.add_to_future_sp(signage_point, index)
return False
if not skip_vdf_validation:
if not signage_point.rc_proof.is_valid(
self.constants,
ClassgroupElement.get_default_element(),
signage_point.rc_vdf,
rc_vdf_info_expected,
):
self.add_to_future_sp(signage_point, index)
return False
sp_arr[index] = signage_point
self.recent_signage_points.put(signage_point.cc_vdf.output.get_hash(), (signage_point, time.time()))
return True
self.add_to_future_sp(signage_point, index)
return False
def get_signage_point(self, cc_signage_point: bytes32) -> Optional[SignagePoint]:
assert len(self.finished_sub_slots) >= 1
if cc_signage_point == self.constants.GENESIS_CHALLENGE:
return SignagePoint(None, None, None, None)
for sub_slot, sps, _ in self.finished_sub_slots:
if sub_slot is not None and sub_slot.challenge_chain.get_hash() == cc_signage_point:
return SignagePoint(None, None, None, None)
for sp in sps:
if sp is not None:
assert sp.cc_vdf is not None
if sp.cc_vdf.output.get_hash() == cc_signage_point:
return sp
return None
def get_signage_point_by_index(
self, challenge_hash: bytes32, index: uint8, last_rc_infusion: bytes32
) -> Optional[SignagePoint]:
assert len(self.finished_sub_slots) >= 1
for sub_slot, sps, _ in self.finished_sub_slots:
if sub_slot is not None:
cc_hash = sub_slot.challenge_chain.get_hash()
else:
cc_hash = self.constants.GENESIS_CHALLENGE
if cc_hash == challenge_hash:
if index == 0:
return SignagePoint(None, None, None, None)
sp: Optional[SignagePoint] = sps[index]
if sp is not None:
assert sp.rc_vdf is not None
if sp.rc_vdf.challenge == last_rc_infusion:
return sp
return None
return None
def have_newer_signage_point(self, challenge_hash: bytes32, index: uint8, last_rc_infusion: bytes32) -> bool:
"""
Returns true if we have a signage point at this index which is based on a newer infusion.
"""
assert len(self.finished_sub_slots) >= 1
for sub_slot, sps, _ in self.finished_sub_slots:
if sub_slot is not None:
cc_hash = sub_slot.challenge_chain.get_hash()
else:
cc_hash = self.constants.GENESIS_CHALLENGE
if cc_hash == challenge_hash:
found_rc_hash = False
for i in range(0, index):
sp: Optional[SignagePoint] = sps[i]
if sp is not None and sp.rc_vdf is not None and sp.rc_vdf.challenge == last_rc_infusion:
found_rc_hash = True
sp = sps[index]
if (
found_rc_hash
and sp is not None
and sp.rc_vdf is not None
and sp.rc_vdf.challenge != last_rc_infusion
):
return True
return False
def new_peak(
self,
peak: BlockRecord,
peak_full_block: FullBlock,
sp_sub_slot: Optional[EndOfSubSlotBundle], # None if not overflow, or in first/second slot
ip_sub_slot: Optional[EndOfSubSlotBundle], # None if in first slot
fork_block: Optional[BlockRecord],
blocks: BlockchainInterface,
) -> Tuple[
Optional[EndOfSubSlotBundle], List[Tuple[uint8, SignagePoint]], List[timelord_protocol.NewInfusionPointVDF]
]:
"""
If the peak is an overflow block, must provide two sub-slots: one for the current sub-slot and one for
the prev sub-slot (since we still might get more blocks with an sp in the previous sub-slot)
Results in either one or two sub-slots in finished_sub_slots.
"""
assert len(self.finished_sub_slots) >= 1
if ip_sub_slot is None:
            # We are still in the first sub-slot, no new sub slots yet
self.initialize_genesis_sub_slot()
else:
# This is not the first sub-slot in the chain
sp_sub_slot_sps: List[Optional[SignagePoint]] = [None] * self.constants.NUM_SPS_SUB_SLOT
ip_sub_slot_sps: List[Optional[SignagePoint]] = [None] * self.constants.NUM_SPS_SUB_SLOT
if fork_block is not None and fork_block.sub_slot_iters != peak.sub_slot_iters:
# If there was a reorg and a difficulty adjustment, just clear all the slots
self.clear_slots()
else:
interval_iters = calculate_sp_interval_iters(self.constants, peak.sub_slot_iters)
# If it's not a reorg, or there is a reorg on the same difficulty, we can keep signage points
# that we had before, in the cache
for index, (sub_slot, sps, total_iters) in enumerate(self.finished_sub_slots):
if sub_slot is None:
continue
if fork_block is None:
# If this is not a reorg, we still want to remove signage points after the new peak
fork_block = peak
replaced_sps: List[Optional[SignagePoint]] = [] # index 0 is the end of sub slot
for i, sp in enumerate(sps):
if (total_iters + i * interval_iters) < fork_block.total_iters:
                        # Sps before the fork point are still valid
replaced_sps.append(sp)
else:
if sp is not None:
log.debug(
f"Reverting {i} {(total_iters + i * interval_iters)} {fork_block.total_iters}"
)
# Sps after the fork point should be removed
replaced_sps.append(None)
assert len(sps) == len(replaced_sps)
if sub_slot == sp_sub_slot:
sp_sub_slot_sps = replaced_sps
if sub_slot == ip_sub_slot:
ip_sub_slot_sps = replaced_sps
self.clear_slots()
prev_sub_slot_total_iters = peak.sp_sub_slot_total_iters(self.constants)
if sp_sub_slot is not None or prev_sub_slot_total_iters == 0:
assert peak.overflow or prev_sub_slot_total_iters
self.finished_sub_slots.append((sp_sub_slot, sp_sub_slot_sps, prev_sub_slot_total_iters))
ip_sub_slot_total_iters = peak.ip_sub_slot_total_iters(self.constants)
self.finished_sub_slots.append((ip_sub_slot, ip_sub_slot_sps, ip_sub_slot_total_iters))
new_eos: Optional[EndOfSubSlotBundle] = None
new_sps: List[Tuple[uint8, SignagePoint]] = []
new_ips: List[timelord_protocol.NewInfusionPointVDF] = []
future_eos: List[EndOfSubSlotBundle] = self.future_eos_cache.get(peak.reward_infusion_new_challenge, []).copy()
for eos in future_eos:
if self.new_finished_sub_slot(eos, blocks, peak, peak_full_block) is not None:
new_eos = eos
break
future_sps: List[Tuple[uint8, SignagePoint]] = self.future_sp_cache.get(
peak.reward_infusion_new_challenge, []
).copy()
for index, sp in future_sps:
assert sp.cc_vdf is not None
if self.new_signage_point(index, blocks, peak, peak.sub_slot_iters, sp):
new_sps.append((index, sp))
for ip in self.future_ip_cache.get(peak.reward_infusion_new_challenge, []):
new_ips.append(ip)
self.future_eos_cache.pop(peak.reward_infusion_new_challenge, [])
self.future_sp_cache.pop(peak.reward_infusion_new_challenge, [])
self.future_ip_cache.pop(peak.reward_infusion_new_challenge, [])
for eos_op, _, _ in self.finished_sub_slots:
if eos_op is not None:
self.recent_eos.put(eos_op.challenge_chain.get_hash(), (eos_op, time.time()))
return new_eos, new_sps, new_ips
def get_finished_sub_slots(
self,
block_records: BlockchainInterface,
prev_b: Optional[BlockRecord],
last_challenge_to_add: bytes32,
) -> Optional[List[EndOfSubSlotBundle]]:
"""
Retrieves the EndOfSubSlotBundles that are in the store either:
1. From the starting challenge if prev_b is None
2. That are not included in the blockchain with peak of prev_b if prev_b is not None
Stops at last_challenge
"""
if prev_b is None:
# The first sub slot must be None
assert self.finished_sub_slots[0][0] is None
challenge_in_chain: bytes32 = self.constants.GENESIS_CHALLENGE
else:
curr: BlockRecord = prev_b
while not curr.first_in_sub_slot:
curr = block_records.block_record(curr.prev_hash)
assert curr is not None
assert curr.finished_challenge_slot_hashes is not None
challenge_in_chain = curr.finished_challenge_slot_hashes[-1]
if last_challenge_to_add == challenge_in_chain:
# No additional slots to add
return []
collected_sub_slots: List[EndOfSubSlotBundle] = []
found_last_challenge = False
found_connecting_challenge = False
for sub_slot, sps, total_iters in self.finished_sub_slots[1:]:
assert sub_slot is not None
if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge == challenge_in_chain:
found_connecting_challenge = True
if found_connecting_challenge:
collected_sub_slots.append(sub_slot)
if found_connecting_challenge and sub_slot.challenge_chain.get_hash() == last_challenge_to_add:
found_last_challenge = True
break
if not found_last_challenge:
log.warning(f"Did not find hash {last_challenge_to_add} connected to " f"{challenge_in_chain}")
return None
return collected_sub_slots
| 45.142317 | 120 | 0.623078 |
88015c354dbf9c10d1c28642a3566eb74433a461 | 130 | py | Python | excelpro3/tfdxls2xlsx/__init__.py | majiashu/data-processing | 772dc341657c416cef7046473ed3030efa200e33 | ["MIT"] | 1 | 2020-07-31T15:13:02.000Z | 2020-07-31T15:13:02.000Z | excelpro3/tfdxls2xlsx/__init__.py | majiashu/data-processing | 772dc341657c416cef7046473ed3030efa200e33 | ["MIT"] | null | null | null | excelpro3/tfdxls2xlsx/__init__.py | majiashu/data-processing | 772dc341657c416cef7046473ed3030efa200e33 | ["MIT"] | null | null | null |
# -*- encoding: utf-8 -*-
"""
__init__.py.py
Created on 2018/8/28 13:25
Copyright (c) 2018/8/28,
@author: 马家树(majstx@163.com)
""" | 18.571429 | 28 | 0.630769 |
c1d6080ec6cd1eec3aa7eb1c13de5ee4bf5f995c | 3,434 | py | Python | cargonet/visualization/nxplot.py | romnnn/rail-stgcnn | 7710e654106fb161301df40dd570dea1767e8256 | ["MIT"] | 2 | 2020-09-06T04:19:46.000Z | 2020-12-06T06:07:01.000Z | cargonet/visualization/nxplot.py | romnnn/rail-stgcnn | 7710e654106fb161301df40dd570dea1767e8256 | ["MIT"] | null | null | null | cargonet/visualization/nxplot.py | romnnn/rail-stgcnn | 7710e654106fb161301df40dd570dea1767e8256 | ["MIT"] | 1 | 2021-05-15T15:37:30.000Z | 2021-05-15T15:37:30.000Z |
import os
import uuid
import click
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from cargonet.preprocessing.graphs.tgraph import TransportGraph
from cargonet.visualization.tplot import TransportPlot
class NXTransportPlot(TransportPlot):
def draw_edges(self, route, pos, **options):
nx.draw_networkx_edges(route, pos, **options)
def draw_nodes(self, route, pos, **options):
nx.draw_networkx_nodes(route, pos, **options)
def draw_node_labels(self, route, pos, **options):
nx.draw_networkx_labels(route, pos, **options)
def draw_edge_labels(self, route, pos, **options):
nx.draw_networkx_edge_labels(route, pos, **options)
def draw_cities(self, **options):
cities = {
"Berlin": (13.422937, 52.511991),
"Hamburg": (9.215304, 53.712586),
"Munich": (11.541843, 48.154955),
"Cologne": (6.967279, 50.957886),
}
for c, pos in cities.items():
self.ax.plot([pos[1]], [pos[0]], "o", color="black") # Point
self.ax.text(
pos[1],
pos[0],
c,
verticalalignment="bottom",
horizontalalignment="left",
color="black",
fontsize=10,
)
def plot(self, close=True):
self._plot()
filepath = self.get_filepath(
filepath=self.filepath, filename=self.filename, random=self.save
)
if filepath is not None:
self.write(filepath=filepath)
if self.show:
plt.show()
if close:
plt.close()
def _plot(self):
size, aspect = 10, 1.5
self.fig, self.ax = plt.subplots(figsize=(size * aspect, size))
fig, ax = self.fig, self.ax
if self.axis:
ax.set_xlabel("latitude", fontsize=self.fontsize)
ax.set_ylabel("longitude", fontsize=self.fontsize)
if self.title:
fig.suptitle(self.title, fontsize=1.5 * self.fontsize, fontweight="bold")
if self.subtitle:
ax.set_title(self.subtitle, fontsize=self.fontsize)
self.vmin, self.vmax = None, None
if self.colorbar_range is not None:
self.vmin, self.vmax = self.colorbar_range
self.draw_route(self.g, good_color="black", bad_color="red")
if self.live is not None:
self.draw_route(self.live, good_color="green", bad_color="blue")
self.draw_cities()
if self.delay:
sm = plt.cm.ScalarMappable(
cmap=self.colormap, norm=plt.Normalize(vmin=self.vmin, vmax=self.vmax)
)
sm.set_array([])
cbar = fig.colorbar(sm, ax=ax)
cbar.ax.set_ylabel(
"delay in minutes", fontsize=self.fontsize
) # , rotation=270)
ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)
if self.bbox is not None:
# Use custom ranges as therfore scaling on the axis
lats = self.bbox[0][0], self.bbox[1][0]
lons = self.bbox[0][1], self.bbox[1][1]
plt.xlim(min(lats), max(lats))
plt.ylim(min(lons), max(lons))
else:
# Use equal scaling on the axis
plt.axis("equal")
if self.check:
plt.legend(loc="upper right", fontsize=self.fontsize)
| 32.396226 | 86 | 0.571928 |
7501c78c56b17a9f706af5689768c0324eecd747 | 3,040 | py | Python | source/src/network/GameController/protocols/python/gamestate.py | Dr-MunirShah/black-sheep | e908203d9516e01f90f4ed4c796cf4143d0df0c0 | ["MIT"] | 7 | 2019-07-25T10:06:31.000Z | 2021-02-20T06:00:51.000Z | source/src/network/GameController/protocols/python/gamestate.py | Dr-MunirShah/black-sheep | e908203d9516e01f90f4ed4c796cf4143d0df0c0 | ["MIT"] | null | null | null | source/src/network/GameController/protocols/python/gamestate.py | Dr-MunirShah/black-sheep | e908203d9516e01f90f4ed4c796cf4143d0df0c0 | ["MIT"] | 1 | 2019-08-31T23:32:02.000Z | 2019-08-31T23:32:02.000Z |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from construct import Byte, Struct, Enum, Bytes, Const, Array, Renamed, Int16ul
Short = Int16ul
RobotInfo = "robot_info" / Struct(
# define NONE 0
# define PENALTY_HL_KID_BALL_MANIPULATION 1
# define PENALTY_HL_KID_PHYSICAL_CONTACT 2
# define PENALTY_HL_KID_ILLEGAL_ATTACK 3
# define PENALTY_HL_KID_ILLEGAL_DEFENSE 4
# define PENALTY_HL_KID_REQUEST_FOR_PICKUP 5
# define PENALTY_HL_KID_REQUEST_FOR_SERVICE 6
# define PENALTY_HL_KID_REQUEST_FOR_PICKUP_2_SERVICE 7
# define MANUAL 15
"penalty" / Byte,
"secs_till_unpenalised" / Byte,
"number_of_yellow_cards" / Byte,
"number_of_red_cards" / Byte
)
TeamInfo = "team" / Struct(
"team_number" / Byte,
"team_color" / Enum(Byte,
BLUE=0,
RED=1,
YELLOW=2,
BLACK=3,
WHITE=4,
GREEN=5,
ORANGE=6,
PURPLE=7,
BROWN=8,
GRAY=9
),
"score" / Byte,
"penalty_shot" / Byte, # penalty shot counter
"single_shots" / Short, # bits represent penalty shot success
"coach_sequence" / Byte,
"coach_message" / Bytes(253),
Renamed("coach", RobotInfo),
"players" / Array(11, RobotInfo)
)
GameState = "gamedata" / Struct(
"header" / Const(Bytes(4), b'RGme'),
"version" / Const(Short, 12),
"packet_number" / Byte,
"players_per_team" / Byte,
"game_type" / Byte,
"game_state" / Enum(Byte,
STATE_INITIAL=0,
                        # go to the start position
                        STATE_READY=1,
                        # stand ready
                        STATE_SET=2,
                        # play
                        STATE_PLAYING=3,
                        # game over
                        STATE_FINISHED=4
),
"first_half" / Byte,
"kick_of_team" / Byte,
"secondary_state" / Enum(Byte,
STATE_NORMAL=0,
STATE_PENALTYSHOOT=1,
STATE_OVERTIME=2,
STATE_TIMEOUT=3,
STATE_DIRECT_FREEKICK=4,
STATE_INDIRECT_FREEKICK=5,
STATE_PENALTYKICK=6,
DROPBALL=128,
UNKNOWN=255
),
"secondary_state_info" / Bytes(4),
"drop_in_team" / Byte,
"drop_in_time" / Short,
"seconds_remaining" / Short,
"secondary_seconds_remaining" / Short,
Array(2, Renamed("teams", TeamInfo))
)
GAME_CONTROLLER_RESPONSE_VERSION = 2
ReturnData = "returndata" / Struct(
"header" / Const(Bytes(4), b"RGrt"),
"version" / Const(Byte, 2),
"team" / Byte,
"player" / Byte,
"message" / Byte
)
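# Minimal usage sketch (the field values below are made up): when building,
# construct fills in Const fields such as the header and version automatically,
# and parsing reverses the process.
if __name__ == "__main__":
    packet = ReturnData.build(dict(team=12, player=3, message=1))
    parsed = ReturnData.parse(packet)
    assert parsed.player == 3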
| 32.340426 | 79 | 0.497368 |
ca1db0fe4573831b8b98fc26a41a9b078d882fb8 | 1,182 | py | Python | LeetCode/Python3/Array/1002. Find Common Characters.py | WatsonWangZh/CodingPractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | ["MIT"] | 11 | 2019-09-01T22:36:00.000Z | 2021-11-08T08:57:20.000Z | LeetCode/Python3/Array/1002. Find Common Characters.py | WatsonWangZh/LeetCodePractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | ["MIT"] | null | null | null | LeetCode/Python3/Array/1002. Find Common Characters.py | WatsonWangZh/LeetCodePractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | ["MIT"] | 2 | 2020-05-27T14:58:52.000Z | 2020-05-27T15:04:17.000Z |
# Given an array A of strings made only from lowercase letters,
# return a list of all characters that show up in all strings within the list (including duplicates).
# For example, if a character occurs 3 times in all strings but not 4 times,
# you need to include that character three times in the final answer.
# You may return the answer in any order.
# Example 1:
# Input: ["bella","label","roller"]
# Output: ["e","l","l"]
# Example 2:
# Input: ["cool","lock","cook"]
# Output: ["c","o"]
# Note:
# 1 <= A.length <= 100
# 1 <= A[i].length <= 100
# A[i][j] is a lowercase letter
import collections
class Solution(object):
def commonChars(self, A):
"""
:type A: List[str]
:rtype: List[str]
"""
        # Straight counting, O(A.length * A[i].length).
        # Use a Counter to tally the letters of each word, then take the
        # intersection of the counters.
        # Time complexity: every letter of every word is visited once, so the
        # total cost is the total number of letters, O(A.length * A[i].length).
if len(A) == 1:
return list(A[0])
c = collections.Counter(A[0])
for i in range(1, len(A)):
t = collections.Counter(A[i])
c = c & t
res = []
for i in c:
res += i * c[i]
return res
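# Quick sanity check against the examples from the problem statement above
# (character order inside each result may vary):
if __name__ == "__main__":
    s = Solution()
    print(s.commonChars(["bella", "label", "roller"]))  # ['e', 'l', 'l']
    print(s.commonChars(["cool", "lock", "cook"]))      # ['c', 'o']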
| 30.307692 | 103 | 0.57445 |
120b541d4cd8f42e931cce2606554831c5966aff | 2,005 | py | Python | shadho/workers/workqueue.py | Gayan225/shadho | e22d9764749dcd4788974ca614dc57a47e433651 | ["MIT"] | null | null | null | shadho/workers/workqueue.py | Gayan225/shadho | e22d9764749dcd4788974ca614dc57a47e433651 | ["MIT"] | null | null | null | shadho/workers/workqueue.py | Gayan225/shadho | e22d9764749dcd4788974ca614dc57a47e433651 | ["MIT"] | null | null | null |
"""Utilities for submitting and starting Work Queue workers.
"""
import argparse
import os
import re
import subprocess
from shadho.configuration import ShadhoConfig
def parse_args(args=None):
p = argparse.ArgumentParser(
description='Start a Work Queue worker and connect to SHADHO.')
p.add_argument('-M', '--master', type=str,
help='name of the Work Queue master to connect to')
p.add_argument('-u', '--user', type=str, default=os.environ['USER'],
help='name of the user running the Work Queue master')
p.add_argument('--timeout', type=int,
help='amount of time worker idles before exiting')
p.add_argument('--cores', type=int, default=1,
help='the number of cores for the worker to use;' +
' pass 0 to use all available cores')
p.add_argument('--feature', type=str, nargs='*', default=[],
help='user specified feature to advertise, e.g. GPU model name')
return p.parse_args(args)
def shadho_wq_worker(args=None, config=None):
"""Start a Work Queue worker."""
if config is None:
config = ShadhoConfig()
if args is None:
cmd_args = ''
else:
cmd_args = f'-M {args.master} --cores {args.cores}'
for feature in args.feature:
cmd_args += f' --feature {feature}'
if not re.search(r'(^|[\s])-M([\s]|$)', cmd_args):
cmd_args = ' '.join([cmd_args, '-M', config.workqueue.name]).strip()
if not re.search(r'[\s]*-M[\s][\S]*' + args.user + r'.*[\s]*', cmd_args):
print('Replacing')
cmd_args = re.sub(r'(^|[\s]*)(.*-M[\s])([\S]+)([\s]*.*$)',
r'\1\2\3-' + args.user + r'\4',
cmd_args)
executable = os.path.join(config.shadho_dir, 'bin', 'work_queue_worker')
print(cmd_args)
subprocess.run([executable] + cmd_args.split(), stderr=subprocess.STDOUT)
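# Illustrative sketch (assumption, not part of shadho): launching a worker
# programmatically with the same options as passing `-M shadho-master --cores 2`
# on the command line; the master name here is hypothetical.
def _demo_launch_worker():
    demo_args = parse_args(['-M', 'shadho-master', '--cores', '2'])
    shadho_wq_worker(args=demo_args)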
def main():
args = parse_args()
shadho_wq_worker(args=args)
if __name__ == '__main__':
main()
| 29.925373 | 77 | 0.600998 |
0ce8057c3d9200b7c0048e9643de2495c77b7dd5 | 4,614 | py | Python | pyvolt/errors.py | Gael-devv/Pyvolt | 1d84ba95f1fd3f959a933051c25f8a3e60500c5d | [
"MIT"
] | null | null | null | pyvolt/errors.py | Gael-devv/Pyvolt | 1d84ba95f1fd3f959a933051c25f8a3e60500c5d | [
"MIT"
] | null | null | null | pyvolt/errors.py | Gael-devv/Pyvolt | 1d84ba95f1fd3f959a933051c25f8a3e60500c5d | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Dict, List, Optional, TYPE_CHECKING, Any, Tuple, Union
if TYPE_CHECKING:
from aiohttp import ClientResponse, ClientWebSocketResponse
try:
from requests import Response
_ResponseType = Union[ClientResponse, Response]
except ModuleNotFoundError:
_ResponseType = ClientResponse
__all__ = (
"PyvoltException",
"HTTPException",
"Forbidden",
"NotFound",
"RevoltServerError",
"InvalidArgument",
"ConnectionClosed"
)
class PyvoltException(Exception):
"""Base exception class for pyvolt
Ideally speaking, this could be caught to handle any exceptions raised from this library.
"""
def _flatten_error_dict(d: Dict[str, Any], key: str = '') -> Dict[str, str]:
items: List[Tuple[str, str]] = []
for k, v in d.items():
new_key = key + '.' + k if key else k
if isinstance(v, dict):
try:
_errors: List[Dict[str, Any]] = v['_errors']
except KeyError:
items.extend(_flatten_error_dict(v, new_key).items())
else:
items.append((new_key, ' '.join(x.get('message', '') for x in _errors)))
else:
items.append((new_key, v))
return dict(items)
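# Minimal sketch (assumption, not part of pyvolt): how a nested error payload
# collapses into dotted keys via _flatten_error_dict; the payload is made up.
def _demo_flatten_error_dict() -> Dict[str, str]:
    payload = {
        'name': {'_errors': [{'message': 'must be shorter'}]},
        'embed': {'title': 'invalid'},
    }
    # Returns {'name': 'must be shorter', 'embed.title': 'invalid'}
    return _flatten_error_dict(payload)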
class HTTPException(PyvoltException):
"""Exception that's raised when an HTTP request operation fails.
Attributes
------------
response: :class:`aiohttp.ClientResponse`
The response of the failed HTTP request. This is an
instance of :class:`aiohttp.ClientResponse`. In some cases
this could also be a :class:`requests.Response`.
text: :class:`str`
The text of the error. Could be an empty string.
status: :class:`int`
The status code of the HTTP request.
"""
def __init__(self, response: _ResponseType, message: Optional[Union[str, Dict[str, Any]]]):
self.response: _ResponseType = response
self.status: int = response.status # type: ignore
self.text: str
if isinstance(message, dict):
base = message.get('message', '')
errors = message.get('errors')
if errors:
errors = _flatten_error_dict(errors)
helpful = '\n'.join('In %s: %s' % t for t in errors.items())
self.text = base + '\n' + helpful
else:
self.text = base
else:
self.text = message or ''
fmt = '{0.status} {0.reason}'
if len(self.text):
fmt += ': {1}'
super().__init__(fmt.format(self.response, self.text))
class Forbidden(HTTPException):
"""Exception that's raised for when status code 403 occurs.
Subclass of :exc:`HTTPException`
"""
class NotFound(HTTPException):
"""Exception that's raised for when status code 404 occurs.
Subclass of :exc:`HTTPException`
"""
class RevoltServerError(HTTPException):
"""Exception that's raised for when a 500 range status code occurs.
Subclass of :exc:`HTTPException`.
"""
class ClientException(PyvoltException):
"""Exception that's raised when an operation in the :class:`Client` fails.
These are usually for exceptions that happened due to user input.
"""
class InvalidArgument(ClientException):
"""Exception that's raised when an argument to a function
is invalid some way (e.g. wrong value or wrong type).
This could be considered the analogous of ``ValueError`` and
``TypeError`` except inherited from :exc:`ClientException` and thus
:exc:`PyvoltException`.
"""
class LoginFailure(ClientException):
"""Exception that's raised when the :meth:`Client.login` function
fails to log you in from improper credentials or some other misc.
failure.
"""
class ConnectionClosed(ClientException):
"""Exception that's raised when the gateway connection is
closed for reasons that could not be handled internally.
Attributes
-----------
code: :class:`int`
The close code of the websocket.
reason: :class:`str`
The reason provided for the closure.
"""
def __init__(self, socket: ClientWebSocketResponse, *, code: Optional[int] = None):
# This exception is just the same exception except
# reconfigured to subclass ClientException for users
self.code: int = code or socket.close_code or -1
# aiohttp doesn't seem to consistently provide close reason
self.reason: str = ""
super().__init__(f"WebSocket closed with {self.code}")
| 29.767742 | 95 | 0.632206 |
13d62c81e841d667cea7e83e9550a3d422de45fa | 637 | py | Python | coding/learn_python/object_oriented_programming/tombola.py | yatao91/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | 3 | 2021-05-25T16:58:52.000Z | 2022-02-05T09:37:17.000Z | coding/learn_python/object_oriented_programming/tombola.py | yataosu/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | null | null | null | coding/learn_python/object_oriented_programming/tombola.py | yataosu/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
import abc
class Tombola(abc.ABC):
@abc.abstractmethod
def load(self, iterable):
"""从可迭代对象中添加元素"""
@abc.abstractmethod
def pick(self):
"""随机删除元素,然后将其返回.
如果实例为空,这个方法应该抛出`LookupError`.
"""
def loaded(self):
"""如果至少有一个元素,返回`True`,否则返回`False`."""
return bool(self.inspect())
def inspect(self):
"""返回一个有序元组,由当前元素构成."""
items = []
while True:
try:
items.append(self.pick())
except LookupError:
break
self.load(items)
return tuple(sorted(items))
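# Minimal concrete sketch (assumption, not part of the original module): a
# trivial list-backed Tombola showing how load/pick satisfy the ABC.
class DemoTombola(Tombola):
    def __init__(self, iterable=()):
        self._items = []
        self.load(iterable)
    def load(self, iterable):
        self._items.extend(iterable)
    def pick(self):
        try:
            return self._items.pop()
        except IndexError:
            raise LookupError('pick from empty DemoTombola')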
| 20.548387 | 45 | 0.516484 |
ead04d226b238debf7ef83525753dc9dda141b68 | 1,958 | py | Python | docs/conf.py | nickelpro/RikerBot | fdf8c96a3c13a4327afcefb650d1ad352ee6552b | [
"Zlib"
] | null | null | null | docs/conf.py | nickelpro/RikerBot | fdf8c96a3c13a4327afcefb650d1ad352ee6552b | [
"Zlib"
] | null | null | null | docs/conf.py | nickelpro/RikerBot | fdf8c96a3c13a4327afcefb650d1ad352ee6552b | [
"Zlib"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'RikerBot'
copyright = '2020, N. Vito Gamberini'
author = 'N. Vito Gamberini'
master_doc = 'index'
# The full version, including alpha/beta/rc tags
release = '0.0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 33.758621 | 79 | 0.662411 |
cb211738a6e5490c07bd41b7a03d1b7307d87d7b | 8,492 | py | Python | hueComposer.py | sabjorn/hueComposer | 8b017a57f51eaaedd275f6e00f35d2e6207d5350 | [
"MIT"
] | null | null | null | hueComposer.py | sabjorn/hueComposer | 8b017a57f51eaaedd275f6e00f35d2e6207d5350 | [
"MIT"
] | 5 | 2017-06-21T17:53:18.000Z | 2019-03-14T23:27:12.000Z | hueComposer.py | sabjorn/hueComposer | 8b017a57f51eaaedd275f6e00f35d2e6207d5350 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2016 Steven A. Bjornson
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
## Info
# The goal of this software is to facilitate the creation
# of complex light patterns using a large array of
# Philips Hue bulbs.
#
# An image is imported with PIL and the pixels are mapped
# to the bulbs.
# The x-axis being time and y-axis being bulb index
import argparse
import glob # list all images
from time import sleep
from time import strftime
from time import time
import logging
import thread
import yaml
import netifaces
import os
import sys
import subprocess
import signal
from PIL import Image
import numpy as np
from scipy.misc import imresize
from phue import Bridge # https://github.com/studioimaginaire/phue
# turns out Hue [0, 65535] and Saturation [0, 254] are available
# as properties of the lights
from hueColour import Converter
http_delay = .04 # average time of HTTP transmission per light
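# Illustrative sketch (assumption, not part of the original script): once the
# image has been resized so rows match bulbs, bulb y at time step x is driven
# by the pixel img[y, x, :]; this hypothetical helper just pulls that column.
def _demo_frame_colours(img, step):
    # one (R, G, B) triple per bulb for a single time step
    return [tuple(img[y, step, :]) for y in range(img.shape[0])]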
def hueMain():
b = Bridge(args.ip)
#not fully tested!
if(args.username is None):
b.connect()
args.username = b.username
logging.info("Username: {}".format(b.username))
else:
b.username = args.username
# while True:
# try:
# lights = b.lights
# except:
# continue
# break
conv = Converter()
# make ordered list of images in directory
imgs = []
images = None
if(config_flag):
images = cfg['images']
for k, v in images.items():
imgs.append(v['filename'])
else:
imgs = glob.glob("{}/*.png".format(args.input))
while True:
try:
lights = b.lights
except:
logging.info("can't connect to lights")
sleep(1)
continue
break
# all lights off
for x in lights:
x.on = False
sleep(5)
while True:
for i, names in enumerate(imgs):
logging.info(names)
for t, x in enumerate(lights):
if(t < args.numlights):
x.on = True
x.bri = 1
try:
img = Image.open(args.base+names)
img = np.asarray(img)
except:
continue
logging.info("playing img: {}".format(args.base+names))
print "playing img: {}".format(args.base+names)
if(config_flag):
args.transition = images[i]['transition'] # to seconds?
args.rate = ((http_delay * args.numlights) + (args.transition))
for t, x in enumerate(lights):
if(t < args.numlights):
x.transitiontime = int(args.transition * 10)
x_step = int(img.shape[1] / float(images[i]['time']/args.rate))
logging.info("x-step size: {0}".format(x_step))
#shrink Y-axis to size of array
y_step = 1
if(img.shape[0] > args.numlights):
y_step = img.shape[0] / args.numlights
elif(img.shape[0] < args.numlights):
img = imresize(img, (args.numlights, img.shape[1]))
image_average = 0
image_start_time = time()
for x in np.arange(0, img.shape[1], x_step):
start_time = time()
for y in np.arange(0, args.numlights):
bri = int(np.average(img[y, x, :]))
lights[y].xy = conv.rgbToCIE(img[y*y_step, x, 0], img[y*y_step, x, 1], img[y*y_step, x, 2])
lights[y].bri = bri
image_average += time() - start_time
sleep(args.transition)
logging.info("time elapsed average image {0}, {1}".format(i, image_average/(img.shape[1]/x_step)))
logging.info("total image {0} elapsed time: {1}".format(i, time() - image_start_time))
# image finished animation
for m, light in enumerate(lights):
light.transitiontime = 0
light.on = False
light.bri = 1
sleep(10)
def audio(audio_file):
if audio_file is not None:
try:
return subprocess.Popen(["omxplayer", "--loop", "--vol", "352", audio_file], preexec_fn=os.setsid)
except Exception, e:
logging.exception(e)
else:
return None
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default='192.168.1.100',
help='hue hub ip address')
parser.add_argument("--username", "-u", type=str,
help="Hue hub username.")
parser.add_argument("--input", "-i", type=str, default='./',
help='Input image directory')
parser.add_argument("--rate", "--r", type=float, default=1/5.,
help='Rate in ms to step between pixel values')
parser.add_argument("--transition", "--t", type=float, default=0,
help='Transition time in seconds')
parser.add_argument("--numlights", "--n", type=float, default=30,
help='Number of active lights')
parser.add_argument("--config", "-c", type=str,
help='run with config files')
parser.add_argument("--base", "-b", type=str, default="./",
help='base directory of images')
parser.add_argument("--log", "-l", type=str, default="/var/log",
help='log location')
parser.add_argument("--audio", "-a", type=str,
help='Audio File to Play')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
logging.info('Started')
config_flag = args.config is not None
cfg = None
if(config_flag):
try:
with open(args.config, 'r') as ymlfile:
cfg = yaml.load(ymlfile)
config = cfg['config']
args.numlights = config['lights']
args.username = config['username']
args.ip = config['ip']
args.audio = config['audio']
except IOError:
logging.exception("Could not open file: {}".format(args.config))
exit(1)
except:
logging.exception("Error in config file")
    proc = None
    try:
proc = audio(args.audio)
hueMain()
except IOError as e:
# an IOError exception occurred (socket.error is a subclass)
        # the retry logic below makes sure the network is connected
logging.exception(e)
if (e.errno == 101):
            # error code 101 means the network is unreachable;
            # keep polling until eth0 has an IPv4 address
while True:
addr = netifaces.ifaddresses("eth0")
logging.info(addr)
if(netifaces.AF_INET in addr):
break
sleep(1)
if e.errno == 113:
logging.info("Error 113, sleeping for 10 seconds before attempting reconnect")
sleep(10)
else:
logging.exception(e)
except KeyboardInterrupt, e:
#some sort of cleanup
if proc is not None:
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
logging.info("Keyboard Interrupt")
except Exception, e:
if proc is not None:
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
logging.exception(e)
logging.info('Finished')
| 35.090909 | 111 | 0.581371 |
104378e4da65cb8359cacd020b93716403705524 | 7,872 | py | Python | specification/scripts/spec_tools/validity.py | chrdavis/OpenXR-SDK-Source | 304c28c2f534375b2fd2a7d5e43b50ddc76495e6 | [
"Apache-2.0"
] | null | null | null | specification/scripts/spec_tools/validity.py | chrdavis/OpenXR-SDK-Source | 304c28c2f534375b2fd2a7d5e43b50ddc76495e6 | [
"Apache-2.0"
] | null | null | null | specification/scripts/spec_tools/validity.py | chrdavis/OpenXR-SDK-Source | 304c28c2f534375b2fd2a7d5e43b50ddc76495e6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3 -i
#
# Copyright (c) 2013-2020 The Khronos Group Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
_A_VS_AN_RE = re.compile(r' a ([a-z]+:)?([aAeEiIoOxX]\w+\b)(?!:)')
_STARTS_WITH_MACRO_RE = re.compile(r'^[a-z]+:.*')
def _checkAnchorComponents(anchor):
"""Raise an exception if any component of a VUID anchor name is illegal."""
if anchor:
# Any other invalid things in an anchor name should be detected here.
if any((' ' in anchor_part for anchor_part in anchor)):
raise RuntimeError("Illegal component of a VUID anchor name!")
def _fix_a_vs_an(s):
"""Fix usage (often generated) of the indefinite article 'a' when 'an' is appropriate.
Explicitly excludes the markup macros."""
return _A_VS_AN_RE.sub(r' an \1\2', s)
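# Minimal sketch (assumption, not from the Khronos tooling): the regex rewrites
# ' a ' to ' an ' before a word starting with a vowel or x, optionally behind a
# markup macro prefix, e.g.
# 'Creates a slink:XrInstance handle' -> 'Creates an slink:XrInstance handle'.
def _demo_fix_a_vs_an():
    return _fix_a_vs_an('Creates a slink:XrInstance handle')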
class ValidityCollection:
"""Combines validity for a single entity."""
def __init__(self, entity_name=None, conventions=None, strict=True):
self.entity_name = entity_name
self.conventions = conventions
self.lines = []
self.strict = strict
def possiblyAddExtensionRequirement(self, extension_name, entity_preface):
"""Add an extension-related validity statement if required.
entity_preface is a string that goes between "must be enabled prior to "
and the name of the entity, and normally ends in a macro.
For instance, might be "calling flink:" for a function.
"""
if extension_name and not extension_name.startswith(self.conventions.api_version_prefix):
msg = 'The {} extension must: be enabled prior to {}{}'.format(
self.conventions.formatExtension(extension_name), entity_preface, self.entity_name)
self.addValidityEntry(msg, anchor=('extension', 'notenabled'))
def addValidityEntry(self, msg, anchor=None):
"""Add a validity entry, optionally with a VUID anchor.
If any trailing arguments are supplied,
an anchor is generated by concatenating them with dashes
at the end of the VUID anchor name.
"""
if not msg:
raise RuntimeError("Tried to add a blank validity line!")
parts = ['*']
_checkAnchorComponents(anchor)
if anchor:
if not self.entity_name:
raise RuntimeError('Cannot add a validity entry with an anchor to a collection that does not know its entity name.')
parts.append('[[{}]]'.format(
'-'.join(['VUID', self.entity_name] + list(anchor))))
parts.append(msg)
combined = _fix_a_vs_an(' '.join(parts))
if combined in self.lines:
raise RuntimeError("Duplicate validity added!")
self.lines.append(combined)
def addText(self, msg):
"""Add already formatted validity text."""
if self.strict:
raise RuntimeError('addText called when collection in strict mode')
if not msg:
return
msg = msg.rstrip()
if not msg:
return
self.lines.append(msg)
def _extend(self, lines):
lines = list(lines)
dupes = set(lines).intersection(self.lines)
if dupes:
raise RuntimeError("The two sets contain some shared entries! " + str(dupes))
self.lines.extend(lines)
def __iadd__(self, other):
"""Perform += with a string, iterable, or ValidityCollection."""
if other is None:
pass
elif isinstance(other, str):
if self.strict:
raise RuntimeError(
'Collection += a string when collection in strict mode')
if not other:
# empty string
pass
elif other.startswith('*'):
# Handle already-formatted
self.addText(other)
else:
# Do the formatting ourselves.
self.addValidityEntry(other)
elif isinstance(other, ValidityEntry):
if other:
if other.verbose:
print(self.entity_name, 'Appending', str(other))
self.addValidityEntry(str(other), anchor=other.anchor)
elif isinstance(other, ValidityCollection):
if not self.entity_name == other.entity_name:
raise RuntimeError(
"Trying to combine two ValidityCollections for different entities!")
self._extend(other.lines)
else:
# Deal with other iterables.
self._extend(other)
return self
def __bool__(self):
"""Is the collection non-empty?"""
empty = not self.lines
return not empty
@property
def text(self):
"""Access validity statements as a single string or None."""
if not self.lines:
return None
return '\n'.join(self.lines) + '\n'
def __str__(self):
"""Access validity statements as a single string or empty string."""
if not self:
return ''
return self.text
def __repr__(self):
return '<ValidityCollection: {}>'.format(self.lines)
class ValidityEntry:
"""A single validity line in progress."""
def __init__(self, text=None, anchor=None):
"""Prepare to add a validity entry, optionally with a VUID anchor.
An anchor is generated by concatenating the elements of the anchor tuple with dashes
at the end of the VUID anchor name.
"""
_checkAnchorComponents(anchor)
if isinstance(anchor, str):
# anchor needs to be a tuple
anchor = (anchor,)
self.anchor = anchor
self.parts = []
self.verbose = False
if text:
self.append(text)
def append(self, part):
"""Append a part of a string.
If this is the first entry part and the part doesn't start
with a markup macro, the first character will be capitalized."""
if not self.parts and not _STARTS_WITH_MACRO_RE.match(part):
self.parts.append(part[:1].upper())
self.parts.append(part[1:])
else:
self.parts.append(part)
if self.verbose:
print('ValidityEntry', id(self), 'after append:', str(self))
def drop_end(self, n):
"""Remove up to n trailing characters from the string."""
temp = str(self)
n = min(len(temp), n)
self.parts = [temp[:-n]]
def __iadd__(self, other):
"""Perform += with a string,"""
self.append(other)
return self
def __bool__(self):
"""Return true if we have something more than just an anchor."""
empty = not self.parts
return not empty
def __str__(self):
"""Access validity statement as a single string or empty string."""
if not self:
raise RuntimeError("No parts added?")
return ''.join(self.parts).strip()
def __repr__(self):
parts = ['<ValidityEntry: ']
if self:
parts.append('"')
parts.append(str(self))
parts.append('"')
else:
parts.append('EMPTY')
if self.anchor:
parts.append(', anchor={}'.format('-'.join(self.anchor)))
parts.append('>')
return ''.join(parts)
| 34.986667 | 132 | 0.604294 |
33857371419783c30d0ac19ec774a569bbdf5c81 | 93,717 | py | Python | cisco-ios-xe/ydk/models/cisco_ios_xe/test/import_tests.py | bopopescu/ACI | dd717bc74739eeed4747b3ea9e36b239580df5e1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xe/ydk/models/cisco_ios_xe/test/import_tests.py | bopopescu/ACI | dd717bc74739eeed4747b3ea9e36b239580df5e1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xe/ydk/models/cisco_ios_xe/test/import_tests.py | bopopescu/ACI | dd717bc74739eeed4747b3ea9e36b239580df5e1 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-07-22T04:04:44.000Z | 2020-07-22T04:04:44.000Z |
import unittest
class ImportTest(unittest.TestCase):
def test_ATM_FORUM_TC_MIB(self):
from ydk.models.cisco_ios_xe.ATM_FORUM_TC_MIB import TruthValue
from ydk.models.cisco_ios_xe.ATM_FORUM_TC_MIB import AtmServiceCategory
def test_ATM_MIB(self):
from ydk.models.cisco_ios_xe.ATM_MIB import ATMMIB
def test_ATM_TC_MIB(self):
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmnotrafficdescriptor
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmnoclpnoscr
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmclpnotaggingnoscr
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmclptaggingnoscr
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmnoclpscr
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmclpnotaggingscr
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmclptaggingscr
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmclpnotaggingmcr
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmclptransparentnoscr
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmclptransparentscr
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmnoclptaggingnoscr
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmnoclpnoscrcdvt
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmnoclpscrcdvt
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmclpnotaggingscrcdvt
from ydk.models.cisco_ios_xe.ATM_TC_MIB import Atmclptaggingscrcdvt
from ydk.models.cisco_ios_xe.ATM_TC_MIB import AtmConnCastType
from ydk.models.cisco_ios_xe.ATM_TC_MIB import AtmConnKind
from ydk.models.cisco_ios_xe.ATM_TC_MIB import AtmInterfaceType
from ydk.models.cisco_ios_xe.ATM_TC_MIB import AtmServiceCategory
from ydk.models.cisco_ios_xe.ATM_TC_MIB import AtmVorXAdminStatus
from ydk.models.cisco_ios_xe.ATM_TC_MIB import AtmVorXOperStatus
def test_BGP4_MIB(self):
from ydk.models.cisco_ios_xe.BGP4_MIB import BGP4MIB
def test_BRIDGE_MIB(self):
from ydk.models.cisco_ios_xe.BRIDGE_MIB import BRIDGEMIB
def test_CISCO_AAA_SERVER_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_AAA_SERVER_MIB import CiscoAAAProtocol
from ydk.models.cisco_ios_xe.CISCO_AAA_SERVER_MIB import CISCOAAASERVERMIB
def test_CISCO_AAA_SESSION_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_AAA_SESSION_MIB import CISCOAAASESSIONMIB
def test_CISCO_AAL5_MIB(self):
pass
def test_CISCO_ATM_EXT_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_ATM_EXT_MIB import OamCCStatus
from ydk.models.cisco_ios_xe.CISCO_ATM_EXT_MIB import OamCCVcState
def test_CISCO_ATM_PVCTRAP_EXTN_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_ATM_PVCTRAP_EXTN_MIB import CatmOAMRecoveryType
from ydk.models.cisco_ios_xe.CISCO_ATM_PVCTRAP_EXTN_MIB import CatmOAMFailureType
from ydk.models.cisco_ios_xe.CISCO_ATM_PVCTRAP_EXTN_MIB import CISCOATMPVCTRAPEXTNMIB
def test_CISCO_ATM_QOS_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_ATM_QOS_MIB import VcParamConfigLocation
from ydk.models.cisco_ios_xe.CISCO_ATM_QOS_MIB import VpState
from ydk.models.cisco_ios_xe.CISCO_ATM_QOS_MIB import CISCOATMQOSMIB
def test_CISCO_BGP4_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_BGP4_MIB import CbgpSafi
from ydk.models.cisco_ios_xe.CISCO_BGP4_MIB import CISCOBGP4MIB
def test_CISCO_BGP_POLICY_ACCOUNTING_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_BGP_POLICY_ACCOUNTING_MIB import CISCOBGPPOLICYACCOUNTINGMIB
def test_CISCO_BULK_FILE_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_BULK_FILE_MIB import CISCOBULKFILEMIB
def test_CISCO_CBP_TARGET_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_CBP_TARGET_MIB import CISCOCBPTARGETMIB
def test_CISCO_CBP_TARGET_TC_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_CBP_TARGET_TC_MIB import CcbptTargetType
from ydk.models.cisco_ios_xe.CISCO_CBP_TARGET_TC_MIB import CcbptTargetDirection
from ydk.models.cisco_ios_xe.CISCO_CBP_TARGET_TC_MIB import CcbptPolicySourceType
def test_CISCO_CBP_TC_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_CBP_TC_MIB import CbpExecutionStrategy
def test_CISCO_CDP_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_CDP_MIB import CISCOCDPMIB
def test_CISCO_CEF_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_CEF_MIB import CISCOCEFMIB
def test_CISCO_CEF_TC(self):
from ydk.models.cisco_ios_xe.CISCO_CEF_TC import CefIpVersion
from ydk.models.cisco_ios_xe.CISCO_CEF_TC import CefAdjLinkType
from ydk.models.cisco_ios_xe.CISCO_CEF_TC import CefPathType
from ydk.models.cisco_ios_xe.CISCO_CEF_TC import CefPrefixSearchState
from ydk.models.cisco_ios_xe.CISCO_CEF_TC import CefForwardingElementSpecialType
from ydk.models.cisco_ios_xe.CISCO_CEF_TC import CefAdminStatus
from ydk.models.cisco_ios_xe.CISCO_CEF_TC import CefOperStatus
from ydk.models.cisco_ios_xe.CISCO_CEF_TC import CefFailureReason
from ydk.models.cisco_ios_xe.CISCO_CEF_TC import CefCCType
from ydk.models.cisco_ios_xe.CISCO_CEF_TC import CefCCAction
from ydk.models.cisco_ios_xe.CISCO_CEF_TC import CefCCStatus
def test_CISCO_CONFIG_COPY_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_CONFIG_COPY_MIB import ConfigCopyProtocol
from ydk.models.cisco_ios_xe.CISCO_CONFIG_COPY_MIB import ConfigCopyState
from ydk.models.cisco_ios_xe.CISCO_CONFIG_COPY_MIB import ConfigCopyFailCause
from ydk.models.cisco_ios_xe.CISCO_CONFIG_COPY_MIB import ConfigFileType
from ydk.models.cisco_ios_xe.CISCO_CONFIG_COPY_MIB import CISCOCONFIGCOPYMIB
def test_CISCO_CONFIG_MAN_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_CONFIG_MAN_MIB import HistoryEventMedium
from ydk.models.cisco_ios_xe.CISCO_CONFIG_MAN_MIB import CISCOCONFIGMANMIB
def test_CISCO_CONTEXT_MAPPING_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_CONTEXT_MAPPING_MIB import CISCOCONTEXTMAPPINGMIB
def test_CISCO_DATA_COLLECTION_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_DATA_COLLECTION_MIB import CdcFileFormat
from ydk.models.cisco_ios_xe.CISCO_DATA_COLLECTION_MIB import CdcFileXferStatus
from ydk.models.cisco_ios_xe.CISCO_DATA_COLLECTION_MIB import CISCODATACOLLECTIONMIB
def test_CISCO_DIAL_CONTROL_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_DIAL_CONTROL_MIB import CISCODIALCONTROLMIB
def test_CISCO_DOT3_OAM_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_DOT3_OAM_MIB import CISCODOT3OAMMIB
def test_CISCO_DYNAMIC_TEMPLATE_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_DYNAMIC_TEMPLATE_MIB import CISCODYNAMICTEMPLATEMIB
def test_CISCO_DYNAMIC_TEMPLATE_TC_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_DYNAMIC_TEMPLATE_TC_MIB import DynamicTemplateType
from ydk.models.cisco_ios_xe.CISCO_DYNAMIC_TEMPLATE_TC_MIB import DynamicTemplateTargetType
def test_CISCO_EIGRP_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_EIGRP_MIB import CISCOEIGRPMIB
def test_CISCO_EMBEDDED_EVENT_MGR_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_EMBEDDED_EVENT_MGR_MIB import NotifySource
from ydk.models.cisco_ios_xe.CISCO_EMBEDDED_EVENT_MGR_MIB import CISCOEMBEDDEDEVENTMGRMIB
def test_CISCO_ENHANCED_MEMPOOL_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_ENHANCED_MEMPOOL_MIB import CempMemPoolTypes
from ydk.models.cisco_ios_xe.CISCO_ENHANCED_MEMPOOL_MIB import CISCOENHANCEDMEMPOOLMIB
def test_CISCO_ENTITY_ALARM_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_ENTITY_ALARM_MIB import AlarmSeverity
from ydk.models.cisco_ios_xe.CISCO_ENTITY_ALARM_MIB import CISCOENTITYALARMMIB
def test_CISCO_ENTITY_EXT_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_ENTITY_EXT_MIB import CISCOENTITYEXTMIB
def test_CISCO_ENTITY_FRU_CONTROL_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_ENTITY_FRU_CONTROL_MIB import PowerRedundancyType
from ydk.models.cisco_ios_xe.CISCO_ENTITY_FRU_CONTROL_MIB import PowerAdminType
from ydk.models.cisco_ios_xe.CISCO_ENTITY_FRU_CONTROL_MIB import PowerOperType
from ydk.models.cisco_ios_xe.CISCO_ENTITY_FRU_CONTROL_MIB import ModuleAdminType
from ydk.models.cisco_ios_xe.CISCO_ENTITY_FRU_CONTROL_MIB import ModuleOperType
from ydk.models.cisco_ios_xe.CISCO_ENTITY_FRU_CONTROL_MIB import ModuleResetReasonType
from ydk.models.cisco_ios_xe.CISCO_ENTITY_FRU_CONTROL_MIB import FRUCoolingUnit
from ydk.models.cisco_ios_xe.CISCO_ENTITY_FRU_CONTROL_MIB import CISCOENTITYFRUCONTROLMIB
def test_CISCO_ENTITY_QFP_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_ENTITY_QFP_MIB import CiscoQfpTimeInterval
from ydk.models.cisco_ios_xe.CISCO_ENTITY_QFP_MIB import CiscoQfpMemoryResource
from ydk.models.cisco_ios_xe.CISCO_ENTITY_QFP_MIB import CISCOENTITYQFPMIB
def test_CISCO_ENTITY_SENSOR_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB import SensorDataType
from ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB import SensorDataScale
from ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB import SensorStatus
from ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB import SensorThresholdSeverity
from ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB import SensorThresholdRelation
from ydk.models.cisco_ios_xe.CISCO_ENTITY_SENSOR_MIB import CISCOENTITYSENSORMIB
def test_CISCO_ENTITY_VENDORTYPE_OID_MIB(self):
pass
def test_CISCO_ENVMON_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_ENVMON_MIB import CiscoEnvMonState
from ydk.models.cisco_ios_xe.CISCO_ENVMON_MIB import CISCOENVMONMIB
def test_CISCO_ETHERLIKE_EXT_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_ETHERLIKE_EXT_MIB import CISCOETHERLIKEEXTMIB
def test_CISCO_ETHER_CFM_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_ETHER_CFM_MIB import CISCOETHERCFMMIB
def test_CISCO_FIREWALL_TC(self):
from ydk.models.cisco_ios_xe.CISCO_FIREWALL_TC import CFWNetworkProtocol
from ydk.models.cisco_ios_xe.CISCO_FIREWALL_TC import CFWApplicationProtocol
from ydk.models.cisco_ios_xe.CISCO_FIREWALL_TC import CFWPolicyTargetType
from ydk.models.cisco_ios_xe.CISCO_FIREWALL_TC import CFWUrlfVendorId
from ydk.models.cisco_ios_xe.CISCO_FIREWALL_TC import CFWUrlServerStatus
def test_CISCO_FLASH_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_FLASH_MIB import FlashFileType
from ydk.models.cisco_ios_xe.CISCO_FLASH_MIB import CISCOFLASHMIB
def test_CISCO_FTP_CLIENT_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_FTP_CLIENT_MIB import CISCOFTPCLIENTMIB
def test_CISCO_HSRP_EXT_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_HSRP_EXT_MIB import CISCOHSRPEXTMIB
def test_CISCO_HSRP_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_HSRP_MIB import HsrpState
from ydk.models.cisco_ios_xe.CISCO_HSRP_MIB import CISCOHSRPMIB
def test_CISCO_IETF_ATM2_PVCTRAP_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IETF_ATM2_PVCTRAP_MIB import CISCOIETFATM2PVCTRAPMIB
def test_CISCO_IETF_ATM2_PVCTRAP_MIB_EXTN(self):
from ydk.models.cisco_ios_xe.CISCO_IETF_ATM2_PVCTRAP_MIB_EXTN import CISCOIETFATM2PVCTRAPMIBEXTN
def test_CISCO_IETF_BFD_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IETF_BFD_MIB import CiscoBfdDiag
from ydk.models.cisco_ios_xe.CISCO_IETF_BFD_MIB import CISCOIETFBFDMIB
def test_CISCO_IETF_FRR_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB import CISCOIETFFRRMIB
def test_CISCO_IETF_ISIS_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IETF_ISIS_MIB import CiiAdminState
from ydk.models.cisco_ios_xe.CISCO_IETF_ISIS_MIB import CiiLevelState
from ydk.models.cisco_ios_xe.CISCO_IETF_ISIS_MIB import CiiSupportedProtocol
from ydk.models.cisco_ios_xe.CISCO_IETF_ISIS_MIB import CiiMetricType
from ydk.models.cisco_ios_xe.CISCO_IETF_ISIS_MIB import CiiMetricStyle
from ydk.models.cisco_ios_xe.CISCO_IETF_ISIS_MIB import CiiISLevel
from ydk.models.cisco_ios_xe.CISCO_IETF_ISIS_MIB import CISCOIETFISISMIB
def test_CISCO_IETF_MPLS_ID_STD_03_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IETF_MPLS_ID_STD_03_MIB import CISCOIETFMPLSIDSTD03MIB
def test_CISCO_IETF_MPLS_TE_EXT_STD_03_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IETF_MPLS_TE_EXT_STD_03_MIB import CISCOIETFMPLSTEEXTSTD03MIB
def test_CISCO_IETF_PW_ATM_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IETF_PW_ATM_MIB import CISCOIETFPWATMMIB
def test_CISCO_IETF_PW_ENET_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IETF_PW_ENET_MIB import CISCOIETFPWENETMIB
def test_CISCO_IETF_PW_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IETF_PW_MIB import CISCOIETFPWMIB
def test_CISCO_IETF_PW_MPLS_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB import CISCOIETFPWMPLSMIB
def test_CISCO_IETF_PW_TC_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IETF_PW_TC_MIB import CpwOperStatus
from ydk.models.cisco_ios_xe.CISCO_IETF_PW_TC_MIB import CpwVcType
def test_CISCO_IETF_PW_TDM_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IETF_PW_TDM_MIB import CISCOIETFPWTDMMIB
def test_CISCO_IF_EXTENSION_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IF_EXTENSION_MIB import IfIndexPersistenceState
from ydk.models.cisco_ios_xe.CISCO_IF_EXTENSION_MIB import CISCOIFEXTENSIONMIB
def test_CISCO_IGMP_FILTER_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IGMP_FILTER_MIB import CISCOIGMPFILTERMIB
def test_CISCO_IMAGE_LICENSE_MGMT_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IMAGE_LICENSE_MGMT_MIB import CISCOIMAGELICENSEMGMTMIB
def test_CISCO_IMAGE_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IMAGE_MIB import CISCOIMAGEMIB
def test_CISCO_IPMROUTE_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IPMROUTE_MIB import CISCOIPMROUTEMIB
def test_CISCO_IPSEC_FLOW_MONITOR_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IPSEC_FLOW_MONITOR_MIB import IkePeerType
from ydk.models.cisco_ios_xe.CISCO_IPSEC_FLOW_MONITOR_MIB import IkeNegoMode
from ydk.models.cisco_ios_xe.CISCO_IPSEC_FLOW_MONITOR_MIB import IkeHashAlgo
from ydk.models.cisco_ios_xe.CISCO_IPSEC_FLOW_MONITOR_MIB import IkeAuthMethod
from ydk.models.cisco_ios_xe.CISCO_IPSEC_FLOW_MONITOR_MIB import DiffHellmanGrp
from ydk.models.cisco_ios_xe.CISCO_IPSEC_FLOW_MONITOR_MIB import KeyType
from ydk.models.cisco_ios_xe.CISCO_IPSEC_FLOW_MONITOR_MIB import EncapMode
from ydk.models.cisco_ios_xe.CISCO_IPSEC_FLOW_MONITOR_MIB import EncryptAlgo
from ydk.models.cisco_ios_xe.CISCO_IPSEC_FLOW_MONITOR_MIB import AuthAlgo
from ydk.models.cisco_ios_xe.CISCO_IPSEC_FLOW_MONITOR_MIB import CompAlgo
from ydk.models.cisco_ios_xe.CISCO_IPSEC_FLOW_MONITOR_MIB import EndPtType
from ydk.models.cisco_ios_xe.CISCO_IPSEC_FLOW_MONITOR_MIB import TunnelStatus
from ydk.models.cisco_ios_xe.CISCO_IPSEC_FLOW_MONITOR_MIB import TrapStatus
from ydk.models.cisco_ios_xe.CISCO_IPSEC_FLOW_MONITOR_MIB import CISCOIPSECFLOWMONITORMIB
def test_CISCO_IPSEC_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB import CryptomapType
from ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB import CryptomapSetBindStatus
from ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB import IkeHashAlgo
from ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB import IkeAuthMethod
from ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB import IkeIdentityType
from ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB import DiffHellmanGrp
from ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB import EncryptAlgo
from ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB import TrapStatus
from ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB import CISCOIPSECMIB
def test_CISCO_IPSEC_POLICY_MAP_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IPSEC_POLICY_MAP_MIB import CISCOIPSECPOLICYMAPMIB
def test_CISCO_IPSLA_AUTOMEASURE_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IPSLA_AUTOMEASURE_MIB import CISCOIPSLAAUTOMEASUREMIB
def test_CISCO_IPSLA_ECHO_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB import CISCOIPSLAECHOMIB
def test_CISCO_IPSLA_JITTER_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IPSLA_JITTER_MIB import CISCOIPSLAJITTERMIB
def test_CISCO_IPSLA_TC_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IPSLA_TC_MIB import IpSlaOperType
from ydk.models.cisco_ios_xe.CISCO_IPSLA_TC_MIB import IpSlaCodecType
from ydk.models.cisco_ios_xe.CISCO_IPSLA_TC_MIB import IpSlaReactVar
def test_CISCO_IP_LOCAL_POOL_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IP_LOCAL_POOL_MIB import CISCOIPLOCALPOOLMIB
def test_CISCO_IP_TAP_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IP_TAP_MIB import CISCOIPTAPMIB
def test_CISCO_IP_URPF_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_IP_URPF_MIB import UnicastRpfType
from ydk.models.cisco_ios_xe.CISCO_IP_URPF_MIB import CISCOIPURPFMIB
def test_CISCO_LICENSE_MGMT_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_LICENSE_MGMT_MIB import ClmgmtLicenseTransferProtocol
from ydk.models.cisco_ios_xe.CISCO_LICENSE_MGMT_MIB import ClmgmtLicenseActionState
from ydk.models.cisco_ios_xe.CISCO_LICENSE_MGMT_MIB import ClmgmtLicenseActionFailCause
from ydk.models.cisco_ios_xe.CISCO_LICENSE_MGMT_MIB import CISCOLICENSEMGMTMIB
def test_CISCO_MEDIA_GATEWAY_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_MEDIA_GATEWAY_MIB import CGwServiceState
from ydk.models.cisco_ios_xe.CISCO_MEDIA_GATEWAY_MIB import CGwAdminState
from ydk.models.cisco_ios_xe.CISCO_MEDIA_GATEWAY_MIB import CCallControlJitterDelayMode
from ydk.models.cisco_ios_xe.CISCO_MEDIA_GATEWAY_MIB import CISCOMEDIAGATEWAYMIB
def test_CISCO_MPLS_LSR_EXT_STD_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_MPLS_LSR_EXT_STD_MIB import CISCOMPLSLSREXTSTDMIB
def test_CISCO_MPLS_TC_EXT_STD_MIB(self):
pass
def test_CISCO_NBAR_PROTOCOL_DISCOVERY_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_NBAR_PROTOCOL_DISCOVERY_MIB import CiscoPdDataType
from ydk.models.cisco_ios_xe.CISCO_NBAR_PROTOCOL_DISCOVERY_MIB import CISCONBARPROTOCOLDISCOVERYMIB
def test_CISCO_NETSYNC_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_NETSYNC_MIB import CiscoNetsyncIfType
from ydk.models.cisco_ios_xe.CISCO_NETSYNC_MIB import CiscoNetsyncNetworkOption
from ydk.models.cisco_ios_xe.CISCO_NETSYNC_MIB import CiscoNetsyncEECOption
from ydk.models.cisco_ios_xe.CISCO_NETSYNC_MIB import CiscoNetsyncQLMode
from ydk.models.cisco_ios_xe.CISCO_NETSYNC_MIB import CiscoNetsyncClockMode
from ydk.models.cisco_ios_xe.CISCO_NETSYNC_MIB import CiscoNetsyncQualityLevel
from ydk.models.cisco_ios_xe.CISCO_NETSYNC_MIB import CiscoNetsyncSSMCap
from ydk.models.cisco_ios_xe.CISCO_NETSYNC_MIB import CiscoNetsyncESMCCap
from ydk.models.cisco_ios_xe.CISCO_NETSYNC_MIB import CISCONETSYNCMIB
def test_CISCO_NTP_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_NTP_MIB import NTPLeapIndicator
from ydk.models.cisco_ios_xe.CISCO_NTP_MIB import CISCONTPMIB
def test_CISCO_OSPF_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_OSPF_MIB import CISCOOSPFMIB
def test_CISCO_OSPF_TRAP_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_OSPF_TRAP_MIB import CISCOOSPFTRAPMIB
def test_CISCO_PIM_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_PIM_MIB import CISCOPIMMIB
def test_CISCO_PING_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_PING_MIB import CISCOPINGMIB
def test_CISCO_POWER_ETHERNET_EXT_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_POWER_ETHERNET_EXT_MIB import CpeExtLldpPwrType
from ydk.models.cisco_ios_xe.CISCO_POWER_ETHERNET_EXT_MIB import CpeExtLldpPwrSrc
from ydk.models.cisco_ios_xe.CISCO_POWER_ETHERNET_EXT_MIB import CpeExtPwrPriority
from ydk.models.cisco_ios_xe.CISCO_POWER_ETHERNET_EXT_MIB import CISCOPOWERETHERNETEXTMIB
def test_CISCO_PROCESS_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_PROCESS_MIB import CISCOPROCESSMIB
def test_CISCO_PRODUCTS_MIB(self):
pass
def test_CISCO_PTP_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_PTP_MIB import ClockMechanismType
from ydk.models.cisco_ios_xe.CISCO_PTP_MIB import ClockPortState
from ydk.models.cisco_ios_xe.CISCO_PTP_MIB import ClockProfileType
from ydk.models.cisco_ios_xe.CISCO_PTP_MIB import ClockQualityAccuracyType
from ydk.models.cisco_ios_xe.CISCO_PTP_MIB import ClockRoleType
from ydk.models.cisco_ios_xe.CISCO_PTP_MIB import ClockStateType
from ydk.models.cisco_ios_xe.CISCO_PTP_MIB import ClockTimeSourceType
from ydk.models.cisco_ios_xe.CISCO_PTP_MIB import ClockTxModeType
from ydk.models.cisco_ios_xe.CISCO_PTP_MIB import ClockType
from ydk.models.cisco_ios_xe.CISCO_PTP_MIB import CISCOPTPMIB
def test_CISCO_QOS_PIB_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_QOS_PIB_MIB import QueueRange
from ydk.models.cisco_ios_xe.CISCO_QOS_PIB_MIB import ThresholdSetRange
from ydk.models.cisco_ios_xe.CISCO_QOS_PIB_MIB import QosInterfaceQueueType
from ydk.models.cisco_ios_xe.CISCO_QOS_PIB_MIB import CISCOQOSPIBMIB
def test_CISCO_RADIUS_EXT_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_RADIUS_EXT_MIB import CISCORADIUSEXTMIB
def test_CISCO_RF_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_RF_MIB import RFState
from ydk.models.cisco_ios_xe.CISCO_RF_MIB import RFMode
from ydk.models.cisco_ios_xe.CISCO_RF_MIB import RFAction
from ydk.models.cisco_ios_xe.CISCO_RF_MIB import RFSwactReasonType
from ydk.models.cisco_ios_xe.CISCO_RF_MIB import RFIssuState
from ydk.models.cisco_ios_xe.CISCO_RF_MIB import RFIssuStateRev1
from ydk.models.cisco_ios_xe.CISCO_RF_MIB import RFClientStatus
from ydk.models.cisco_ios_xe.CISCO_RF_MIB import CISCORFMIB
def test_CISCO_RTTMON_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_RTTMON_MIB import CISCORTTMONMIB
def test_CISCO_RTTMON_TC_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_RTTMON_TC_MIB import RttReset
from ydk.models.cisco_ios_xe.CISCO_RTTMON_TC_MIB import RttMonOperation
from ydk.models.cisco_ios_xe.CISCO_RTTMON_TC_MIB import RttResponseSense
from ydk.models.cisco_ios_xe.CISCO_RTTMON_TC_MIB import RttMonRttType
from ydk.models.cisco_ios_xe.CISCO_RTTMON_TC_MIB import RttMplsVpnMonRttType
from ydk.models.cisco_ios_xe.CISCO_RTTMON_TC_MIB import RttMplsVpnMonLpdFailureSense
from ydk.models.cisco_ios_xe.CISCO_RTTMON_TC_MIB import RttMplsVpnMonLpdGrpStatus
from ydk.models.cisco_ios_xe.CISCO_RTTMON_TC_MIB import RttMonProtocol
from ydk.models.cisco_ios_xe.CISCO_RTTMON_TC_MIB import RttMonCodecType
from ydk.models.cisco_ios_xe.CISCO_RTTMON_TC_MIB import RttMonLSPPingReplyMode
from ydk.models.cisco_ios_xe.CISCO_RTTMON_TC_MIB import RttMonReactVar
def test_CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB import CiscoSbcPeriodicStatsInterval
from ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_CALL_STATS_MIB import CISCOSESSBORDERCTRLRCALLSTATSMIB
def test_CISCO_SESS_BORDER_CTRLR_STATS_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB import CiscoSbcSIPMethod
from ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB import CiscoSbcRadiusClientType
from ydk.models.cisco_ios_xe.CISCO_SESS_BORDER_CTRLR_STATS_MIB import CISCOSESSBORDERCTRLRSTATSMIB
def test_CISCO_SIP_UA_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_SIP_UA_MIB import Ciscosipuamibnotificationprefix
from ydk.models.cisco_ios_xe.CISCO_SIP_UA_MIB import Ciscosipuamibnotifications
from ydk.models.cisco_ios_xe.CISCO_SIP_UA_MIB import CISCOSIPUAMIB
def test_CISCO_SMI(self):
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscoproducts
from ydk.models.cisco_ios_xe.CISCO_SMI import Local
from ydk.models.cisco_ios_xe.CISCO_SMI import Temporary
from ydk.models.cisco_ios_xe.CISCO_SMI import Pakmon
from ydk.models.cisco_ios_xe.CISCO_SMI import Workgroup
from ydk.models.cisco_ios_xe.CISCO_SMI import Otherenterprises
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscosb
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscosmb
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscoagentcapability
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscoconfig
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscomgmt
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscoexperiment
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscoadmin
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscoproxy
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscorptrgroupobjectid
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscounknownrptrgroup
from ydk.models.cisco_ios_xe.CISCO_SMI import Cisco2505Rptrgroup
from ydk.models.cisco_ios_xe.CISCO_SMI import Cisco2507Rptrgroup
from ydk.models.cisco_ios_xe.CISCO_SMI import Cisco2516Rptrgroup
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscowsx5020Rptrgroup
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscochipsets
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscochipsetsaint1
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscochipsetsaint2
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscochipsetsaint3
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscochipsetsaint4
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscomodules
from ydk.models.cisco_ios_xe.CISCO_SMI import Lightstream
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscoworks
from ydk.models.cisco_ios_xe.CISCO_SMI import Newport
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscopartnerproducts
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscopolicy
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscopib
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscopolicyauto
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscopibtomib
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscodomains
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscotdomainudpipv4
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscotdomainudpipv6
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscotdomaintcpipv4
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscotdomaintcpipv6
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscotdomainlocal
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscotdomainclns
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscotdomaincons
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscotdomainddp
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscotdomainipx
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscotdomainsctpipv4
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscotdomainsctpipv6
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscocib
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscocibmmigroup
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscocibprovgroup
from ydk.models.cisco_ios_xe.CISCO_SMI import Ciscopki
def test_CISCO_SONET_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_SONET_MIB import CsApsLineFailureCode
from ydk.models.cisco_ios_xe.CISCO_SONET_MIB import CsApsLineSwitchReason
from ydk.models.cisco_ios_xe.CISCO_SONET_MIB import CISCOSONETMIB
def test_CISCO_STP_EXTENSIONS_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_STP_EXTENSIONS_MIB import CISCOSTPEXTENSIONSMIB
def test_CISCO_ST_TC(self):
from ydk.models.cisco_ios_xe.CISCO_ST_TC import FcPortTypes
from ydk.models.cisco_ios_xe.CISCO_ST_TC import FcPortTxTypes
from ydk.models.cisco_ios_xe.CISCO_ST_TC import FcPortModuleTypes
from ydk.models.cisco_ios_xe.CISCO_ST_TC import FcIfSpeed
from ydk.models.cisco_ios_xe.CISCO_ST_TC import FcAddressType
from ydk.models.cisco_ios_xe.CISCO_ST_TC import InterfaceOperMode
from ydk.models.cisco_ios_xe.CISCO_ST_TC import FcIfServiceStateType
from ydk.models.cisco_ios_xe.CISCO_ST_TC import FcIfSfpDiagLevelType
def test_CISCO_SUBSCRIBER_IDENTITY_TC_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_SUBSCRIBER_IDENTITY_TC_MIB import SubSessionIdentity
from ydk.models.cisco_ios_xe.CISCO_SUBSCRIBER_IDENTITY_TC_MIB import SubscriberMediaType
from ydk.models.cisco_ios_xe.CISCO_SUBSCRIBER_IDENTITY_TC_MIB import SubscriberProtocolType
def test_CISCO_SUBSCRIBER_SESSION_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_SUBSCRIBER_SESSION_MIB import CISCOSUBSCRIBERSESSIONMIB
def test_CISCO_SUBSCRIBER_SESSION_TC_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_SUBSCRIBER_SESSION_TC_MIB import SubSessionType
from ydk.models.cisco_ios_xe.CISCO_SUBSCRIBER_SESSION_TC_MIB import SubSessionState
from ydk.models.cisco_ios_xe.CISCO_SUBSCRIBER_SESSION_TC_MIB import SubSessionRedundancyMode
def test_CISCO_SYSLOG_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_SYSLOG_MIB import SyslogSeverity
from ydk.models.cisco_ios_xe.CISCO_SYSLOG_MIB import CISCOSYSLOGMIB
def test_CISCO_TAP2_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_TAP2_MIB import CISCOTAP2MIB
def test_CISCO_TC(self):
from ydk.models.cisco_ios_xe.CISCO_TC import CiscoNetworkProtocol
from ydk.models.cisco_ios_xe.CISCO_TC import CiscoRowOperStatus
from ydk.models.cisco_ios_xe.CISCO_TC import CiscoLocationClass
from ydk.models.cisco_ios_xe.CISCO_TC import CiscoAlarmSeverity
from ydk.models.cisco_ios_xe.CISCO_TC import CiscoPortListRange
from ydk.models.cisco_ios_xe.CISCO_TC import IfOperStatusReason
def test_CISCO_UBE_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_UBE_MIB import CISCOUBEMIB
def test_CISCO_UNIFIED_FIREWALL_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_UNIFIED_FIREWALL_MIB import CISCOUNIFIEDFIREWALLMIB
def test_CISCO_VLAN_IFTABLE_RELATIONSHIP_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_VLAN_IFTABLE_RELATIONSHIP_MIB import CISCOVLANIFTABLERELATIONSHIPMIB
def test_CISCO_VLAN_MEMBERSHIP_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_VLAN_MEMBERSHIP_MIB import CISCOVLANMEMBERSHIPMIB
def test_CISCO_VOICE_COMMON_DIAL_CONTROL_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_VOICE_COMMON_DIAL_CONTROL_MIB import CvcSpeechCoderRate
from ydk.models.cisco_ios_xe.CISCO_VOICE_COMMON_DIAL_CONTROL_MIB import CvcFaxTransmitRate
from ydk.models.cisco_ios_xe.CISCO_VOICE_COMMON_DIAL_CONTROL_MIB import CvcCoderTypeRate
from ydk.models.cisco_ios_xe.CISCO_VOICE_COMMON_DIAL_CONTROL_MIB import CvcInBandSignaling
from ydk.models.cisco_ios_xe.CISCO_VOICE_COMMON_DIAL_CONTROL_MIB import CvcH320CallType
from ydk.models.cisco_ios_xe.CISCO_VOICE_COMMON_DIAL_CONTROL_MIB import CvcVideoCoderRate
from ydk.models.cisco_ios_xe.CISCO_VOICE_COMMON_DIAL_CONTROL_MIB import CISCOVOICECOMMONDIALCONTROLMIB
def test_CISCO_VOICE_DIAL_CONTROL_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_VOICE_DIAL_CONTROL_MIB import CvCallVolumeWMIntvlType
from ydk.models.cisco_ios_xe.CISCO_VOICE_DIAL_CONTROL_MIB import CvCallVolumeStatsIntvlType
from ydk.models.cisco_ios_xe.CISCO_VOICE_DIAL_CONTROL_MIB import CvSessionProtocol
from ydk.models.cisco_ios_xe.CISCO_VOICE_DIAL_CONTROL_MIB import CvAmrNbRtpEncap
from ydk.models.cisco_ios_xe.CISCO_VOICE_DIAL_CONTROL_MIB import CvIlbcFrameMode
from ydk.models.cisco_ios_xe.CISCO_VOICE_DIAL_CONTROL_MIB import CvCallConnectionType
from ydk.models.cisco_ios_xe.CISCO_VOICE_DIAL_CONTROL_MIB import CISCOVOICEDIALCONTROLMIB
def test_CISCO_VOICE_DNIS_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_VOICE_DNIS_MIB import CISCOVOICEDNISMIB
def test_CISCO_VPDN_MGMT_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_VPDN_MGMT_MIB import TunnelType
from ydk.models.cisco_ios_xe.CISCO_VPDN_MGMT_MIB import EndpointClass
from ydk.models.cisco_ios_xe.CISCO_VPDN_MGMT_MIB import CISCOVPDNMGMTMIB
def test_CISCO_VTP_MIB(self):
from ydk.models.cisco_ios_xe.CISCO_VTP_MIB import VlanType
from ydk.models.cisco_ios_xe.CISCO_VTP_MIB import CISCOVTPMIB
def test_Cisco_IOS_XE_aaa(self):
pass
def test_Cisco_IOS_XE_aaa_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_aaa_oper import AaaSessProtType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_aaa_oper import AaaData
def test_Cisco_IOS_XE_acl(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_acl import AclPortType
def test_Cisco_IOS_XE_acl_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_acl_oper import AccessLists
def test_Cisco_IOS_XE_arp(self):
pass
def test_Cisco_IOS_XE_atm(self):
pass
def test_Cisco_IOS_XE_avb(self):
pass
def test_Cisco_IOS_XE_bba_group(self):
pass
def test_Cisco_IOS_XE_bfd(self):
pass
def test_Cisco_IOS_XE_bfd_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bfd_oper import BfdOperSessionType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bfd_oper import BfdRemoteStateType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bfd_oper import BfdStateType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bfd_oper import BfdLspType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bfd_oper import BfdState
def test_Cisco_IOS_XE_bgp(self):
pass
def test_Cisco_IOS_XE_bgp_common_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_common_oper import AfiSafi
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_common_oper import TcpFsmState
def test_Cisco_IOS_XE_bgp_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper import BgpLink
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper import BgpFsmState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper import BgpMode
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper import BgpStateData
def test_Cisco_IOS_XE_bgp_route_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_route_oper import BgpOriginCode
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_route_oper import BgpRpkiStatus
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_route_oper import BgpRouteFilters
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_route_oper import BgpNeighborRouteFilters
def test_Cisco_IOS_XE_bridge_domain(self):
pass
def test_Cisco_IOS_XE_call_home(self):
pass
def test_Cisco_IOS_XE_card(self):
pass
def test_Cisco_IOS_XE_cdp(self):
pass
def test_Cisco_IOS_XE_cdp_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cdp_oper import CdpDuplex
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cdp_oper import CdpAdvVersion
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cdp_oper import CdpUnidirectionalMode
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cdp_oper import CdpYesNo
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cdp_oper import CdpEnableDisable
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cdp_oper import CdpNeighborDetails
def test_Cisco_IOS_XE_cef(self):
pass
def test_Cisco_IOS_XE_cellular(self):
pass
def test_Cisco_IOS_XE_cellwan_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cellwan_oper import ModemStatus
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cellwan_oper import CwRadioPowerStatus
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cellwan_oper import RadioBandwidth
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cellwan_oper import ModemTechnology
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cellwan_oper import RatPreference
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cellwan_oper import RatTechnology
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cellwan_oper import ServiceStatus
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cellwan_oper import ModemService
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cellwan_oper import LteCa
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cellwan_oper import RegState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cellwan_oper import PacketSessStatus
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cellwan_oper import ProfileScope
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cellwan_oper import CellwanOperData
def test_Cisco_IOS_XE_cfm_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cfm_oper import CfmLastClearedType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_cfm_oper import CfmStatistics
def test_Cisco_IOS_XE_checkpoint_archive_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_checkpoint_archive_oper import CheckpointArchives
def test_Cisco_IOS_XE_coap(self):
pass
def test_Cisco_IOS_XE_common_types(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_common_types import AddrType
def test_Cisco_IOS_XE_controller(self):
pass
def test_Cisco_IOS_XE_crypto(self):
pass
def test_Cisco_IOS_XE_cts(self):
pass
def test_Cisco_IOS_XE_device_hardware_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_device_hardware_oper import HwType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_device_hardware_oper import AlarmSeverity
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_device_hardware_oper import DeviceHardwareData
def test_Cisco_IOS_XE_device_sensor(self):
pass
def test_Cisco_IOS_XE_device_tracking(self):
pass
def test_Cisco_IOS_XE_dhcp(self):
pass
def test_Cisco_IOS_XE_dhcp_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_dhcp_oper import DhcpServerBindingState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_dhcp_oper import DhcpServerBindingType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_dhcp_oper import DhcpClientState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_dhcp_oper import DhcpExpiryOption
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_dhcp_oper import DhcpClientIdType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_dhcp_oper import DhcpOperData
def test_Cisco_IOS_XE_diagnostics(self):
pass
def test_Cisco_IOS_XE_diffserv_target_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_diffserv_target_oper import Direction
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_diffserv_target_oper import Inbound
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_diffserv_target_oper import Outbound
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_diffserv_target_oper import DiffservInterfacesState
def test_Cisco_IOS_XE_dot1x(self):
pass
def test_Cisco_IOS_XE_eem(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_eem import OperatorType
def test_Cisco_IOS_XE_efp_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_efp_oper import EfpStats
def test_Cisco_IOS_XE_eigrp(self):
pass
def test_Cisco_IOS_XE_environment_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_environment_oper import SensorUnitsType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_environment_oper import EnvironmentSensors
def test_Cisco_IOS_XE_eta(self):
pass
def test_Cisco_IOS_XE_ethernet(self):
pass
def test_Cisco_IOS_XE_ezpm(self):
pass
def test_Cisco_IOS_XE_features(self):
pass
def test_Cisco_IOS_XE_fib_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_fib_oper import FibAddressFamily
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_fib_oper import EncapsulationHeaderType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_fib_oper import FibPathType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_fib_oper import FibOperData
def test_Cisco_IOS_XE_flow(self):
pass
def test_Cisco_IOS_XE_flow_monitor_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_flow_monitor_oper import FlowExporterIpwriteStatsType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_flow_monitor_oper import FlowMonitorCacheType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_flow_monitor_oper import FlowMonitorCacheState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_flow_monitor_oper import FlowMonitors
def test_Cisco_IOS_XE_http(self):
pass
def test_Cisco_IOS_XE_icmp(self):
pass
def test_Cisco_IOS_XE_igmp(self):
pass
def test_Cisco_IOS_XE_interface_common(self):
pass
def test_Cisco_IOS_XE_interfaces(self):
pass
def test_Cisco_IOS_XE_interfaces_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_interfaces_oper import QosMatchType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_interfaces_oper import ThreshUnit
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_interfaces_oper import QosDirection
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_interfaces_oper import IntfState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_interfaces_oper import EtherDuplex
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_interfaces_oper import EtherSpeed
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_interfaces_oper import OperState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_interfaces_oper import IetfIntfType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_interfaces_oper import SerialCrc
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_interfaces_oper import SubrateSpeed
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_interfaces_oper import T1E1LoopbackMode
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_interfaces_oper import Interfaces
def test_Cisco_IOS_XE_ip(self):
pass
def test_Cisco_IOS_XE_ip_sla_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ip_sla_oper import SlaOperType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ip_sla_oper import SlaReturnCode
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ip_sla_oper import AccuracyType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ip_sla_oper import RttType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ip_sla_oper import TtlType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ip_sla_oper import IpSlaStats
def test_Cisco_IOS_XE_ipv6(self):
pass
def test_Cisco_IOS_XE_ipv6_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ipv6_oper import Ipv6NdTdlState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ipv6_oper import Ipv6Data
def test_Cisco_IOS_XE_isis(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_isis import IsisLevelType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_isis import IsisRoutesLevelType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_isis import AuthenticationLevelType
def test_Cisco_IOS_XE_iwanfabric(self):
pass
def test_Cisco_IOS_XE_l2vpn(self):
pass
def test_Cisco_IOS_XE_l3vpn(self):
pass
def test_Cisco_IOS_XE_license(self):
pass
def test_Cisco_IOS_XE_line(self):
pass
def test_Cisco_IOS_XE_lisp(self):
pass
def test_Cisco_IOS_XE_lisp_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_lisp_oper import LispAddressFamilyType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_lisp_oper import LispIaftypeType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_lisp_oper import LispMapReplyActionType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_lisp_oper import LispRlocStateType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_lisp_oper import LispSessionStateType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_lisp_oper import LispState
def test_Cisco_IOS_XE_lldp(self):
pass
def test_Cisco_IOS_XE_lldp_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_lldp_oper import LldpEntries
def test_Cisco_IOS_XE_logging(self):
pass
def test_Cisco_IOS_XE_mdt_cfg(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mdt_cfg import MdtXfrmAttrType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mdt_cfg import MdtXfrmOpType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mdt_cfg import MdtXfrmLogicOp
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mdt_cfg import MdtXfrmOperator
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mdt_cfg import MdtSubscriptions
def test_Cisco_IOS_XE_mdt_common_defs(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mdt_common_defs import MdtSubFilterType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mdt_common_defs import MdtSubUpdateTrigger
def test_Cisco_IOS_XE_mdt_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mdt_oper import MdtSubType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mdt_oper import MdtSubState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mdt_oper import MdtReceiverState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mdt_oper import MdtConState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mdt_oper import MdtOperData
def test_Cisco_IOS_XE_memory_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_memory_oper import MemoryStatistics
def test_Cisco_IOS_XE_mka(self):
pass
def test_Cisco_IOS_XE_mld(self):
pass
def test_Cisco_IOS_XE_mmode(self):
pass
def test_Cisco_IOS_XE_mpls(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls import LdpDiscoveryAddressType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls import MplsTeTiebreakerType
def test_Cisco_IOS_XE_mpls_fwd_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_fwd_oper import MplsForwardingTable
def test_Cisco_IOS_XE_mpls_ldp(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsn
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnNone
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnTblIdMismatch
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnPpExists
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnMissingElem
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnNoPEndSock
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnPEndSockNotSynced
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnErrAdjAdd
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnErrDhcAdd
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnEnomem
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnErrTpCreate
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnErrPpCreate
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnErrAddrBind
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnErrRxBadPie
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnErrRxNotif
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnErrRxUnexpOpen
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnErrUnexpPeerDown
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnErrAppNotFound
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnErrAppInvalid
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrSyncNackRsnNoCtx
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrPeerSyncErr
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrPeerSyncErrNone
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrPeerSyncErrLdpSyncNack
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrPeerSyncErrSyncPrep
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrPeerSyncErrTcpPeer
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrPeerSyncErrTcpGbl
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrPeerSyncErrLdpPeer
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrPeerSyncErrLdpGbl
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrPeerSyncErrAppFail
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import IcpmType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import IcpmTypeIccp
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import IccpType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import IccpTypeMlacp
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrPeerSyncState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import LdpNsrPeerSyncStNone
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import LdpNsrPeerSyncStWait
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import LdpNsrPeerSyncStReady
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import LdpNsrPeerSyncStPrep
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import LdpNsrPeerSyncStAppWait
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import LdpNsrPeerSyncStOper
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrStatus
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrStatusReady
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrStatusNotReady
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NsrStatusDisabled
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import DownNbrReason
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import DownNbrReasonNa
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import DownNbrReasonNbrHold
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import DownNbrReasonDiscHello
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import RoutePathLblOwner
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import RoutePathLblOwnerNone
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import RoutePathLblOwnerLdp
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import RoutePathLblOwnerBgp
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import RoutePathLblOwnerStatic
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import LabelType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import LabelTypeMpls
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import LabelTypeUnLabeled
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import LabelTypeUnknown
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import RoutePathType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import RoutePathIpNoFlag
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import RoutePathIpProtected
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import RoutePathIpBackup
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import RoutePathIpBackupRemote
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import RoutePathIpBgpBackup
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import IgpSyncDownReason
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import IgpSyncDownReasonNa
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import IgpSyncDownReasonNoHelloAdj
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import IgpSyncDownReasonNoPeerSess
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import IgpSyncDownReasonPeerUpdateNotDone
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import IgpSyncDownReasonPeerUpdateNotReceived
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import IgpSyncDownReasonInternal
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import LoopDetectionType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import SessionState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import AdjState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import AdvLabelType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import NbrBgpAdvtState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import IccpState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import DhcState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import IgpSyncState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import LocalLabelState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import Af
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import AfId
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import MplsLdp
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import ClearMsgCounters
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import RestartNeighbor
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_ldp import ClearForwarding
def test_Cisco_IOS_XE_multicast(self):
pass
def test_Cisco_IOS_XE_mvrp(self):
pass
def test_Cisco_IOS_XE_nat(self):
pass
def test_Cisco_IOS_XE_nat_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_nat_oper import NatData
def test_Cisco_IOS_XE_native(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_native import MonitorEventType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_native import LoggingLevelType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_native import Native
def test_Cisco_IOS_XE_nbar(self):
pass
def test_Cisco_IOS_XE_nd(self):
pass
def test_Cisco_IOS_XE_nhrp(self):
pass
def test_Cisco_IOS_XE_ntp(self):
pass
def test_Cisco_IOS_XE_ntp_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ntp_oper import RefClockSourceType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ntp_oper import KissCodeType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ntp_oper import RefidPktTypeInfo
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ntp_oper import NtpOperData
def test_Cisco_IOS_XE_object_group(self):
pass
def test_Cisco_IOS_XE_ospf(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ospf import RedistOspfExternalType
def test_Cisco_IOS_XE_ospf_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ospf_oper import AddressFamily
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ospf_oper import OspfOperationMode
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ospf_oper import OspfNetworkType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ospf_oper import OspfAuthType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ospf_oper import NbrStateType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ospf_oper import OspfOperData
def test_Cisco_IOS_XE_ospfv3(self):
pass
def test_Cisco_IOS_XE_otv(self):
pass
def test_Cisco_IOS_XE_parser(self):
pass
def test_Cisco_IOS_XE_pathmgr(self):
pass
def test_Cisco_IOS_XE_pfr(self):
pass
def test_Cisco_IOS_XE_platform(self):
pass
def test_Cisco_IOS_XE_platform_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_platform_oper import PlatformCompType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_platform_oper import PlatformPropValueType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_platform_oper import Components
def test_Cisco_IOS_XE_platform_software_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_platform_software_oper import BFru
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_platform_software_oper import CiscoPlatformSoftware
def test_Cisco_IOS_XE_policy(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_policy import PrecedenceType2
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_policy import PolicyActionType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_policy import ClassNameType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_policy import PolicePacketsBytesType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_policy import BytesMsUsType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_policy import PolicePpsBpsType
def test_Cisco_IOS_XE_power(self):
pass
def test_Cisco_IOS_XE_ppp(self):
pass
def test_Cisco_IOS_XE_ppp_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ppp_oper import PppIosAuthType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ppp_oper import PppoeOperationalRole
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_ppp_oper import PppData
def test_Cisco_IOS_XE_process_cpu_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_process_cpu_oper import CpuUsage
def test_Cisco_IOS_XE_process_memory_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_process_memory_oper import MemoryUsageProcesses
def test_Cisco_IOS_XE_ptp(self):
pass
def test_Cisco_IOS_XE_qos(self):
pass
def test_Cisco_IOS_XE_rip(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_rip import OffsetListInOutType
def test_Cisco_IOS_XE_route_map(self):
pass
def test_Cisco_IOS_XE_rpc(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_rpc import Switch
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_rpc import Default
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_rpc import Clear
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_rpc import Release
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_rpc import Reload
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_rpc import Cellular
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_rpc import License
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_rpc import Service
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_rpc import VirtualService
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_rpc import Copy
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_rpc import Delete
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_rpc import AppHosting
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_rpc import Guestshell
def test_Cisco_IOS_XE_rsvp(self):
pass
def test_Cisco_IOS_XE_sanet(self):
pass
def test_Cisco_IOS_XE_segment_routing(self):
pass
def test_Cisco_IOS_XE_service_chain(self):
pass
def test_Cisco_IOS_XE_service_discovery(self):
pass
def test_Cisco_IOS_XE_service_insertion(self):
pass
def test_Cisco_IOS_XE_service_routing(self):
pass
def test_Cisco_IOS_XE_sla(self):
pass
def test_Cisco_IOS_XE_snmp(self):
pass
def test_Cisco_IOS_XE_spanning_tree(self):
pass
def test_Cisco_IOS_XE_spanning_tree_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_spanning_tree_oper import StpPortState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_spanning_tree_oper import StpPortRole
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_spanning_tree_oper import StpLinkRole
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_spanning_tree_oper import StpPortGuard
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_spanning_tree_oper import StpPortBpduguard
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_spanning_tree_oper import StpPortBpdufilter
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_spanning_tree_oper import StpMode
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_spanning_tree_oper import StpDetails
def test_Cisco_IOS_XE_stackwise_virtual(self):
pass
def test_Cisco_IOS_XE_switch(self):
pass
def test_Cisco_IOS_XE_tcam_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_tcam_oper import TcamDetails
def test_Cisco_IOS_XE_template(self):
pass
def test_Cisco_IOS_XE_track(self):
pass
def test_Cisco_IOS_XE_trustsec_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_trustsec_oper import CtsOdmBindingSource
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_trustsec_oper import SxpConState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_trustsec_oper import SxpConMode
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_trustsec_oper import TrustsecState
def test_Cisco_IOS_XE_tunnel(self):
pass
def test_Cisco_IOS_XE_types(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import AccessListInOutType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import AclUdpPortType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import AclTcpPortType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import RedistOspfExternalType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import CosValueType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import DscpType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import ExpValueType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import InterfaceType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import MobilityType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import PrecValueType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import PrecedenceType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import LimitDcNonDcType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import QosValueType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import WeekdayType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import BgpIpv4AfType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import BgpIpv6AfType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import CommunityWellKnownType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import CommunityWellKnownAddType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_types import MonthType
def test_Cisco_IOS_XE_udld(self):
pass
def test_Cisco_IOS_XE_umbrella(self):
pass
def test_Cisco_IOS_XE_utd(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_utd import UtdCategoryType
def test_Cisco_IOS_XE_virtual_service_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_virtual_service_oper import VirtualServices
def test_Cisco_IOS_XE_vlan(self):
pass
def test_Cisco_IOS_XE_vlan_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_vlan_oper import VlanStatusType
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_vlan_oper import Vlans
def test_Cisco_IOS_XE_voice(self):
pass
def test_Cisco_IOS_XE_vpdn(self):
pass
def test_Cisco_IOS_XE_vrrp_oper(self):
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_vrrp_oper import ProtoVersion
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_vrrp_oper import MasterReason
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_vrrp_oper import VrrpProtoState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_vrrp_oper import OmpStateUpdown
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_vrrp_oper import TrackState
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_vrrp_oper import VrrpOperData
def test_Cisco_IOS_XE_vservice(self):
pass
def test_Cisco_IOS_XE_vstack(self):
pass
def test_Cisco_IOS_XE_vtp(self):
pass
def test_Cisco_IOS_XE_wccp(self):
pass
def test_Cisco_IOS_XE_wsma(self):
pass
def test_Cisco_IOS_XE_zone(self):
pass
def test_DIAL_CONTROL_MIB(self):
from ydk.models.cisco_ios_xe.DIAL_CONTROL_MIB import DIALCONTROLMIB
def test_DIFFSERV_DSCP_TC(self):
pass
def test_DIFFSERV_MIB(self):
from ydk.models.cisco_ios_xe.DIFFSERV_MIB import Diffservtbparamsimpletokenbucket
from ydk.models.cisco_ios_xe.DIFFSERV_MIB import Diffservtbparamavgrate
from ydk.models.cisco_ios_xe.DIFFSERV_MIB import Diffservtbparamsrtcmblind
from ydk.models.cisco_ios_xe.DIFFSERV_MIB import Diffservtbparamsrtcmaware
from ydk.models.cisco_ios_xe.DIFFSERV_MIB import Diffservtbparamtrtcmblind
from ydk.models.cisco_ios_xe.DIFFSERV_MIB import Diffservtbparamtrtcmaware
from ydk.models.cisco_ios_xe.DIFFSERV_MIB import Diffservtbparamtswtcm
from ydk.models.cisco_ios_xe.DIFFSERV_MIB import Diffservschedulerpriority
from ydk.models.cisco_ios_xe.DIFFSERV_MIB import Diffservschedulerwrr
from ydk.models.cisco_ios_xe.DIFFSERV_MIB import Diffservschedulerwfq
from ydk.models.cisco_ios_xe.DIFFSERV_MIB import IfDirection
from ydk.models.cisco_ios_xe.DIFFSERV_MIB import DIFFSERVMIB
def test_DISMAN_EVENT_MIB(self):
from ydk.models.cisco_ios_xe.DISMAN_EVENT_MIB import FailureReason
from ydk.models.cisco_ios_xe.DISMAN_EVENT_MIB import DISMANEVENTMIB
def test_DISMAN_EXPRESSION_MIB(self):
from ydk.models.cisco_ios_xe.DISMAN_EXPRESSION_MIB import DISMANEXPRESSIONMIB
def test_DRAFT_MSDP_MIB(self):
from ydk.models.cisco_ios_xe.DRAFT_MSDP_MIB import DRAFTMSDPMIB
def test_DS1_MIB(self):
from ydk.models.cisco_ios_xe.DS1_MIB import DS1MIB
def test_DS3_MIB(self):
from ydk.models.cisco_ios_xe.DS3_MIB import DS3MIB
def test_ENTITY_MIB(self):
from ydk.models.cisco_ios_xe.ENTITY_MIB import PhysicalClass
from ydk.models.cisco_ios_xe.ENTITY_MIB import ENTITYMIB
def test_ENTITY_SENSOR_MIB(self):
from ydk.models.cisco_ios_xe.ENTITY_SENSOR_MIB import EntitySensorDataType
from ydk.models.cisco_ios_xe.ENTITY_SENSOR_MIB import EntitySensorDataScale
from ydk.models.cisco_ios_xe.ENTITY_SENSOR_MIB import EntitySensorStatus
from ydk.models.cisco_ios_xe.ENTITY_SENSOR_MIB import ENTITYSENSORMIB
def test_ENTITY_STATE_MIB(self):
from ydk.models.cisco_ios_xe.ENTITY_STATE_MIB import ENTITYSTATEMIB
def test_ENTITY_STATE_TC_MIB(self):
from ydk.models.cisco_ios_xe.ENTITY_STATE_TC_MIB import EntityAdminState
from ydk.models.cisco_ios_xe.ENTITY_STATE_TC_MIB import EntityOperState
from ydk.models.cisco_ios_xe.ENTITY_STATE_TC_MIB import EntityUsageState
from ydk.models.cisco_ios_xe.ENTITY_STATE_TC_MIB import EntityStandbyStatus
def test_ETHER_WIS(self):
from ydk.models.cisco_ios_xe.ETHER_WIS import ETHERWIS
def test_EXPRESSION_MIB(self):
from ydk.models.cisco_ios_xe.EXPRESSION_MIB import EXPRESSIONMIB
def test_EtherLike_MIB(self):
from ydk.models.cisco_ios_xe.EtherLike_MIB import Dot3Testtdr
from ydk.models.cisco_ios_xe.EtherLike_MIB import Dot3Testloopback
from ydk.models.cisco_ios_xe.EtherLike_MIB import Dot3Erroriniterror
from ydk.models.cisco_ios_xe.EtherLike_MIB import Dot3Errorloopbackerror
from ydk.models.cisco_ios_xe.EtherLike_MIB import EtherLikeMIB
def test_FRAME_RELAY_DTE_MIB(self):
from ydk.models.cisco_ios_xe.FRAME_RELAY_DTE_MIB import FRAMERELAYDTEMIB
def test_HCNUM_TC(self):
pass
def test_IANA_ADDRESS_FAMILY_NUMBERS_MIB(self):
from ydk.models.cisco_ios_xe.IANA_ADDRESS_FAMILY_NUMBERS_MIB import AddressFamilyNumbers
def test_IANA_RTPROTO_MIB(self):
from ydk.models.cisco_ios_xe.IANA_RTPROTO_MIB import IANAipRouteProtocol
from ydk.models.cisco_ios_xe.IANA_RTPROTO_MIB import IANAipMRouteProtocol
def test_IANAifType_MIB(self):
from ydk.models.cisco_ios_xe.IANAifType_MIB import IANAifType
from ydk.models.cisco_ios_xe.IANAifType_MIB import IANAtunnelType
def test_IEEE8021_TC_MIB(self):
from ydk.models.cisco_ios_xe.IEEE8021_TC_MIB import IEEE8021PriorityCodePoint
from ydk.models.cisco_ios_xe.IEEE8021_TC_MIB import IEEE8021BridgePortType
from ydk.models.cisco_ios_xe.IEEE8021_TC_MIB import IEEE8021ServiceSelectorType
from ydk.models.cisco_ios_xe.IEEE8021_TC_MIB import IEEE8021PortAcceptableFrameTypes
def test_IF_MIB(self):
from ydk.models.cisco_ios_xe.IF_MIB import IFMIB
def test_IGMP_STD_MIB(self):
from ydk.models.cisco_ios_xe.IGMP_STD_MIB import IGMPSTDMIB
def test_INET_ADDRESS_MIB(self):
from ydk.models.cisco_ios_xe.INET_ADDRESS_MIB import InetAddressType
from ydk.models.cisco_ios_xe.INET_ADDRESS_MIB import InetScopeType
from ydk.models.cisco_ios_xe.INET_ADDRESS_MIB import InetVersion
def test_INTEGRATED_SERVICES_MIB(self):
from ydk.models.cisco_ios_xe.INTEGRATED_SERVICES_MIB import QosService
from ydk.models.cisco_ios_xe.INTEGRATED_SERVICES_MIB import INTEGRATEDSERVICESMIB
def test_INT_SERV_MIB(self):
from ydk.models.cisco_ios_xe.INT_SERV_MIB import QosService
from ydk.models.cisco_ios_xe.INT_SERV_MIB import INTSERVMIB
def test_IPMROUTE_STD_MIB(self):
from ydk.models.cisco_ios_xe.IPMROUTE_STD_MIB import IPMROUTESTDMIB
def test_IPV6_FLOW_LABEL_MIB(self):
pass
def test_IP_FORWARD_MIB(self):
from ydk.models.cisco_ios_xe.IP_FORWARD_MIB import IPFORWARDMIB
def test_IP_MIB(self):
from ydk.models.cisco_ios_xe.IP_MIB import IpAddressOriginTC
from ydk.models.cisco_ios_xe.IP_MIB import IpAddressStatusTC
from ydk.models.cisco_ios_xe.IP_MIB import IpAddressPrefixOriginTC
from ydk.models.cisco_ios_xe.IP_MIB import IPMIB
def test_LLDP_MIB(self):
from ydk.models.cisco_ios_xe.LLDP_MIB import LldpChassisIdSubtype
from ydk.models.cisco_ios_xe.LLDP_MIB import LldpPortIdSubtype
from ydk.models.cisco_ios_xe.LLDP_MIB import LldpManAddrIfSubtype
from ydk.models.cisco_ios_xe.LLDP_MIB import LLDPMIB
def test_MPLS_L3VPN_STD_MIB(self):
from ydk.models.cisco_ios_xe.MPLS_L3VPN_STD_MIB import MplsL3VpnRtType
from ydk.models.cisco_ios_xe.MPLS_L3VPN_STD_MIB import MPLSL3VPNSTDMIB
def test_MPLS_LDP_GENERIC_STD_MIB(self):
from ydk.models.cisco_ios_xe.MPLS_LDP_GENERIC_STD_MIB import MPLSLDPGENERICSTDMIB
def test_MPLS_LDP_STD_MIB(self):
from ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB import MPLSLDPSTDMIB
def test_MPLS_LSR_STD_MIB(self):
from ydk.models.cisco_ios_xe.MPLS_LSR_STD_MIB import MPLSLSRSTDMIB
def test_MPLS_TC_MIB(self):
from ydk.models.cisco_ios_xe.MPLS_TC_MIB import MplsInitialCreationSource
from ydk.models.cisco_ios_xe.MPLS_TC_MIB import MplsLdpLabelTypes
def test_MPLS_TC_STD_MIB(self):
from ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB import MplsLabelDistributionMethod
from ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB import MplsLdpLabelType
from ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB import MplsLspType
from ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB import MplsOwner
from ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB import MplsRetentionMode
from ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB import TeHopAddressType
def test_MPLS_TE_STD_MIB(self):
from ydk.models.cisco_ios_xe.MPLS_TE_STD_MIB import MPLSTESTDMIB
def test_MPLS_VPN_MIB(self):
from ydk.models.cisco_ios_xe.MPLS_VPN_MIB import MPLSVPNMIB
def test_NHRP_MIB(self):
from ydk.models.cisco_ios_xe.NHRP_MIB import NHRPMIB
def test_NOTIFICATION_LOG_MIB(self):
from ydk.models.cisco_ios_xe.NOTIFICATION_LOG_MIB import NOTIFICATIONLOGMIB
def test_OSPF_MIB(self):
from ydk.models.cisco_ios_xe.OSPF_MIB import Status
from ydk.models.cisco_ios_xe.OSPF_MIB import OspfAuthenticationType
from ydk.models.cisco_ios_xe.OSPF_MIB import OSPFMIB
def test_OSPF_TRAP_MIB(self):
from ydk.models.cisco_ios_xe.OSPF_TRAP_MIB import OSPFTRAPMIB
def test_PIM_MIB(self):
from ydk.models.cisco_ios_xe.PIM_MIB import PIMMIB
def test_POWER_ETHERNET_MIB(self):
from ydk.models.cisco_ios_xe.POWER_ETHERNET_MIB import POWERETHERNETMIB
def test_P_BRIDGE_MIB(self):
from ydk.models.cisco_ios_xe.P_BRIDGE_MIB import EnabledStatus
from ydk.models.cisco_ios_xe.P_BRIDGE_MIB import PBRIDGEMIB
def test_PerfHist_TC_MIB(self):
pass
def test_Q_BRIDGE_MIB(self):
from ydk.models.cisco_ios_xe.Q_BRIDGE_MIB import QBRIDGEMIB
def test_RFC1155_SMI(self):
pass
def test_RFC1213_MIB(self):
from ydk.models.cisco_ios_xe.RFC1213_MIB import RFC1213MIB
def test_RFC1315_MIB(self):
from ydk.models.cisco_ios_xe.RFC1315_MIB import RFC1315MIB
def test_RFC_1212(self):
pass
def test_RFC_1215(self):
pass
def test_RMON2_MIB(self):
from ydk.models.cisco_ios_xe.RMON2_MIB import RMON2MIB
def test_RMON_MIB(self):
from ydk.models.cisco_ios_xe.RMON_MIB import Rmoneventsv2
from ydk.models.cisco_ios_xe.RMON_MIB import EntryStatus
from ydk.models.cisco_ios_xe.RMON_MIB import RMONMIB
def test_RSVP_MIB(self):
from ydk.models.cisco_ios_xe.RSVP_MIB import RsvpEncapsulation
from ydk.models.cisco_ios_xe.RSVP_MIB import RSVPMIB
def test_SNMP_FRAMEWORK_MIB(self):
from ydk.models.cisco_ios_xe.SNMP_FRAMEWORK_MIB import Snmpauthprotocols
from ydk.models.cisco_ios_xe.SNMP_FRAMEWORK_MIB import Snmpprivprotocols
from ydk.models.cisco_ios_xe.SNMP_FRAMEWORK_MIB import SnmpSecurityLevel
from ydk.models.cisco_ios_xe.SNMP_FRAMEWORK_MIB import SNMPFRAMEWORKMIB
def test_SNMP_PROXY_MIB(self):
from ydk.models.cisco_ios_xe.SNMP_PROXY_MIB import SNMPPROXYMIB
def test_SNMP_TARGET_MIB(self):
from ydk.models.cisco_ios_xe.SNMP_TARGET_MIB import SNMPTARGETMIB
def test_SNMPv2_MIB(self):
from ydk.models.cisco_ios_xe.SNMPv2_MIB import SNMPv2MIB
def test_SNMPv2_TC(self):
from ydk.models.cisco_ios_xe.SNMPv2_TC import TruthValue
from ydk.models.cisco_ios_xe.SNMPv2_TC import RowStatus
from ydk.models.cisco_ios_xe.SNMPv2_TC import StorageType
def test_SONET_MIB(self):
from ydk.models.cisco_ios_xe.SONET_MIB import SONETMIB
def test_TCP_MIB(self):
from ydk.models.cisco_ios_xe.TCP_MIB import TCPMIB
def test_TOKENRING_MIB(self):
from ydk.models.cisco_ios_xe.TOKENRING_MIB import Dot5Testinsertfunc
from ydk.models.cisco_ios_xe.TOKENRING_MIB import Dot5Testfullduplexloopback
from ydk.models.cisco_ios_xe.TOKENRING_MIB import Dot5Chipsetibm16
from ydk.models.cisco_ios_xe.TOKENRING_MIB import Dot5Chipsettitms380
from ydk.models.cisco_ios_xe.TOKENRING_MIB import Dot5Chipsettitms380C16
from ydk.models.cisco_ios_xe.TOKENRING_MIB import TOKENRINGMIB
def test_TOKEN_RING_RMON_MIB(self):
from ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB import EntryStatus
from ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB import TOKENRINGRMONMIB
def test_TUNNEL_MIB(self):
from ydk.models.cisco_ios_xe.TUNNEL_MIB import TUNNELMIB
def test_UDP_MIB(self):
from ydk.models.cisco_ios_xe.UDP_MIB import UDPMIB
def test_VPN_TC_STD_MIB(self):
pass
def test_cisco_bridge_common(self):
from ydk.models.cisco_ios_xe.cisco_bridge_common import MacLimitNotificationType
from ydk.models.cisco_ios_xe.cisco_bridge_common import NotifNone
from ydk.models.cisco_ios_xe.cisco_bridge_common import NotifSnmpTrap
from ydk.models.cisco_ios_xe.cisco_bridge_common import NotifSyslog
from ydk.models.cisco_ios_xe.cisco_bridge_common import NotifSyslogAndSnmpTrap
from ydk.models.cisco_ios_xe.cisco_bridge_common import EthTrafficClass
from ydk.models.cisco_ios_xe.cisco_bridge_common import MacAgingType
from ydk.models.cisco_ios_xe.cisco_bridge_common import MacLimitAction
from ydk.models.cisco_ios_xe.cisco_bridge_common import MacSecureAction
def test_cisco_bridge_domain(self):
from ydk.models.cisco_ios_xe.cisco_bridge_domain import BridgeDomainStateType
from ydk.models.cisco_ios_xe.cisco_bridge_domain import BridgeDomainConfig
from ydk.models.cisco_ios_xe.cisco_bridge_domain import BridgeDomainState
from ydk.models.cisco_ios_xe.cisco_bridge_domain import ClearBridgeDomain
from ydk.models.cisco_ios_xe.cisco_bridge_domain import ClearMacAddress
from ydk.models.cisco_ios_xe.cisco_bridge_domain import CreateParameterizedBridgeDomains
def test_cisco_ethernet(self):
from ydk.models.cisco_ios_xe.cisco_ethernet import EthIfSpeed
from ydk.models.cisco_ios_xe.cisco_ethernet import EthIfSpeed10mb
from ydk.models.cisco_ios_xe.cisco_ethernet import EthIfSpeed100mb
from ydk.models.cisco_ios_xe.cisco_ethernet import EthIfSpeed1gb
from ydk.models.cisco_ios_xe.cisco_ethernet import EthIfSpeed10gb
from ydk.models.cisco_ios_xe.cisco_ethernet import EthIfSpeed40gb
from ydk.models.cisco_ios_xe.cisco_ethernet import EthIfSpeed100gb
def test_cisco_ia(self):
from ydk.models.cisco_ios_xe.cisco_ia import CiaSyncType
from ydk.models.cisco_ios_xe.cisco_ia import CiaLogLevel
from ydk.models.cisco_ios_xe.cisco_ia import OnepLogLevel
from ydk.models.cisco_ios_xe.cisco_ia import SyslogSeverity
from ydk.models.cisco_ios_xe.cisco_ia import SyncFrom
from ydk.models.cisco_ios_xe.cisco_ia import SaveConfig
from ydk.models.cisco_ios_xe.cisco_ia import IsSyncing
from ydk.models.cisco_ios_xe.cisco_ia import Checkpoint
from ydk.models.cisco_ios_xe.cisco_ia import Revert
from ydk.models.cisco_ios_xe.cisco_ia import Rollback
def test_cisco_ospf(self):
from ydk.models.cisco_ios_xe.cisco_ospf import OspfExternalType
from ydk.models.cisco_ios_xe.cisco_ospf import AccessListInOutType
from ydk.models.cisco_ios_xe.cisco_ospf import PrefixApplicability
from ydk.models.cisco_ios_xe.cisco_ospf import OspfLogAdj
def test_cisco_policy(self):
pass
def test_cisco_policy_filters(self):
pass
def test_cisco_policy_target(self):
pass
def test_cisco_pw(self):
from ydk.models.cisco_ios_xe.cisco_pw import PwEncapsulationType
from ydk.models.cisco_ios_xe.cisco_pw import PwEncapMpls
from ydk.models.cisco_ios_xe.cisco_pw import PwVcType
from ydk.models.cisco_ios_xe.cisco_pw import PwVcTypeEther
from ydk.models.cisco_ios_xe.cisco_pw import PwVcTypeVlan
from ydk.models.cisco_ios_xe.cisco_pw import PwVcTypeVlanPassthrough
from ydk.models.cisco_ios_xe.cisco_pw import PwLoadBalanceType
from ydk.models.cisco_ios_xe.cisco_pw import PwLbEthernetType
from ydk.models.cisco_ios_xe.cisco_pw import PwLbEthSrcMac
from ydk.models.cisco_ios_xe.cisco_pw import PwLbEthDstMac
from ydk.models.cisco_ios_xe.cisco_pw import PwLbEthSrcDstMac
from ydk.models.cisco_ios_xe.cisco_pw import PwLbIpType
from ydk.models.cisco_ios_xe.cisco_pw import PwLbIpSrcIp
from ydk.models.cisco_ios_xe.cisco_pw import PwLbIpDstIp
from ydk.models.cisco_ios_xe.cisco_pw import PwLbIpSrcDstIp
from ydk.models.cisco_ios_xe.cisco_pw import PwSignalingProtocolType
from ydk.models.cisco_ios_xe.cisco_pw import PwSignalingProtocolNone
from ydk.models.cisco_ios_xe.cisco_pw import PwSignalingProtocolLdp
from ydk.models.cisco_ios_xe.cisco_pw import PwSignalingProtocolBgp
from ydk.models.cisco_ios_xe.cisco_pw import PwSequencingType
from ydk.models.cisco_ios_xe.cisco_pw import PwSequencingReceive
from ydk.models.cisco_ios_xe.cisco_pw import PwSequencingTransmit
from ydk.models.cisco_ios_xe.cisco_pw import PwSequencingBoth
from ydk.models.cisco_ios_xe.cisco_pw import PwOperStateType
from ydk.models.cisco_ios_xe.cisco_pw import PseudowireConfig
from ydk.models.cisco_ios_xe.cisco_pw import PseudowireState
def test_cisco_routing_ext(self):
from ydk.models.cisco_ios_xe.cisco_routing_ext import Rip
from ydk.models.cisco_ios_xe.cisco_routing_ext import IsIs
from ydk.models.cisco_ios_xe.cisco_routing_ext import Bgp
from ydk.models.cisco_ios_xe.cisco_routing_ext import Eigrp
from ydk.models.cisco_ios_xe.cisco_routing_ext import Mobile
def test_cisco_self_mgmt(self):
from ydk.models.cisco_ios_xe.cisco_self_mgmt import NetconfYang
def test_cisco_smart_license(self):
from ydk.models.cisco_ios_xe.cisco_smart_license import NotifRegisterFailureEnum
from ydk.models.cisco_ios_xe.cisco_smart_license import RegistrationStateEnum
from ydk.models.cisco_ios_xe.cisco_smart_license import AuthorizationStateEnum
from ydk.models.cisco_ios_xe.cisco_smart_license import UtilityReportingTypeEnum
from ydk.models.cisco_ios_xe.cisco_smart_license import TransportTypeEnum
from ydk.models.cisco_ios_xe.cisco_smart_license import EnforcementModeEnum
from ydk.models.cisco_ios_xe.cisco_smart_license import ErrorEnum
from ydk.models.cisco_ios_xe.cisco_smart_license import RegisterIdToken
from ydk.models.cisco_ios_xe.cisco_smart_license import DeRegister
from ydk.models.cisco_ios_xe.cisco_smart_license import RenewId
from ydk.models.cisco_ios_xe.cisco_smart_license import RenewAuth
from ydk.models.cisco_ios_xe.cisco_smart_license import Licensing
def test_cisco_smart_license_errors(self):
pass
def test_cisco_storm_control(self):
from ydk.models.cisco_ios_xe.cisco_storm_control import StormControlAction
from ydk.models.cisco_ios_xe.cisco_storm_control import ActionDrop
from ydk.models.cisco_ios_xe.cisco_storm_control import ActionSnmpTrap
from ydk.models.cisco_ios_xe.cisco_storm_control import ActionShutdown
def test_cisco_xe_ietf_yang_push_ext(self):
from ydk.models.cisco_ios_xe.cisco_xe_ietf_yang_push_ext import EncodeTdl
from ydk.models.cisco_ios_xe.cisco_xe_ietf_yang_push_ext import TdlStream
def test_common_mpls_static(self):
from ydk.models.cisco_ios_xe.common_mpls_static import LspType
from ydk.models.cisco_ios_xe.common_mpls_static import LspIPv4
from ydk.models.cisco_ios_xe.common_mpls_static import LspIPv6
from ydk.models.cisco_ios_xe.common_mpls_static import LspVrf
from ydk.models.cisco_ios_xe.common_mpls_static import Lsp
from ydk.models.cisco_ios_xe.common_mpls_static import NexthopResolutionType
from ydk.models.cisco_ios_xe.common_mpls_static import StaticNexthop
from ydk.models.cisco_ios_xe.common_mpls_static import BgpRouteNexthop
from ydk.models.cisco_ios_xe.common_mpls_static import OspfRouteNexthop
from ydk.models.cisco_ios_xe.common_mpls_static import IsisRouteNexthop
from ydk.models.cisco_ios_xe.common_mpls_static import Hoptype
from ydk.models.cisco_ios_xe.common_mpls_static import MplsStatic
def test_common_mpls_types(self):
from ydk.models.cisco_ios_xe.common_mpls_types import IetfMplsLabel
def test_nvo(self):
from ydk.models.cisco_ios_xe.nvo import OverlayEncapType
from ydk.models.cisco_ios_xe.nvo import VxlanType
from ydk.models.cisco_ios_xe.nvo import NvgreType
from ydk.models.cisco_ios_xe.nvo import NvoInstances
def test_pim(self):
from ydk.models.cisco_ios_xe.pim import GroupToRpMappingMode
from ydk.models.cisco_ios_xe.pim import DmMappingMode
from ydk.models.cisco_ios_xe.pim import SmMappingMode
from ydk.models.cisco_ios_xe.pim import PimBidirMappingMode
from ydk.models.cisco_ios_xe.pim import SsmMappingMode
from ydk.models.cisco_ios_xe.pim import AsmMappingMode
from ydk.models.cisco_ios_xe.pim import OtherMappingMode
from ydk.models.cisco_ios_xe.pim import RouteProtocolType
from ydk.models.cisco_ios_xe.pim import MrouteProtocolType
from ydk.models.cisco_ios_xe.pim import PimMode
from ydk.models.cisco_ios_xe.pim import Origin
def test_policy_attr(self):
pass
def test_policy_types(self):
from ydk.models.ietf.policy_types import PolicyType
from ydk.models.ietf.policy_types import Qos
from ydk.models.ietf.policy_types import Pbr
from ydk.models.ietf.policy_types import PerfMon
from ydk.models.ietf.policy_types import AccessControl
from ydk.models.ietf.policy_types import Appnav
from ydk.models.ietf.policy_types import Control
from ydk.models.ietf.policy_types import Inspect
from ydk.models.ietf.policy_types import PacketService
from ydk.models.ietf.policy_types import Service
from ydk.models.ietf.policy_types import ClassType
from ydk.models.ietf.policy_types import QosClass
from ydk.models.ietf.policy_types import AccessControlClass
from ydk.models.ietf.policy_types import AppnavClass
from ydk.models.ietf.policy_types import ControlClass
from ydk.models.ietf.policy_types import InspectClass
from ydk.models.ietf.policy_types import Cos
from ydk.models.ietf.policy_types import CosInner
from ydk.models.ietf.policy_types import Ipv4AclName
from ydk.models.ietf.policy_types import Ipv6AclName
from ydk.models.ietf.policy_types import Ipv4Acl
from ydk.models.ietf.policy_types import Ipv6Acl
from ydk.models.ietf.policy_types import InputInterface
from ydk.models.ietf.policy_types import SrcMac
from ydk.models.ietf.policy_types import DstMac
from ydk.models.ietf.policy_types import MplsExpTop
from ydk.models.ietf.policy_types import MplsExpImp
from ydk.models.ietf.policy_types import PacketLength
from ydk.models.ietf.policy_types import Prec
from ydk.models.ietf.policy_types import QosGroup
from ydk.models.ietf.policy_types import Vlan
from ydk.models.ietf.policy_types import VlanInner
from ydk.models.ietf.policy_types import AtmClp
from ydk.models.ietf.policy_types import AtmVci
from ydk.models.ietf.policy_types import Dei
from ydk.models.ietf.policy_types import DeiInner
from ydk.models.ietf.policy_types import FlowIp
from ydk.models.ietf.policy_types import FlowRecord
from ydk.models.ietf.policy_types import FlowDe
from ydk.models.ietf.policy_types import FlowDlci
from ydk.models.ietf.policy_types import WlanUserPriority
from ydk.models.ietf.policy_types import DiscardClass
from ydk.models.ietf.policy_types import ClassMap
from ydk.models.ietf.policy_types import Metadata
from ydk.models.ietf.policy_types import Application
from ydk.models.ietf.policy_types import SecurityGroupName
from ydk.models.ietf.policy_types import SecurityGroupTag
from ydk.models.ietf.policy_types import IpRtp
from ydk.models.ietf.policy_types import Vpls
from ydk.models.ietf.policy_types import Metric
from ydk.models.ietf.policy_types import RateUnit
from ydk.models.ietf.policy_types import Direction
def test_tailf_cli_extensions(self):
pass
def test_tailf_common(self):
pass
def test_tailf_common_monitoring(self):
pass
def test_tailf_common_query(self):
pass
def test_tailf_confd_monitoring(self):
from ydk.models.cisco_ios_xe.tailf_confd_monitoring import ConfdState
def test_tailf_meta_extensions(self):
pass
def test_tailf_netconf_inactive(self):
pass
def test_tailf_netconf_monitoring(self):
from ydk.models.cisco_ios_xe.tailf_netconf_monitoring import CliConsole
from ydk.models.cisco_ios_xe.tailf_netconf_monitoring import CliSsh
from ydk.models.cisco_ios_xe.tailf_netconf_monitoring import CliTcp
from ydk.models.cisco_ios_xe.tailf_netconf_monitoring import WebuiHttp
from ydk.models.cisco_ios_xe.tailf_netconf_monitoring import WebuiHttps
from ydk.models.cisco_ios_xe.tailf_netconf_monitoring import NetconfTcp
from ydk.models.cisco_ios_xe.tailf_netconf_monitoring import SnmpUdp
from ydk.models.cisco_ios_xe.tailf_netconf_monitoring import RestHttp
from ydk.models.cisco_ios_xe.tailf_netconf_monitoring import RestHttps
def test_tailf_netconf_query(self):
from ydk.models.cisco_ios_xe.tailf_netconf_query import StartQuery
from ydk.models.cisco_ios_xe.tailf_netconf_query import FetchQueryResult
from ydk.models.cisco_ios_xe.tailf_netconf_query import ResetQuery
from ydk.models.cisco_ios_xe.tailf_netconf_query import StopQuery
def test_tailf_netconf_transactions(self):
from ydk.models.cisco_ios_xe.tailf_netconf_transactions import StartTransaction
from ydk.models.cisco_ios_xe.tailf_netconf_transactions import PrepareTransaction
from ydk.models.cisco_ios_xe.tailf_netconf_transactions import CommitTransaction
from ydk.models.cisco_ios_xe.tailf_netconf_transactions import AbortTransaction
def test_tailf_rest_error(self):
pass
def test_tailf_rest_query(self):
pass
def test_tailf_xsd_types(self):
pass
if __name__ == '__main__':
unittest.main()
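# --- Editor's note (illustrative, not part of the generated suite) ---
# Each test_* method above is an import-only smoke test: it simply imports the
# generated YDK binding classes so that a missing or broken bundle fails fast.
# A single bundle can be exercised in isolation via unittest's dotted path;
# the module and class names below are placeholders for this file's real names:
#
#   python -m unittest <this_module>.<TestCaseClass>.test_Cisco_IOS_XE_bgp_oper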
| 42.386703 | 115 | 0.786581 |
0696290241244bc8df6f492fdd1d863f11e762cb | 3,808 | py | Python | superset/migrations/versions/18e88e1cc004_making_audit_nullable.py | maartenbreddels/incubator-superset | 8fe6f126226c9c03161d50c91d4a948309842217 | [
"Apache-2.0"
] | 2 | 2018-03-01T02:23:17.000Z | 2019-12-10T08:39:27.000Z | superset/migrations/versions/18e88e1cc004_making_audit_nullable.py | maartenbreddels/incubator-superset | 8fe6f126226c9c03161d50c91d4a948309842217 | [
"Apache-2.0"
] | 9 | 2018-11-06T23:57:47.000Z | 2019-11-27T21:04:16.000Z | superset/migrations/versions/18e88e1cc004_making_audit_nullable.py | maartenbreddels/incubator-superset | 8fe6f126226c9c03161d50c91d4a948309842217 | [
"Apache-2.0"
] | 1 | 2019-12-25T08:03:34.000Z | 2019-12-25T08:03:34.000Z | # -*- coding: utf-8 -*-
"""making audit nullable
Revision ID: 18e88e1cc004
Revises: 430039611635
Create Date: 2016-03-13 21:30:24.833107
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '18e88e1cc004'
down_revision = '430039611635'
def upgrade():
try:
op.alter_column(
'clusters', 'changed_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'clusters', 'created_on',
existing_type=sa.DATETIME(), nullable=True)
        # Drop the two unnamed foreign keys on `columns` (they back the
        # created_by_fk and changed_by_fk columns removed just below).
        op.drop_constraint(None, 'columns', type_='foreignkey')
        op.drop_constraint(None, 'columns', type_='foreignkey')
op.drop_column('columns', 'created_on')
op.drop_column('columns', 'created_by_fk')
op.drop_column('columns', 'changed_on')
op.drop_column('columns', 'changed_by_fk')
op.alter_column(
'css_templates', 'changed_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'css_templates', 'created_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'dashboards', 'changed_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'dashboards', 'created_on',
existing_type=sa.DATETIME(),
nullable=True)
op.create_unique_constraint(None, 'dashboards', ['slug'])
op.alter_column(
'datasources', 'changed_by_fk',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column(
'datasources', 'changed_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'datasources', 'created_by_fk',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column(
'datasources', 'created_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'dbs', 'changed_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'dbs', 'created_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'slices', 'changed_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'slices', 'created_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'sql_metrics', 'changed_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'sql_metrics', 'created_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'table_columns', 'changed_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'table_columns', 'created_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'tables', 'changed_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'tables', 'created_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'url', 'changed_on',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column(
'url', 'created_on',
existing_type=sa.DATETIME(),
nullable=True)
    except Exception:
        # Best-effort migration: if a backend rejects (or has already applied)
        # any of these changes, the failure is deliberately swallowed.
        pass
def downgrade():
pass
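# --- Editor's note (illustrative, not part of the original migration) ---
# A revision like this is normally applied through Alembic's runner rather than
# executed directly, e.g. `alembic upgrade head` (or Superset's own CLI wrapper
# around it). downgrade() is a deliberate no-op, so the relaxed NULL
# constraints are not reinstated on rollback.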
| 30.709677 | 65 | 0.563288 |
5c88ae8b42d22cda84746b137a46d86bfa335390 | 1,651 | py | Python | orchestra/prompts/ask_ordinal.py | enihsyou/LeetCode | 230325d1dfd666ee53f304edf74c9c0f60f81d75 | [
"MIT"
] | null | null | null | orchestra/prompts/ask_ordinal.py | enihsyou/LeetCode | 230325d1dfd666ee53f304edf74c9c0f60f81d75 | [
"MIT"
] | null | null | null | orchestra/prompts/ask_ordinal.py | enihsyou/LeetCode | 230325d1dfd666ee53f304edf74c9c0f60f81d75 | [
"MIT"
] | null | null | null | # coding=utf-8
# Prompt the user for the question ordinal (the LeetCode problem URL).
import re
import urllib.parse
from prompt_toolkit.document import Document
from prompt_toolkit.validation import ValidationError, Validator
from prompts import AskSession
from prompts.leetcode import fetch_problem_graphql
async def question(session: AskSession):
"""
    Ask the user which LeetCode problem they want to work on.
"""
return await session.prompt(
message="Enter the problem URL from LeetCode site: ",
validator=LeetCodeUrlValidator(session)
)
class LeetCodeUrlValidator(Validator):
def __init__(self, session: AskSession):
self.session = session
def validate(self, document: Document) -> None:
raise AssertionError("should use async version of validate function.")
async def validate_async(self, document: Document) -> None:
(_, hostname, path, _, _, _) = urllib.parse.urlparse(
url=document.text)
if 'leetcode' not in hostname.lower():
raise ValidationError(
                message='Please enter a URL that points to the LeetCode site.')
if (match := re.search(r'/problems/([^/]+)/?', path)) is None:
raise ValidationError(
                message='Please enter a valid LeetCode problem URL.')
title_slug = match.group(1)
if (prob := await fetch_problem_graphql(hostname, title_slug)) is None:
raise ValidationError(
message=f'Cannot fetch metadata for problem "{title_slug}"')
else:
self.session.metadata = prob
prob["siteUrlPrefix"] = "https://" + hostname
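# --- Editor's note: illustrative usage sketch, not part of the original module.
# Assumes AskSession exposes an async prompt() wrapper and a `metadata` slot, as
# used above; the driver below is hypothetical.
#
#   import asyncio
#   from prompts import AskSession
#
#   async def main():
#       session = AskSession()
#       url = await question(session)
#       print(url, session.metadata["siteUrlPrefix"])
#
#   asyncio.run(main())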
| 31.150943 | 79 | 0.660812 |
720a31b57d7b9e30be3da16adcfa7945baae2267 | 869 | py | Python | test/test_error_data.py | apsinha-equinix/controlm-client | f24e0f935c82306074f4e4025cf62c217348dc3f | [
"MIT"
] | 1 | 2021-12-02T08:49:25.000Z | 2021-12-02T08:49:25.000Z | test/test_error_data.py | apsinha-equinix/controlm-client | f24e0f935c82306074f4e4025cf62c217348dc3f | [
"MIT"
] | null | null | null | test/test_error_data.py | apsinha-equinix/controlm-client | f24e0f935c82306074f4e4025cf62c217348dc3f | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.18.3
Contact: support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import controlm_client
from controlm_client.models.error_data import ErrorData # noqa: E501
from controlm_client.rest import ApiException
class TestErrorData(unittest.TestCase):
"""ErrorData unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testErrorData(self):
"""Test ErrorData"""
# FIXME: construct object with mandatory attributes with example values
# model = controlm_client.models.error_data.ErrorData() # noqa: E501
pass
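    # Editor's sketch for the FIXME above (assumption: like most swagger-codegen
    # Python clients, ErrorData() accepts only optional keyword attributes):
    #
    #   def testErrorDataConstruction(self):
    #       model = ErrorData()
    #       self.assertIsInstance(model, ErrorData)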
if __name__ == '__main__':
unittest.main()
| 21.195122 | 79 | 0.693901 |
d870bd61c4500ac41e256d112f2a92d76014fb22 | 1,486 | py | Python | test.py | ykotseruba/Ped_Cross_Benchmark | f8ccf3a0a13afd4610d0ce6ad7bf92e934e8cc31 | [
"MIT"
] | 2 | 2021-01-04T11:50:58.000Z | 2021-01-04T15:31:47.000Z | test.py | ykotseruba/Ped_Cross_Benchmark | f8ccf3a0a13afd4610d0ce6ad7bf92e934e8cc31 | [
"MIT"
] | null | null | null | test.py | ykotseruba/Ped_Cross_Benchmark | f8ccf3a0a13afd4610d0ce6ad7bf92e934e8cc31 | [
"MIT"
] | null | null | null | from action_predict import action_prediction
from pie_data import PIE
from jaad_data import JAAD
import os
import sys
import yaml
def test_model(saved_files_path=None):
with open(os.path.join(saved_files_path, 'configs.yaml'), 'r') as yamlfile:
opts = yaml.safe_load(yamlfile)
print(opts)
model_opts = opts['model_opts']
data_opts = opts['data_opts']
net_opts = opts['net_opts']
tte = model_opts['time_to_event'] if isinstance(model_opts['time_to_event'], int) else \
model_opts['time_to_event'][1]
data_opts['min_track_size'] = model_opts['obs_length'] + tte
if model_opts['dataset'] == 'pie':
imdb = PIE(data_path=os.environ.copy()['PIE_PATH'])
imdb.get_data_stats()
elif model_opts['dataset'] == 'jaad':
imdb = JAAD(data_path=os.environ.copy()['JAAD_PATH'])
else:
        raise ValueError("Unsupported dataset: {}".format(model_opts['dataset']))
method_class = action_prediction(model_opts['model'])(**net_opts)
#beh_seq_train = imdb.generate_data_trajectory_sequence('train', **data_opts)
#saved_files_path = method_class.train(beh_seq_train, **train_opts, model_opts=model_opts)
beh_seq_test = imdb.generate_data_trajectory_sequence('test', **data_opts)
    acc, auc, f1, precision, recall = method_class.test(beh_seq_test, saved_files_path)
    # Report the evaluation metrics instead of silently discarding them.
    print('acc: {:.2f} auc: {:.2f} f1: {:.2f} precision: {:.2f} recall: {:.2f}'.format(
        acc, auc, f1, precision, recall))
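# Editor's sketch of the configs.yaml layout test_model() expects, inferred from
# the lookups above. Only the top-level keys (model_opts / data_opts / net_opts)
# and the referenced fields are implied by the code; the leaf values shown are
# assumptions:
#
#   model_opts:
#     model: <registered model name passed to action_prediction()>
#     dataset: pie            # or jaad
#     obs_length: 15
#     time_to_event: [30, 60] # or a single int
#   data_opts:
#     ...                     # forwarded to generate_data_trajectory_sequence()
#   net_opts:
#     ...                     # forwarded to the model class constructor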
if __name__ == '__main__':
saved_files_path = sys.argv[1]
test_model(saved_files_path=saved_files_path) | 38.102564 | 94 | 0.70323 |
5376d53a6cff74c8f3b6b1922837aaa615f75c9c | 518 | py | Python | tests/core/test_postgre_multiple_statements.py | coverwallet/pysoni | 49d3a8acb101436ad0724749572be2ad9d86f3ae | [
"MIT"
] | 5 | 2019-07-08T15:38:06.000Z | 2022-03-24T20:36:19.000Z | tests/core/test_postgre_multiple_statements.py | coverwallet/pysoni | 49d3a8acb101436ad0724749572be2ad9d86f3ae | [
"MIT"
] | 2 | 2019-07-07T23:26:32.000Z | 2020-06-04T07:43:24.000Z | tests/core/test_postgre_multiple_statements.py | coverwallet/pysoni | 49d3a8acb101436ad0724749572be2ad9d86f3ae | [
"MIT"
] | 1 | 2019-05-31T09:11:22.000Z | 2019-05-31T09:11:22.000Z | import pytest
import psycopg2
def test_postgre_multiple_statement_with_psycopg2_api(pysoni_client_connection_with_envvars):
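    # the trailing SELECT should return the value inserted into temp (renamed to temp_1 along the way)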
    expected_statements = "1"
results = pysoni_client_connection_with_envvars.postgre_multiple_statements(
["DROP TABLE IF EXISTS temp",
"DROP TABLE IF EXISTS temp_1",
"CREATE TABLE temp (cover text)",
"INSERT INTO temp VALUES ('1')",
"ALTER TABLE temp rename to temp_1",
"SELECT * FROM temp_1"])
    assert expected_statements == results
| 28.777778 | 93 | 0.704633 |
e8be0ea22cabad6312e584e5744b1a5bb0fc55c8 | 50,251 | py | Python | sdk/python/pulumi_azure_native/storage/v20210201/storage_account.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/storage/v20210201/storage_account.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/storage/v20210201/storage_account.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['StorageAccountArgs', 'StorageAccount']
@pulumi.input_type
class StorageAccountArgs:
def __init__(__self__, *,
kind: pulumi.Input[Union[str, 'Kind']],
resource_group_name: pulumi.Input[str],
sku: pulumi.Input['SkuArgs'],
access_tier: Optional[pulumi.Input['AccessTier']] = None,
account_name: Optional[pulumi.Input[str]] = None,
allow_blob_public_access: Optional[pulumi.Input[bool]] = None,
allow_shared_key_access: Optional[pulumi.Input[bool]] = None,
azure_files_identity_based_authentication: Optional[pulumi.Input['AzureFilesIdentityBasedAuthenticationArgs']] = None,
custom_domain: Optional[pulumi.Input['CustomDomainArgs']] = None,
enable_https_traffic_only: Optional[pulumi.Input[bool]] = None,
enable_nfs_v3: Optional[pulumi.Input[bool]] = None,
encryption: Optional[pulumi.Input['EncryptionArgs']] = None,
extended_location: Optional[pulumi.Input['ExtendedLocationArgs']] = None,
identity: Optional[pulumi.Input['IdentityArgs']] = None,
is_hns_enabled: Optional[pulumi.Input[bool]] = None,
key_policy: Optional[pulumi.Input['KeyPolicyArgs']] = None,
large_file_shares_state: Optional[pulumi.Input[Union[str, 'LargeFileSharesState']]] = None,
location: Optional[pulumi.Input[str]] = None,
minimum_tls_version: Optional[pulumi.Input[Union[str, 'MinimumTlsVersion']]] = None,
network_rule_set: Optional[pulumi.Input['NetworkRuleSetArgs']] = None,
routing_preference: Optional[pulumi.Input['RoutingPreferenceArgs']] = None,
sas_policy: Optional[pulumi.Input['SasPolicyArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a StorageAccount resource.
:param pulumi.Input[Union[str, 'Kind']] kind: Required. Indicates the type of storage account.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param pulumi.Input['SkuArgs'] sku: Required. Gets or sets the SKU name.
:param pulumi.Input['AccessTier'] access_tier: Required for storage accounts where kind = BlobStorage. The access tier used for billing.
:param pulumi.Input[str] account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
:param pulumi.Input[bool] allow_blob_public_access: Allow or disallow public access to all blobs or containers in the storage account. The default interpretation is true for this property.
:param pulumi.Input[bool] allow_shared_key_access: Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). The default value is null, which is equivalent to true.
:param pulumi.Input['AzureFilesIdentityBasedAuthenticationArgs'] azure_files_identity_based_authentication: Provides the identity based authentication settings for Azure Files.
:param pulumi.Input['CustomDomainArgs'] custom_domain: User domain assigned to the storage account. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property.
:param pulumi.Input[bool] enable_https_traffic_only: Allows https traffic only to storage service if sets to true. The default value is true since API version 2019-04-01.
:param pulumi.Input[bool] enable_nfs_v3: NFS 3.0 protocol support enabled if set to true.
:param pulumi.Input['EncryptionArgs'] encryption: Not applicable. Azure Storage encryption is enabled for all storage accounts and cannot be disabled.
:param pulumi.Input['ExtendedLocationArgs'] extended_location: Optional. Set the extended location of the resource. If not set, the storage account will be created in Azure main region. Otherwise it will be created in the specified extended location
:param pulumi.Input['IdentityArgs'] identity: The identity of the resource.
:param pulumi.Input[bool] is_hns_enabled: Account HierarchicalNamespace enabled if sets to true.
:param pulumi.Input['KeyPolicyArgs'] key_policy: KeyPolicy assigned to the storage account.
:param pulumi.Input[Union[str, 'LargeFileSharesState']] large_file_shares_state: Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled.
:param pulumi.Input[str] location: Required. Gets or sets the location of the resource. This will be one of the supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a resource cannot be changed once it is created, but if an identical geo region is specified on update, the request will succeed.
:param pulumi.Input[Union[str, 'MinimumTlsVersion']] minimum_tls_version: Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property.
:param pulumi.Input['NetworkRuleSetArgs'] network_rule_set: Network rule set
:param pulumi.Input['RoutingPreferenceArgs'] routing_preference: Maintains information about the network routing choice opted by the user for data transfer
:param pulumi.Input['SasPolicyArgs'] sas_policy: SasPolicy assigned to the storage account.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Gets or sets a list of key value pairs that describe the resource. These tags can be used for viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key with a length no greater than 128 characters and a value with a length no greater than 256 characters.
"""
pulumi.set(__self__, "kind", kind)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "sku", sku)
if access_tier is not None:
pulumi.set(__self__, "access_tier", access_tier)
if account_name is not None:
pulumi.set(__self__, "account_name", account_name)
if allow_blob_public_access is not None:
pulumi.set(__self__, "allow_blob_public_access", allow_blob_public_access)
if allow_shared_key_access is not None:
pulumi.set(__self__, "allow_shared_key_access", allow_shared_key_access)
if azure_files_identity_based_authentication is not None:
pulumi.set(__self__, "azure_files_identity_based_authentication", azure_files_identity_based_authentication)
if custom_domain is not None:
pulumi.set(__self__, "custom_domain", custom_domain)
if enable_https_traffic_only is not None:
pulumi.set(__self__, "enable_https_traffic_only", enable_https_traffic_only)
if enable_nfs_v3 is not None:
pulumi.set(__self__, "enable_nfs_v3", enable_nfs_v3)
if encryption is not None:
pulumi.set(__self__, "encryption", encryption)
if extended_location is not None:
pulumi.set(__self__, "extended_location", extended_location)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if is_hns_enabled is not None:
pulumi.set(__self__, "is_hns_enabled", is_hns_enabled)
if key_policy is not None:
pulumi.set(__self__, "key_policy", key_policy)
if large_file_shares_state is not None:
pulumi.set(__self__, "large_file_shares_state", large_file_shares_state)
if location is not None:
pulumi.set(__self__, "location", location)
if minimum_tls_version is not None:
pulumi.set(__self__, "minimum_tls_version", minimum_tls_version)
if network_rule_set is not None:
pulumi.set(__self__, "network_rule_set", network_rule_set)
if routing_preference is not None:
pulumi.set(__self__, "routing_preference", routing_preference)
if sas_policy is not None:
pulumi.set(__self__, "sas_policy", sas_policy)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[Union[str, 'Kind']]:
"""
Required. Indicates the type of storage account.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[Union[str, 'Kind']]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group within the user's subscription. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def sku(self) -> pulumi.Input['SkuArgs']:
"""
Required. Gets or sets the SKU name.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: pulumi.Input['SkuArgs']):
pulumi.set(self, "sku", value)
@property
@pulumi.getter(name="accessTier")
def access_tier(self) -> Optional[pulumi.Input['AccessTier']]:
"""
Required for storage accounts where kind = BlobStorage. The access tier used for billing.
"""
return pulumi.get(self, "access_tier")
@access_tier.setter
def access_tier(self, value: Optional[pulumi.Input['AccessTier']]):
pulumi.set(self, "access_tier", value)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="allowBlobPublicAccess")
def allow_blob_public_access(self) -> Optional[pulumi.Input[bool]]:
"""
Allow or disallow public access to all blobs or containers in the storage account. The default interpretation is true for this property.
"""
return pulumi.get(self, "allow_blob_public_access")
@allow_blob_public_access.setter
def allow_blob_public_access(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_blob_public_access", value)
@property
@pulumi.getter(name="allowSharedKeyAccess")
def allow_shared_key_access(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). The default value is null, which is equivalent to true.
"""
return pulumi.get(self, "allow_shared_key_access")
@allow_shared_key_access.setter
def allow_shared_key_access(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_shared_key_access", value)
@property
@pulumi.getter(name="azureFilesIdentityBasedAuthentication")
def azure_files_identity_based_authentication(self) -> Optional[pulumi.Input['AzureFilesIdentityBasedAuthenticationArgs']]:
"""
Provides the identity based authentication settings for Azure Files.
"""
return pulumi.get(self, "azure_files_identity_based_authentication")
@azure_files_identity_based_authentication.setter
def azure_files_identity_based_authentication(self, value: Optional[pulumi.Input['AzureFilesIdentityBasedAuthenticationArgs']]):
pulumi.set(self, "azure_files_identity_based_authentication", value)
@property
@pulumi.getter(name="customDomain")
def custom_domain(self) -> Optional[pulumi.Input['CustomDomainArgs']]:
"""
User domain assigned to the storage account. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property.
"""
return pulumi.get(self, "custom_domain")
@custom_domain.setter
def custom_domain(self, value: Optional[pulumi.Input['CustomDomainArgs']]):
pulumi.set(self, "custom_domain", value)
@property
@pulumi.getter(name="enableHttpsTrafficOnly")
def enable_https_traffic_only(self) -> Optional[pulumi.Input[bool]]:
"""
Allows https traffic only to storage service if sets to true. The default value is true since API version 2019-04-01.
"""
return pulumi.get(self, "enable_https_traffic_only")
@enable_https_traffic_only.setter
def enable_https_traffic_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_https_traffic_only", value)
@property
@pulumi.getter(name="enableNfsV3")
def enable_nfs_v3(self) -> Optional[pulumi.Input[bool]]:
"""
NFS 3.0 protocol support enabled if set to true.
"""
return pulumi.get(self, "enable_nfs_v3")
@enable_nfs_v3.setter
def enable_nfs_v3(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_nfs_v3", value)
@property
@pulumi.getter
def encryption(self) -> Optional[pulumi.Input['EncryptionArgs']]:
"""
Not applicable. Azure Storage encryption is enabled for all storage accounts and cannot be disabled.
"""
return pulumi.get(self, "encryption")
@encryption.setter
def encryption(self, value: Optional[pulumi.Input['EncryptionArgs']]):
pulumi.set(self, "encryption", value)
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> Optional[pulumi.Input['ExtendedLocationArgs']]:
"""
Optional. Set the extended location of the resource. If not set, the storage account will be created in Azure main region. Otherwise it will be created in the specified extended location
"""
return pulumi.get(self, "extended_location")
@extended_location.setter
def extended_location(self, value: Optional[pulumi.Input['ExtendedLocationArgs']]):
pulumi.set(self, "extended_location", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['IdentityArgs']]:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['IdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter(name="isHnsEnabled")
def is_hns_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Account HierarchicalNamespace enabled if sets to true.
"""
return pulumi.get(self, "is_hns_enabled")
@is_hns_enabled.setter
def is_hns_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_hns_enabled", value)
@property
@pulumi.getter(name="keyPolicy")
def key_policy(self) -> Optional[pulumi.Input['KeyPolicyArgs']]:
"""
KeyPolicy assigned to the storage account.
"""
return pulumi.get(self, "key_policy")
@key_policy.setter
def key_policy(self, value: Optional[pulumi.Input['KeyPolicyArgs']]):
pulumi.set(self, "key_policy", value)
@property
@pulumi.getter(name="largeFileSharesState")
def large_file_shares_state(self) -> Optional[pulumi.Input[Union[str, 'LargeFileSharesState']]]:
"""
Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled.
"""
return pulumi.get(self, "large_file_shares_state")
@large_file_shares_state.setter
def large_file_shares_state(self, value: Optional[pulumi.Input[Union[str, 'LargeFileSharesState']]]):
pulumi.set(self, "large_file_shares_state", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Required. Gets or sets the location of the resource. This will be one of the supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a resource cannot be changed once it is created, but if an identical geo region is specified on update, the request will succeed.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="minimumTlsVersion")
def minimum_tls_version(self) -> Optional[pulumi.Input[Union[str, 'MinimumTlsVersion']]]:
"""
Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property.
"""
return pulumi.get(self, "minimum_tls_version")
@minimum_tls_version.setter
def minimum_tls_version(self, value: Optional[pulumi.Input[Union[str, 'MinimumTlsVersion']]]):
pulumi.set(self, "minimum_tls_version", value)
@property
@pulumi.getter(name="networkRuleSet")
def network_rule_set(self) -> Optional[pulumi.Input['NetworkRuleSetArgs']]:
"""
Network rule set
"""
return pulumi.get(self, "network_rule_set")
@network_rule_set.setter
def network_rule_set(self, value: Optional[pulumi.Input['NetworkRuleSetArgs']]):
pulumi.set(self, "network_rule_set", value)
@property
@pulumi.getter(name="routingPreference")
def routing_preference(self) -> Optional[pulumi.Input['RoutingPreferenceArgs']]:
"""
Maintains information about the network routing choice opted by the user for data transfer
"""
return pulumi.get(self, "routing_preference")
@routing_preference.setter
def routing_preference(self, value: Optional[pulumi.Input['RoutingPreferenceArgs']]):
pulumi.set(self, "routing_preference", value)
@property
@pulumi.getter(name="sasPolicy")
def sas_policy(self) -> Optional[pulumi.Input['SasPolicyArgs']]:
"""
SasPolicy assigned to the storage account.
"""
return pulumi.get(self, "sas_policy")
@sas_policy.setter
def sas_policy(self, value: Optional[pulumi.Input['SasPolicyArgs']]):
pulumi.set(self, "sas_policy", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Gets or sets a list of key value pairs that describe the resource. These tags can be used for viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key with a length no greater than 128 characters and a value with a length no greater than 256 characters.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class StorageAccount(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_tier: Optional[pulumi.Input['AccessTier']] = None,
account_name: Optional[pulumi.Input[str]] = None,
allow_blob_public_access: Optional[pulumi.Input[bool]] = None,
allow_shared_key_access: Optional[pulumi.Input[bool]] = None,
azure_files_identity_based_authentication: Optional[pulumi.Input[pulumi.InputType['AzureFilesIdentityBasedAuthenticationArgs']]] = None,
custom_domain: Optional[pulumi.Input[pulumi.InputType['CustomDomainArgs']]] = None,
enable_https_traffic_only: Optional[pulumi.Input[bool]] = None,
enable_nfs_v3: Optional[pulumi.Input[bool]] = None,
encryption: Optional[pulumi.Input[pulumi.InputType['EncryptionArgs']]] = None,
extended_location: Optional[pulumi.Input[pulumi.InputType['ExtendedLocationArgs']]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
is_hns_enabled: Optional[pulumi.Input[bool]] = None,
key_policy: Optional[pulumi.Input[pulumi.InputType['KeyPolicyArgs']]] = None,
kind: Optional[pulumi.Input[Union[str, 'Kind']]] = None,
large_file_shares_state: Optional[pulumi.Input[Union[str, 'LargeFileSharesState']]] = None,
location: Optional[pulumi.Input[str]] = None,
minimum_tls_version: Optional[pulumi.Input[Union[str, 'MinimumTlsVersion']]] = None,
network_rule_set: Optional[pulumi.Input[pulumi.InputType['NetworkRuleSetArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
routing_preference: Optional[pulumi.Input[pulumi.InputType['RoutingPreferenceArgs']]] = None,
sas_policy: Optional[pulumi.Input[pulumi.InputType['SasPolicyArgs']]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
The storage account.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input['AccessTier'] access_tier: Required for storage accounts where kind = BlobStorage. The access tier used for billing.
:param pulumi.Input[str] account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
:param pulumi.Input[bool] allow_blob_public_access: Allow or disallow public access to all blobs or containers in the storage account. The default interpretation is true for this property.
:param pulumi.Input[bool] allow_shared_key_access: Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). The default value is null, which is equivalent to true.
:param pulumi.Input[pulumi.InputType['AzureFilesIdentityBasedAuthenticationArgs']] azure_files_identity_based_authentication: Provides the identity based authentication settings for Azure Files.
:param pulumi.Input[pulumi.InputType['CustomDomainArgs']] custom_domain: User domain assigned to the storage account. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property.
:param pulumi.Input[bool] enable_https_traffic_only: Allows https traffic only to storage service if sets to true. The default value is true since API version 2019-04-01.
:param pulumi.Input[bool] enable_nfs_v3: NFS 3.0 protocol support enabled if set to true.
:param pulumi.Input[pulumi.InputType['EncryptionArgs']] encryption: Not applicable. Azure Storage encryption is enabled for all storage accounts and cannot be disabled.
:param pulumi.Input[pulumi.InputType['ExtendedLocationArgs']] extended_location: Optional. Set the extended location of the resource. If not set, the storage account will be created in Azure main region. Otherwise it will be created in the specified extended location
:param pulumi.Input[pulumi.InputType['IdentityArgs']] identity: The identity of the resource.
:param pulumi.Input[bool] is_hns_enabled: Account HierarchicalNamespace enabled if sets to true.
:param pulumi.Input[pulumi.InputType['KeyPolicyArgs']] key_policy: KeyPolicy assigned to the storage account.
:param pulumi.Input[Union[str, 'Kind']] kind: Required. Indicates the type of storage account.
:param pulumi.Input[Union[str, 'LargeFileSharesState']] large_file_shares_state: Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled.
:param pulumi.Input[str] location: Required. Gets or sets the location of the resource. This will be one of the supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a resource cannot be changed once it is created, but if an identical geo region is specified on update, the request will succeed.
:param pulumi.Input[Union[str, 'MinimumTlsVersion']] minimum_tls_version: Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property.
:param pulumi.Input[pulumi.InputType['NetworkRuleSetArgs']] network_rule_set: Network rule set
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param pulumi.Input[pulumi.InputType['RoutingPreferenceArgs']] routing_preference: Maintains information about the network routing choice opted by the user for data transfer
:param pulumi.Input[pulumi.InputType['SasPolicyArgs']] sas_policy: SasPolicy assigned to the storage account.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: Required. Gets or sets the SKU name.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Gets or sets a list of key value pairs that describe the resource. These tags can be used for viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key with a length no greater than 128 characters and a value with a length no greater than 256 characters.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: StorageAccountArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The storage account.
:param str resource_name: The name of the resource.
:param StorageAccountArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StorageAccountArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_tier: Optional[pulumi.Input['AccessTier']] = None,
account_name: Optional[pulumi.Input[str]] = None,
allow_blob_public_access: Optional[pulumi.Input[bool]] = None,
allow_shared_key_access: Optional[pulumi.Input[bool]] = None,
azure_files_identity_based_authentication: Optional[pulumi.Input[pulumi.InputType['AzureFilesIdentityBasedAuthenticationArgs']]] = None,
custom_domain: Optional[pulumi.Input[pulumi.InputType['CustomDomainArgs']]] = None,
enable_https_traffic_only: Optional[pulumi.Input[bool]] = None,
enable_nfs_v3: Optional[pulumi.Input[bool]] = None,
encryption: Optional[pulumi.Input[pulumi.InputType['EncryptionArgs']]] = None,
extended_location: Optional[pulumi.Input[pulumi.InputType['ExtendedLocationArgs']]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
is_hns_enabled: Optional[pulumi.Input[bool]] = None,
key_policy: Optional[pulumi.Input[pulumi.InputType['KeyPolicyArgs']]] = None,
kind: Optional[pulumi.Input[Union[str, 'Kind']]] = None,
large_file_shares_state: Optional[pulumi.Input[Union[str, 'LargeFileSharesState']]] = None,
location: Optional[pulumi.Input[str]] = None,
minimum_tls_version: Optional[pulumi.Input[Union[str, 'MinimumTlsVersion']]] = None,
network_rule_set: Optional[pulumi.Input[pulumi.InputType['NetworkRuleSetArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
routing_preference: Optional[pulumi.Input[pulumi.InputType['RoutingPreferenceArgs']]] = None,
sas_policy: Optional[pulumi.Input[pulumi.InputType['SasPolicyArgs']]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = StorageAccountArgs.__new__(StorageAccountArgs)
__props__.__dict__["access_tier"] = access_tier
__props__.__dict__["account_name"] = account_name
__props__.__dict__["allow_blob_public_access"] = allow_blob_public_access
__props__.__dict__["allow_shared_key_access"] = allow_shared_key_access
__props__.__dict__["azure_files_identity_based_authentication"] = azure_files_identity_based_authentication
__props__.__dict__["custom_domain"] = custom_domain
__props__.__dict__["enable_https_traffic_only"] = enable_https_traffic_only
__props__.__dict__["enable_nfs_v3"] = enable_nfs_v3
__props__.__dict__["encryption"] = encryption
__props__.__dict__["extended_location"] = extended_location
__props__.__dict__["identity"] = identity
__props__.__dict__["is_hns_enabled"] = is_hns_enabled
__props__.__dict__["key_policy"] = key_policy
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__.__dict__["kind"] = kind
__props__.__dict__["large_file_shares_state"] = large_file_shares_state
__props__.__dict__["location"] = location
__props__.__dict__["minimum_tls_version"] = minimum_tls_version
__props__.__dict__["network_rule_set"] = network_rule_set
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["routing_preference"] = routing_preference
__props__.__dict__["sas_policy"] = sas_policy
if sku is None and not opts.urn:
raise TypeError("Missing required property 'sku'")
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
__props__.__dict__["blob_restore_status"] = None
__props__.__dict__["creation_time"] = None
__props__.__dict__["failover_in_progress"] = None
__props__.__dict__["geo_replication_stats"] = None
__props__.__dict__["key_creation_time"] = None
__props__.__dict__["last_geo_failover_time"] = None
__props__.__dict__["name"] = None
__props__.__dict__["primary_endpoints"] = None
__props__.__dict__["primary_location"] = None
__props__.__dict__["private_endpoint_connections"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["secondary_endpoints"] = None
__props__.__dict__["secondary_location"] = None
__props__.__dict__["status_of_primary"] = None
__props__.__dict__["status_of_secondary"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:storage/v20210201:StorageAccount"), pulumi.Alias(type_="azure-native:storage:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20150501preview:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20150501preview:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20150615:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20150615:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20160101:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20160101:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20160501:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20160501:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20161201:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20161201:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20170601:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20170601:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20171001:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20171001:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20180201:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20180201:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20180301preview:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20180301preview:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20180701:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20180701:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20181101:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20181101:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20190401:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20190401:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20190601:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20190601:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20200801preview:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20200801preview:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20210101:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20210101:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20210401:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20210401:StorageAccount"), pulumi.Alias(type_="azure-native:storage/v20210601:StorageAccount"), pulumi.Alias(type_="azure-nextgen:storage/v20210601:StorageAccount")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(StorageAccount, __self__).__init__(
'azure-native:storage/v20210201:StorageAccount',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'StorageAccount':
"""
Get an existing StorageAccount resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = StorageAccountArgs.__new__(StorageAccountArgs)
__props__.__dict__["access_tier"] = None
__props__.__dict__["allow_blob_public_access"] = None
__props__.__dict__["allow_shared_key_access"] = None
__props__.__dict__["azure_files_identity_based_authentication"] = None
__props__.__dict__["blob_restore_status"] = None
__props__.__dict__["creation_time"] = None
__props__.__dict__["custom_domain"] = None
__props__.__dict__["enable_https_traffic_only"] = None
__props__.__dict__["enable_nfs_v3"] = None
__props__.__dict__["encryption"] = None
__props__.__dict__["extended_location"] = None
__props__.__dict__["failover_in_progress"] = None
__props__.__dict__["geo_replication_stats"] = None
__props__.__dict__["identity"] = None
__props__.__dict__["is_hns_enabled"] = None
__props__.__dict__["key_creation_time"] = None
__props__.__dict__["key_policy"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["large_file_shares_state"] = None
__props__.__dict__["last_geo_failover_time"] = None
__props__.__dict__["location"] = None
__props__.__dict__["minimum_tls_version"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_rule_set"] = None
__props__.__dict__["primary_endpoints"] = None
__props__.__dict__["primary_location"] = None
__props__.__dict__["private_endpoint_connections"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["routing_preference"] = None
__props__.__dict__["sas_policy"] = None
__props__.__dict__["secondary_endpoints"] = None
__props__.__dict__["secondary_location"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["status_of_primary"] = None
__props__.__dict__["status_of_secondary"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return StorageAccount(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessTier")
def access_tier(self) -> pulumi.Output[str]:
"""
Required for storage accounts where kind = BlobStorage. The access tier used for billing.
"""
return pulumi.get(self, "access_tier")
@property
@pulumi.getter(name="allowBlobPublicAccess")
def allow_blob_public_access(self) -> pulumi.Output[Optional[bool]]:
"""
Allow or disallow public access to all blobs or containers in the storage account. The default interpretation is true for this property.
"""
return pulumi.get(self, "allow_blob_public_access")
@property
@pulumi.getter(name="allowSharedKeyAccess")
def allow_shared_key_access(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates whether the storage account permits requests to be authorized with the account access key via Shared Key. If false, then all requests, including shared access signatures, must be authorized with Azure Active Directory (Azure AD). The default value is null, which is equivalent to true.
"""
return pulumi.get(self, "allow_shared_key_access")
@property
@pulumi.getter(name="azureFilesIdentityBasedAuthentication")
def azure_files_identity_based_authentication(self) -> pulumi.Output[Optional['outputs.AzureFilesIdentityBasedAuthenticationResponse']]:
"""
Provides the identity based authentication settings for Azure Files.
"""
return pulumi.get(self, "azure_files_identity_based_authentication")
@property
@pulumi.getter(name="blobRestoreStatus")
def blob_restore_status(self) -> pulumi.Output['outputs.BlobRestoreStatusResponse']:
"""
Blob restore status
"""
return pulumi.get(self, "blob_restore_status")
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> pulumi.Output[str]:
"""
Gets the creation date and time of the storage account in UTC.
"""
return pulumi.get(self, "creation_time")
@property
@pulumi.getter(name="customDomain")
def custom_domain(self) -> pulumi.Output['outputs.CustomDomainResponse']:
"""
Gets the custom domain the user assigned to this storage account.
"""
return pulumi.get(self, "custom_domain")
@property
@pulumi.getter(name="enableHttpsTrafficOnly")
def enable_https_traffic_only(self) -> pulumi.Output[Optional[bool]]:
"""
Allows https traffic only to storage service if sets to true.
"""
return pulumi.get(self, "enable_https_traffic_only")
@property
@pulumi.getter(name="enableNfsV3")
def enable_nfs_v3(self) -> pulumi.Output[Optional[bool]]:
"""
NFS 3.0 protocol support enabled if set to true.
"""
return pulumi.get(self, "enable_nfs_v3")
@property
@pulumi.getter
def encryption(self) -> pulumi.Output['outputs.EncryptionResponse']:
"""
Gets the encryption settings on the account. If unspecified, the account is unencrypted.
"""
return pulumi.get(self, "encryption")
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> pulumi.Output[Optional['outputs.ExtendedLocationResponse']]:
"""
The extendedLocation of the resource.
"""
return pulumi.get(self, "extended_location")
@property
@pulumi.getter(name="failoverInProgress")
def failover_in_progress(self) -> pulumi.Output[bool]:
"""
If the failover is in progress, the value will be true, otherwise, it will be null.
"""
return pulumi.get(self, "failover_in_progress")
@property
@pulumi.getter(name="geoReplicationStats")
def geo_replication_stats(self) -> pulumi.Output['outputs.GeoReplicationStatsResponse']:
"""
Geo Replication Stats
"""
return pulumi.get(self, "geo_replication_stats")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="isHnsEnabled")
def is_hns_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Account HierarchicalNamespace enabled if sets to true.
"""
return pulumi.get(self, "is_hns_enabled")
@property
@pulumi.getter(name="keyCreationTime")
def key_creation_time(self) -> pulumi.Output['outputs.KeyCreationTimeResponse']:
"""
Storage account keys creation time.
"""
return pulumi.get(self, "key_creation_time")
@property
@pulumi.getter(name="keyPolicy")
def key_policy(self) -> pulumi.Output['outputs.KeyPolicyResponse']:
"""
KeyPolicy assigned to the storage account.
"""
return pulumi.get(self, "key_policy")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Gets the Kind.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="largeFileSharesState")
def large_file_shares_state(self) -> pulumi.Output[Optional[str]]:
"""
Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled.
"""
return pulumi.get(self, "large_file_shares_state")
@property
@pulumi.getter(name="lastGeoFailoverTime")
def last_geo_failover_time(self) -> pulumi.Output[str]:
"""
Gets the timestamp of the most recent instance of a failover to the secondary location. Only the most recent timestamp is retained. This element is not returned if there has never been a failover instance. Only available if the accountType is Standard_GRS or Standard_RAGRS.
"""
return pulumi.get(self, "last_geo_failover_time")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="minimumTlsVersion")
def minimum_tls_version(self) -> pulumi.Output[Optional[str]]:
"""
Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property.
"""
return pulumi.get(self, "minimum_tls_version")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkRuleSet")
def network_rule_set(self) -> pulumi.Output['outputs.NetworkRuleSetResponse']:
"""
Network rule set
"""
return pulumi.get(self, "network_rule_set")
@property
@pulumi.getter(name="primaryEndpoints")
def primary_endpoints(self) -> pulumi.Output['outputs.EndpointsResponse']:
"""
Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object. Note that Standard_ZRS and Premium_LRS accounts only return the blob endpoint.
"""
return pulumi.get(self, "primary_endpoints")
@property
@pulumi.getter(name="primaryLocation")
def primary_location(self) -> pulumi.Output[str]:
"""
Gets the location of the primary data center for the storage account.
"""
return pulumi.get(self, "primary_location")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> pulumi.Output[Sequence['outputs.PrivateEndpointConnectionResponse']]:
"""
List of private endpoint connection associated with the specified storage account
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Gets the status of the storage account at the time the operation was called.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="routingPreference")
def routing_preference(self) -> pulumi.Output[Optional['outputs.RoutingPreferenceResponse']]:
"""
Maintains information about the network routing choice opted by the user for data transfer
"""
return pulumi.get(self, "routing_preference")
@property
@pulumi.getter(name="sasPolicy")
def sas_policy(self) -> pulumi.Output['outputs.SasPolicyResponse']:
"""
SasPolicy assigned to the storage account.
"""
return pulumi.get(self, "sas_policy")
@property
@pulumi.getter(name="secondaryEndpoints")
def secondary_endpoints(self) -> pulumi.Output['outputs.EndpointsResponse']:
"""
Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object from the secondary location of the storage account. Only available if the SKU name is Standard_RAGRS.
"""
return pulumi.get(self, "secondary_endpoints")
@property
@pulumi.getter(name="secondaryLocation")
def secondary_location(self) -> pulumi.Output[str]:
"""
Gets the location of the geo-replicated secondary for the storage account. Only available if the accountType is Standard_GRS or Standard_RAGRS.
"""
return pulumi.get(self, "secondary_location")
@property
@pulumi.getter
def sku(self) -> pulumi.Output['outputs.SkuResponse']:
"""
Gets the SKU.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="statusOfPrimary")
def status_of_primary(self) -> pulumi.Output[str]:
"""
Gets the status indicating whether the primary location of the storage account is available or unavailable.
"""
return pulumi.get(self, "status_of_primary")
@property
@pulumi.getter(name="statusOfSecondary")
def status_of_secondary(self) -> pulumi.Output[str]:
"""
Gets the status indicating whether the secondary location of the storage account is available or unavailable. Only available if the SKU name is Standard_GRS or Standard_RAGRS.
"""
return pulumi.get(self, "status_of_secondary")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
| 55.220879 | 2,647 | 0.69133 |
7ac035c5184b8222ac2167f1647e169bc47c3be7 | 939 | py | Python | server/clean_file_names.py | mmkuznecov/RedListHack | a14efd035e9eaf99e86a3c50a60e493898f060ae | [
"Apache-2.0"
] | null | null | null | server/clean_file_names.py | mmkuznecov/RedListHack | a14efd035e9eaf99e86a3c50a60e493898f060ae | [
"Apache-2.0"
] | null | null | null | server/clean_file_names.py | mmkuznecov/RedListHack | a14efd035e9eaf99e86a3c50a60e493898f060ae | [
"Apache-2.0"
] | null | null | null | from shutil import copyfile
from transliterate import translit
import re
import os
def has_cyrillic(text):
return bool(re.search('[а-яА-Я]', text))
def clean_file_name(fname):
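    # replace spaces and em dashes with underscores, then transliterate any Cyrillic characters to Latin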
if ' ' in fname or '—' in fname or has_cyrillic(fname):
fname = fname.replace(' ', '_')
fname = fname.replace('—', '_')
fname = translit(fname, language_code='ru', reversed=True)
return fname
def clean_file_names(data_paths, classes):
for data_path in data_paths:
for class_id, class_name in enumerate(classes):
for fname in os.listdir(os.path.join(data_path, class_name, 'images')):
fname_ = clean_file_name(fname)
if fname_ != fname:
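                    # copy the file under its cleaned name, then remove the original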
src = os.path.join(data_path, class_name, 'images', fname)
                    dst = os.path.join(data_path, class_name, 'images', fname_)
copyfile(src, dst)
os.remove(src) | 37.56 | 83 | 0.608094 |