id | content
---|---|
506980 | import os
from fastapi import FastAPI, File, UploadFile
import cv2 as cv
from mtcnn_cv2 import MTCNN
from torchvision import transforms
from PIL import Image
import torch
import numpy as np
device = 'cpu'
emotion_model = torch.load('./weights/EmotionNet_b27.pt',
map_location=torch.device('cpu'))
emotion_model.eval()
idx_to_class = {0: 'Anger', 1: 'Disgust', 2: 'Fear',
3: 'Happiness', 4: 'Neutral', 5: 'Sadness', 6: 'Surprise'}
test_transforms = transforms.Compose(
[
transforms.Resize((260, 260)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
]
)
detector = MTCNN()
emotion_detector = emotion_model
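# Detect faces with MTCNN, crop each one, and classify the crop with the emotion model.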
def infer(image):
faces = detector.detect_faces(image)
emotion_list = []
for bx in faces:
box = bx['box']
cropped = image[box[1]:box[1]+box[3],
box[0]:box[0] + box[2]]
img_tensor = test_transforms(Image.fromarray(cropped))
img_tensor.unsqueeze_(0)
with torch.no_grad():
scores = emotion_model(img_tensor.to(device))
scores = scores[0].data.cpu().numpy()
emotion = idx_to_class[np.argmax(scores)]
emotion_list.append(emotion)
for counter, em in enumerate(emotion_list):
faces[counter]["emotion"] = em
return faces
app = FastAPI(
title="Facial detection and emotion recognition services",
version="0.8",
contact={
"name": "<NAME>",
"email": "<EMAIL>",
"github": 'https://github.com/aliaminibagh'
}
)
@app.post("/infer", tags=["Inference"])
async def Infer(image: UploadFile = File(...)):
if not os.path.exists('./uploaded_images'):
os.makedirs('./uploaded_images')
tempName = os.path.join("uploaded_images", 'temp.jpg')
with open((tempName), "wb+") as file_object:
file_object.write(image.file.read())
img = cv.imread(tempName)
results = infer(img)
if len(results) == 0:
return {'status': 201, 'message': 'no faces detected'}
return [{f'Face_{num+1}': i['box'], 'Confidence': round(i['confidence'], 4), 'Emotion': i['emotion']} for num, i in enumerate(results)]
|
506986 | import unittest
import influxgraph.utils
import influxgraph.templates
from influxgraph.constants import DEFAULT_AGGREGATIONS
import datetime
class InfluxGraphUtilsTestCase(unittest.TestCase):
def test_interval_calculation(self):
start_time, end_time = (datetime.datetime.now() - datetime.timedelta(days=2)), \
datetime.datetime.now()
interval = influxgraph.utils.calculate_interval(int(start_time.strftime("%s")),
int(end_time.strftime("%s")))
self.assertEqual(interval, 60,
msg="Expected interval of 60s for start/end times %s-%s, got %s" % (
start_time, end_time, interval))
# More than 4 years time range
start_time, end_time = (datetime.datetime.now() - datetime.timedelta(days=1461)), \
datetime.datetime.now()
interval = influxgraph.utils.calculate_interval(int(start_time.strftime("%s")),
int(end_time.strftime("%s")))
self.assertEqual(interval, 86400,
msg="Expected interval of 1day/86400s for start/end times %s-%s, got %s" % (
start_time, end_time, interval))
def test_get_retention_policy(self):
policies = {60: 'default', 600: '10min', 1800: '30min'}
for interval, _retention in policies.items():
retention = influxgraph.utils.get_retention_policy(
interval, policies)
self.assertEqual(retention, _retention,
msg="Expected retention period %s for interval %s, got %s" % (
_retention, interval, retention,))
policy = influxgraph.utils.get_retention_policy(1900, policies)
self.assertEqual(policy,'30min',
msg="Expected retention policy %s for interval %s - got %s" % (
'30min', 1900, policy))
self.assertFalse(influxgraph.utils.get_retention_policy(60, None))
def test_aggregation_functions(self):
config = {'aggregation_functions': {
r'\.min$' : 'min',
'pattern' : 'notvalidagg',
'notvalidpattern[' : 'sum',
}}
aggregation_functions = influxgraph.utils._compile_aggregation_patterns(
config.get('aggregation_functions', DEFAULT_AGGREGATIONS))
self.assertTrue(config.get('aggregation_functions', None) is not None,
msg="Aggregation functions are empty")
self.assertTrue('notvalidagg' not in aggregation_functions,
msg="Expected invalid aggregation function '%s' to not be in parsed functions" % (
'notvalidagg',))
self.assertTrue('notvalidpattern[' not in aggregation_functions,
msg="Expected invalid regex pattern '%s' to not be in parsed functions" % (
'notvalidpattern[',))
path = 'my.path.min'
func = influxgraph.utils.get_aggregation_func(path, aggregation_functions)
self.assertTrue(func == 'min',
msg="Expected aggregation function 'min' for path '%s' - got '%s' instead" % (
path, func))
path = 'my.path.not.in.config'
func = influxgraph.utils.get_aggregation_func(path, aggregation_functions)
self.assertTrue(func == 'mean',
msg="Expected aggregation function 'mean' for path '%s' - got '%s' instead" % (
path, func))
def test_empty_aggregation_functions(self):
self.assertFalse(influxgraph.utils._compile_aggregation_patterns(None))
def test_parse_empty_template(self):
self.assertFalse(influxgraph.templates.parse_influxdb_graphite_templates(['']))
|
506990 | import torch
import torch.nn as nn
import torchvision
import os
import pickle
import scipy.io
import scipy.misc
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from torch import optim
from model import G12, G21
from model import D1, D2
class Solver(object):
def __init__(self, config, svhn_loader, mnist_loader):
self.svhn_loader = svhn_loader
self.mnist_loader = mnist_loader
self.g12 = None
self.g21 = None
self.d1 = None
self.d2 = None
self.g_optimizer = None
self.d_optimizer = None
self.use_reconst_loss = config.use_reconst_loss
self.use_distance_loss = config.use_distance_loss
self.use_self_distance = config.use_self_distance
self.num_classes = config.num_classes
self.beta1 = config.beta1
self.beta2 = config.beta2
self.g_conv_dim = config.g_conv_dim
self.d_conv_dim = config.d_conv_dim
self.train_iters = config.train_iters
self.batch_size = config.batch_size
self.lr = config.lr
self.log_step = config.log_step
self.sample_step = config.sample_step
self.sample_path = config.sample_path
self.model_path = config.model_path
self.lambda_distance_A = config.lambda_distance_A
self.lambda_distance_B = config.lambda_distance_B
self.config = config
self.build_model()
def build_model(self):
"""Builds a generator and a discriminator."""
self.g12 = G12(self.config, conv_dim=self.g_conv_dim)
self.g21 = G21(self.config, conv_dim=self.g_conv_dim)
self.d1 = D1(conv_dim=self.d_conv_dim)
self.d2 = D2(conv_dim=self.d_conv_dim)
g_params = list(self.g12.parameters()) + list(self.g21.parameters())
d_params = list(self.d1.parameters()) + list(self.d2.parameters())
self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])
self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])
if torch.cuda.is_available():
self.g12.cuda()
self.g21.cuda()
self.d1.cuda()
self.d2.cuda()
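# Tile corresponding source/target images side by side into a square grid for sample previews.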
def merge_images(self, sources, targets, k=10):
_, _, h, w = sources.shape
row = int(np.sqrt(self.batch_size))
merged = np.zeros([3, row*h, row*w*2])
for idx, (s, t) in enumerate(zip(sources, targets)):
i = idx // row
j = idx % row
merged[:, i*h:(i+1)*h, (j*2)*w:(j*2+1)*w] = s
merged[:, i*h:(i+1)*h, (j*2+1)*w:(j*2+2)*w] = t
return merged.transpose(1, 2, 0)
def to_var(self, x):
"""Converts numpy to variable."""
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
def to_data(self, x):
"""Converts variable to numpy."""
if torch.cuda.is_available():
x = x.cpu()
return x.data.numpy()
def reset_grad(self):
"""Zeros the gradient buffers."""
self.g_optimizer.zero_grad()
self.d_optimizer.zero_grad()
def distance(self, A, B):
return torch.mean(torch.abs(A - B))
def get_individual_distance_loss(self, A_i, A_j, AB_i, AB_j, A_to_AB):
distance_in_A = self.distance(A_i, A_j)
distance_in_AB = self.distance(AB_i, AB_j)
if self.normalize_distances:
if A_to_AB:
distance_in_A = (distance_in_A - self.expectation_A) / self.std_A
distance_in_AB = (distance_in_AB - self.expectation_B) / self.std_B
else:
distance_in_A = (distance_in_A - self.expectation_B) / self.std_B
distance_in_AB = (distance_in_AB - self.expectation_A) / self.std_A
return torch.abs(distance_in_A - distance_in_AB)
def get_self_distances(self, A, AB, A_to_AB=True):
A_half_1, A_half_2 = torch.chunk(A, 2, dim=2)
AB_half_1, AB_half_2 = torch.chunk(AB, 2, dim=2)
l_distance_A = \
self.get_individual_distance_loss(A_half_1, A_half_2,
AB_half_1, AB_half_2, A_to_AB)
return l_distance_A
def get_distance_losses(self, A, AB, A_to_AB=True ):
As = torch.split(A, 1)
ABs = torch.split(AB, 1)
loss_distance_A = 0.0
num_pairs = 0
min_length = len(As)
for i in xrange(min_length - 1):
for j in xrange(i + 1, min_length):
num_pairs += 1
loss_distance_A_ij = \
self.get_individual_distance_loss(As[i], As[j],
ABs[i], ABs[j], A_to_AB)
loss_distance_A += loss_distance_A_ij
loss_distance_A = loss_distance_A / num_pairs
return loss_distance_A
def get_std(self, num_items, vars, expectation):
num_pairs = 0
std_sum = 0.0
# If self distance computed std for top and bottom half
if self.use_self_distance:
for i in xrange(num_items):
var_half_1, var_half_2 = torch.chunk(vars[i], 2, dim=2)
std_sum += np.square(self.as_np(self.distance(var_half_1, var_half_2)) - expectation)
return np.sqrt(std_sum / num_items)
# Otherwise compute std for all pairs of images
for i in xrange(num_items - 1):
for j in xrange(i + 1, num_items):
num_pairs += 1
std_sum += np.square(self.as_np(self.distance(vars[i], vars[j])) - expectation)
return np.sqrt(std_sum / num_pairs)
def get_expectation(self, num_items, vars):
num_pairs = 0
distance_sum = 0.0
# If self distance computed expectation for top and bottom half
if self.use_self_distance:
for i in xrange(num_items):
# Split image to top and bottom half
var_half_1, var_half_2 = torch.chunk(vars[i], 2, dim=2)
distance_sum += self.as_np(self.distance(var_half_1, var_half_2))
return distance_sum / num_items
# Otherwise compute expectation for all pairs of images
for i in xrange(num_items - 1):
for j in xrange(i + 1, num_items):
num_pairs += 1
distance_sum += self.as_np(self.distance(vars[i], vars[j]))
return distance_sum / num_pairs
def set_expectation_and_std(self):
max_items = self.config.max_items
A_vars = []
B_vars = []
num_vars_A = 0
num_vars_B = 0
mnist_iter = iter(self.mnist_loader)
for step in range(len(mnist_iter)):
if step >= max_items:
break
mnist, m_labels = mnist_iter.next()
A = Variable(mnist, volatile=True)
if A.size()[0] != self.config.batch_size:
continue
A_vars.append(A)
num_vars_A += 1
svhn_iter = iter(self.svhn_loader)
for step in range(len(svhn_iter)):
if step >= max_items:
break
svhn, s_labels = svhn_iter.next()
B = Variable(svhn, volatile=True)
if B.size()[0] != self.config.batch_size:
continue
B_vars.append(B)
num_vars_B +=1
self.expectation_A = self.get_expectation(num_vars_A, A_vars)[0].astype(float)
self.expectation_B = self.get_expectation(num_vars_B, B_vars)[0].astype(float)
self.std_A = self.get_std(num_vars_A, A_vars, self.expectation_A)[0].astype(float)
self.std_B = self.get_std(num_vars_B, B_vars, self.expectation_B)[0].astype(float)
print('Expectation for dataset A: %f' % self.expectation_A)
print('Expectation for dataset B: %f' % self.expectation_B)
print('Std for dataset A: %f' % self.std_A)
print('Std for dataset B: %f' % self.std_B)
def as_np(self, data):
return data.cpu().data.numpy()
def train(self, svhn_test_loader, mnist_test_loader):
svhn_iter = iter(self.svhn_loader)
mnist_iter = iter(self.mnist_loader)
iter_per_epoch = min(len(svhn_iter), len(mnist_iter)) -1
# fixed mnist and svhn for sampling
svhn_test_iter = iter(svhn_test_loader)
mnist_test_iter = iter(mnist_test_loader)
fixed_svhn = self.to_var(svhn_test_iter.next()[0])
fixed_mnist = self.to_var(mnist_test_iter.next()[0])
self.normalize_distances = not self.config.unnormalized_distances
if (self.use_self_distance or self.use_distance_loss) and self.normalize_distances:
self.set_expectation_and_std()
for step in range(self.train_iters+1):
# reset data_iter for each epoch
if (step+1) % iter_per_epoch == 0:
mnist_iter = iter(self.mnist_loader)
svhn_iter = iter(self.svhn_loader)
# load svhn and mnist dataset
svhn, s_labels = svhn_iter.next()
svhn, s_labels = self.to_var(svhn), self.to_var(s_labels).long().squeeze()
mnist, m_labels = mnist_iter.next()
mnist, m_labels = self.to_var(mnist), self.to_var(m_labels)
#============ train D ============#
# train with real images
self.reset_grad()
out = self.d1(mnist)
d1_loss = torch.mean((out-1)**2)
out = self.d2(svhn)
d2_loss = torch.mean((out-1)**2)
d_mnist_loss = d1_loss
d_svhn_loss = d2_loss
d_real_loss = d1_loss + d2_loss
d_real_loss.backward()
self.d_optimizer.step()
# train with fake images
self.reset_grad()
fake_svhn = self.g12(mnist)
out = self.d2(fake_svhn)
d2_loss = torch.mean(out**2)
fake_mnist = self.g21(svhn)
out = self.d1(fake_mnist)
d1_loss = torch.mean(out**2)
d_fake_loss = d1_loss + d2_loss
d_fake_loss.backward()
self.d_optimizer.step()
#============ train G ============#
# train mnist-svhn-mnist cycle
self.reset_grad()
fake_svhn = self.g12(mnist)
out_svhn = self.d2(fake_svhn)
reconst_mnist = self.g21(fake_svhn)
gen_loss_A = torch.mean((out_svhn-1)**2)
g_loss = gen_loss_A
if self.use_reconst_loss:
reconst_loss_A = torch.mean((mnist - reconst_mnist) ** 2)
g_loss += reconst_loss_A
if self.use_distance_loss:
dist_A = self.get_distance_losses(mnist, fake_svhn, A_to_AB=True) * self.lambda_distance_A
g_loss += dist_A
elif self.use_self_distance:
dist_A = self.get_self_distances(mnist, fake_svhn, A_to_AB=True) * self.lambda_distance_A
g_loss += dist_A
g_loss.backward()
self.g_optimizer.step()
# train svhn-mnist-svhn cycle
self.reset_grad()
fake_mnist = self.g21(svhn)
out_mnist = self.d1(fake_mnist)
reconst_svhn = self.g12(fake_mnist)
gen_loss_B = torch.mean((out_mnist - 1) ** 2)
g_loss = gen_loss_B
if self.use_reconst_loss:
reconst_loss_B = torch.mean((svhn - reconst_svhn) ** 2)
g_loss += reconst_loss_B
if self.use_distance_loss:
dist_B = self.get_distance_losses(svhn, fake_mnist, A_to_AB=False) * self.lambda_distance_B
g_loss += dist_B
elif self.use_self_distance:
dist_B = self.get_self_distances(svhn, fake_mnist, A_to_AB=False) * self.lambda_distance_B
g_loss += dist_B
g_loss.backward()
self.g_optimizer.step()
# print the log info
if (step+1) % self.log_step == 0:
print('Step [%d/%d], d_real_loss: %.4f, d_mnist_loss: %.4f, d_svhn_loss: %.4f, '
'd_fake_loss: %.4f, gen_loss_A: %.4f, gen_loss_B: %.4f,'
%(step+1, self.train_iters, d_real_loss.data[0], d_mnist_loss.data[0],
d_svhn_loss.data[0], d_fake_loss.data[0], gen_loss_A.data[0],gen_loss_B.data[0]))
if self.use_reconst_loss:
print ('reconst_loss_A: %.4f, reconst_loss_B: %.4f, ' %
(reconst_loss_A.data[0], reconst_loss_B.data[0]))
if self.use_distance_loss or self.use_self_distance:
print ('dist_loss_A: %.4f, dist_loss_B: %.4f, ' %
(dist_A.data[0], dist_B.data[0]))
# save the sampled images
if (step+1) % self.sample_step == 0:
fake_svhn = self.g12(fixed_mnist)
fake_mnist = self.g21(fixed_svhn)
mnist, fake_mnist = self.to_data(fixed_mnist), self.to_data(fake_mnist)
svhn , fake_svhn = self.to_data(fixed_svhn), self.to_data(fake_svhn)
merged = self.merge_images(mnist, fake_svhn)
path = os.path.join(self.sample_path, 'sample-%d-m-s.png' %(step+1))
scipy.misc.imsave(path, merged)
print ('saved %s' %path)
merged = self.merge_images(svhn, fake_mnist)
path = os.path.join(self.sample_path, 'sample-%d-s-m.png' %(step+1))
scipy.misc.imsave(path, merged)
print ('saved %s' %path)
if (step+1) % 5000 == 0:
# save the model parameters for each epoch
g12_path = os.path.join(self.model_path, 'g12-%d.pkl' %(step+1))
g21_path = os.path.join(self.model_path, 'g21-%d.pkl' %(step+1))
d1_path = os.path.join(self.model_path, 'd1-%d.pkl' %(step+1))
d2_path = os.path.join(self.model_path, 'd2-%d.pkl' %(step+1))
torch.save(self.g12.state_dict(), g12_path)
torch.save(self.g21.state_dict(), g21_path)
torch.save(self.d1.state_dict(), d1_path)
torch.save(self.d2.state_dict(), d2_path)
|
507004 | from setuptools import find_packages, setup
setup(
name='datpy',
version='1.0.0',
py_modules=['datpy'],
description='Python dough for making Dat-flavored pies',
author='<NAME>',
author_email='<EMAIL>',
url='http://dat-data.com',
download_url='https://github.com/karissa/datpy/tarball/0.7.0',
keywords=['dat', 'python', 'analytics', 'data', 'data science', 'data sharing'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7'
],
test_suite="test.py"
)
|
507009 | from typing import Dict, List, Optional, Union
from spacy.language import Language
from edsnlp.utils.deprecation import deprecated_factory
from . import Pollution
DEFAULT_CONFIG = dict(
pollution=None,
)
@deprecated_factory("pollution", "eds.pollution", default_config=DEFAULT_CONFIG)
@Language.factory("eds.pollution", default_config=DEFAULT_CONFIG)
def create_component(
nlp: Language,
name: str,
pollution: Optional[Dict[str, Union[str, List[str]]]],
):
return Pollution(
nlp,
pollution=pollution,
)
|
507024 | import pydoodle
c = pydoodle.Compiler(clientId="client-id",
clientSecret="client-secret")
with open(file="test1.py") as f:
script = f.read()
f.close()
result = c.execute(script=script, language="python3")
usage = c.usage()
print(usage, result.output, sep='\n')
|
507025 | from django.utils.translation import gettext_lazy as _
WIDGET_TYPES = (
('text', _('Text')),
('textarea', _('Textarea')),
('yesno', _('Yes/No')),
('checkbox', _('Checkboxes')),
('radio', _('Radio buttons')),
('select', _('Select drop-down')),
('autocomplete', _('Autocomplete')),
('range', _('Range slider')),
('date', _('Date picker')),
('file', _('File upload'))
)
|
507030 | import unittest
import pulse as p
class SignalTest(unittest.TestCase):
"""This test Class is for signal_diff()"""
def test_signal_diff(self):
"""
This function test to assure that
signal_diff() returns a size of 200.
"""
testing_uid = "1kzd0DmeunLGEeB0nWLFFaIfuFZn"
pulse = p.Pulse()
pulse.pulsebox_to_frames(testing_uid)
red = pulse.signal_diff()
self.assertEqual(len(red), 200)
if __name__ == '__main__':
unittest.main()
|
507037 | from rest_framework import status
from rest_framework.response import Response
from rest_framework.generics import GenericAPIView
from ..permissions import IsAuthenticated
from ..models import Token
from ..app_settings import ActivateTokenSerializer
from ..authentication import TokenAuthenticationAllowInactive
from ..utils import decrypt_with_db_secret
class ActivateTokenView(GenericAPIView):
authentication_classes = (TokenAuthenticationAllowInactive, )
permission_classes = (IsAuthenticated,)
serializer_class = ActivateTokenSerializer
token_model = Token
allowed_methods = ('POST', 'OPTIONS', 'HEAD')
def get(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
def put(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
def post(self, request, *args, **kwargs):
"""
Activates a token
:param request:
:type request:
:param args:
:type args:
:param kwargs:
:type kwargs:
:return: 200 / 400
:rtype:
"""
serializer = self.get_serializer(data=self.request.data)
if not serializer.is_valid():
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
token = serializer.validated_data['token']
token.active = True
token.user_validator = None
token.save()
return Response({
"user": {
"id": request.user.id,
"authentication": 'AUTHKEY',
"email": decrypt_with_db_secret(request.user.email),
"secret_key": request.user.secret_key,
"secret_key_nonce": request.user.secret_key_nonce
}
},status=status.HTTP_200_OK)
def delete(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
|
507071 | from rest_framework.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from openbook_invitations.models import UserInvite
def invite_id_exists(invite_id):
if not UserInvite.objects.filter(id=invite_id).exists():
raise ValidationError(
_('The invite does not exist.'),
)
def check_invite_not_used(invite_id):
if UserInvite.objects.filter(id=invite_id, created_user__isnull=False).exists():
raise ValidationError(
_('The invite has already been used.'),
)
|
507080 | from moha import *
a = 1.0 #unit cell length
def square(a):
#define a 2D square lattice cell with vectors a1 a2 and a3
cell = Cell(2,[a, 0., 0.],[0., a, 0.],[0., 0., 0.])
#add a site labeled 'A' at position [0.,0.,0.]
cell.add_site(LatticeSite([0.,0.,0.],'A'))
#add a bond from first site in cell [0,0,0] to first site in cell [1,0,0]
cell.add_bond(LatticeBond([0,0,0],0,[1,0,0],0))
#add a bond from first site in cell [0,0,0] to first site in cell [0,1,0]
cell.add_bond(LatticeBond([0,0,0],0,[0,1,0],0))
#build a lattice of shape [4,4,1] with the cell we defined
lattice = Lattice(cell,[4,4,1])
return lattice
lattice = square(a)
#plot the lattice we constructed
lattice.plot()
|
507108 | import json
import logging
from pathlib import Path
from os.path import sep
def load_mitigation(mitigation_name: str, raise_error=False, force=False) -> dict:
"""
Wrapper for mitigation loader.
:param mitigation_name: Name of the mitigation to load
:param raise_error: Raise the error if any.
:type raise_error: bool
:param force: Force the load and ignore the cache
:type force: bool
:return: Dict of the mitigation if present, empty dict or raise error if not
:rtype: dict
:raise FileNotFoundError: If mitigation not found
"""
return MitigationLoader().load_mitigation(mitigation_name, raise_error, force)
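# Hypothetical usage: look up a mitigation description by name without raising on a missing file:
# details = load_mitigation("example_mitigation", raise_error=False)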
class MitigationLoader:
__cache = {}
def load_mitigation(
self, mitigation_name: str, raise_error=True, force=False
) -> dict:
"""
Load the mitigation and return the dict of the mitigation loaded
:param mitigation_name: Name of the mitigation to load
:param raise_error: Raise the error if any.
:type raise_error: bool
:param force: Force the load and ignore the cache
:type force: bool
:return: Dict of the mitigation if present, empty dict or raise error if not
:rtype: dict
:raise FileNotFoundError: If mitigation not found
"""
mitigation_name = mitigation_name.replace(" ", "_")
mitigation_name = mitigation_name.upper()
mitigation_path = Path(f"configs{sep}mitigations{sep}{mitigation_name}.json")
if not mitigation_path.exists():
if raise_error:
raise FileNotFoundError(
f"Mitigation file missing at {mitigation_path.absolute()}"
)
else:
logging.warning(
f"Mitigation file missing at {mitigation_path.absolute()}, returning empty dict."
)
return {}
if force:
with mitigation_path.open() as file:
mitigation_data = json.load(file)
self.__cache[mitigation_name] = mitigation_data.copy()
else:
if mitigation_name in self.__cache:
mitigation_data = self.__cache[mitigation_name].copy()
else:
mitigation_data = self.load_mitigation(
mitigation_name, raise_error, force=True
)
return mitigation_data
|
507123 | from can_decoder.dataframe.DataFrameDecoder import DataFrameDecoder
from can_decoder.dataframe.DataFrameGenericDecoder import DataFrameGenericDecoder
from can_decoder.dataframe.DataFrameJ1939Decoder import DataFrameJ1939Decoder
|
507141 | import functools
import json
import os.path as path
import cv2
import numpy as np
def exist_file_path(file_id):
if not path.exists(file_id):
raise FileNotFoundError(f'index: {file_id}\n')
else:
image_path = path.abspath(file_id)
return image_path
def match_file_path(file_id):
file_type = None
if path.exists(file_id + '.jpg'):
file_type = '.jpg'
elif path.exists(file_id + '.JPG'):
file_type = '.JPG'
elif path.exists(file_id + '.png'):
file_type = '.png'
elif path.exists(file_id + '.PNG'):
file_type = '.PNG'
elif path.exists(file_id + '.gif'):
pass
elif path.exists(file_id + '.GIF'):
pass
else:
raise FileNotFoundError(f'index: {file_id}\n')
if file_type is None:
image_path = None
else:
image_path = path.abspath(file_id + file_type)
return image_path
class CocoLabel:
def __init__(self, item_info, item_licenses, item_categories):
self.item_info = item_info
self.item_licenses = item_licenses
self.item_categories = item_categories
self.items_image = []
self.items_annotation = []
def dump(self, dst_label_file):
with open(dst_label_file, 'w') as f:
json.dump({'info': self.item_info, 'licenses': self.item_licenses, 'categories': self.item_categories,
'images': self.items_image, 'annotations': self.items_annotation}, f)
class GenerateUtil:
def __init__(self, src_info, with_dir_name, match_suffix, use_ignore):
self.src_info = src_info
self.with_dir_name = with_dir_name
self.match_suffix = match_suffix
self.use_ignore = use_ignore
@staticmethod
def generate_item_info():
info = {
"description": "ICDAR 2017 MLT Dataset",
"url": "http://rrc.cvc.uab.es",
"version": "0.1.0",
"year": 2018,
"contributor": "<NAME>",
"date_created": "2018/11/30"
}
return info
@staticmethod
def generate_item_licenses():
licenses = [
{
"id": 1,
"name": "ICDAR 2017 MLT",
"url": "http://rrc.cvc.uab.es"
}
]
return licenses
@staticmethod
def generate_item_categories():
categories = [
{
'id': 1,
'name': 'text',
'supercategory': 'instance',
}
]
return categories
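# Each 'images' entry gets a 1-based id and takes its width/height from the numpy image shape (height, width).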
@staticmethod
def generate_item_image(items_image, file_name, image_size):
image_info = {
"id": len(items_image) + 1,
"file_name": file_name,
"width": image_size[1],
"height": image_size[0],
"date_captured": '2018-11-30 16:00:00',
"license": 1,
"coco_url": '',
"flickr_url": ''
}
items_image.append(image_info)
@staticmethod
def generate_item_fake_annotation(items_annotation, image_id, i, image_size=None):
segmentation = [[1, 1, 1, 1, 1, 1, 1, 1]]
bounding_box = [0, 0, 1, 1]
annotation_info = {
"id": image_id,
"image_id": image_id,
"category_id": 1,
"iscrowd": 0,
"area": 1,
"bbox": bounding_box,
"segmentation": segmentation,
"width": 1,
"height": 1,
}
return items_annotation.append(annotation_info)
@staticmethod
def generate_item_true_annotation(items_annotation, image_id, image_index, image_size, label_path_template, use_ignore):
label_path = path.abspath(label_path_template % image_index)
with open(label_path) as f:
for line in f.readlines():
data = line.split(',')
segmentation = np.asarray(data[:8], dtype=int)
iscrowd = 0 if data[9] != '###\n' else 1
points = segmentation.reshape((-1, 2))
segmentation = [segmentation.tolist()]
area = cv2.contourArea(points)
bounding_box = cv2.boundingRect(points) # [x, y, w, h]
annotation_info = {
"id": len(items_annotation) + 1,
"image_id": image_id,
"category_id": 1,
"iscrowd": 0 if not use_ignore else iscrowd,
"area": area,
"bbox": bounding_box,
"segmentation": segmentation,
"width": image_size[1],
"height": image_size[0],
}
items_annotation.append(annotation_info)
def get_coco_label(self):
item_info = self.generate_item_info()
item_licenses = self.generate_item_licenses()
item_categories = self.generate_item_categories()
coco_label = CocoLabel(item_info, item_licenses, item_categories)
return coco_label
def insert_factory(self, data_type: str):
def _insert_annotation(i, coco_label: CocoLabel):
image_path = get_file_path(file_id=image_path_template % i)
# skip gif file format
if image_path is None:
return
image_name = get_image_name(image_path)
image_size = np.shape(cv2.imread(image_path))[:2]
generate_item_image(coco_label.items_image, image_name, image_size)
generate_item_annotation(coco_label.items_annotation, len(coco_label.items_image), i, image_size)
root_dir, image_dir_dict, image_template_dict, label_dir_dict, label_template_dict = self.src_info
assert data_type in image_dir_dict.keys()
image_path_template = path.join(root_dir, image_dir_dict[data_type], image_template_dict[data_type])
generate_item_image = self.generate_item_image
if data_type in label_dir_dict:
label_path_template = path.join(root_dir, label_dir_dict[data_type], label_template_dict[data_type])
generate_item_annotation = functools.partial(self.generate_item_true_annotation,
label_path_template=label_path_template,
use_ignore=self.use_ignore)
else:
generate_item_annotation = self.generate_item_fake_annotation
if self.with_dir_name:
get_image_name = lambda image_path: path.join(image_dir_dict[data_type], path.basename(image_path))
else:
get_image_name = lambda image_path: path.basename(image_path)
if self.match_suffix:
get_file_path = match_file_path
else:
get_file_path = exist_file_path
return _insert_annotation
|
507179 | from mando import command, main
@command
def po(a=2, b=3):
print(a ** b)
if __name__ == '__main__':
main()
|
507211 | def normalize(name):
name = name.capitalize()
return name
L1 = ['adam', 'LISA', 'barT']
L2 = list(map(normalize, L1))
print(L2)
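# Expected output: ['Adam', 'Lisa', 'Bart']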
|
507221 | src =Split('''
''')
component =aos_component('lib_mico_ble_firmware', src)
BT_CHIP = aos_global_config.get('BT_CHIP')
BT_CHIP_REVISION = aos_global_config.get('BT_CHIP_REVISION')
BT_CHIP_XTAL_FREQUENCY = aos_global_config.get('BT_CHIP_XTAL_FREQUENCY')
if BT_CHIP_XTAL_FREQUENCY is None:
src.add_sources( BT_CHIP+BT_CHIP_REVISION+'/bt_firmware_image.c')
else:
src.add_sources( BT_CHIP+BT_CHIP_REVISION+'/'+BT_CHIP_XTAL_FREQUENCY+'/bt_firmware_image.c')
|
507227 | import logging
import os
import toml
from test_framework.utils import (
TailableProc,
VERBOSE,
LOG_LEVEL,
MIRADORD_PATH,
)
from nacl.public import PrivateKey as Curve25519Private
# FIXME: it's a bit clumsy. Miradord should stick to be the `miradord` process object
# and we should have another class (PartialRevaultNetwork?) to stuff helpers and all
# info not strictly necessary to running the process.
class Miradord(TailableProc):
def __init__(
self,
datadir,
deposit_desc,
unvault_desc,
cpfp_desc,
emer_addr,
listen_port,
noise_priv,
stk_noise_key,
coordinator_noise_key,
coordinator_port,
bitcoind_rpcport,
bitcoind_cookie,
plugins=[],
):
"""All public keys must be hex"""
TailableProc.__init__(self, datadir, verbose=VERBOSE)
self.prefix = os.path.split(datadir)[-1]
self.noise_secret = noise_priv
self.listen_port = listen_port
self.deposit_desc = deposit_desc
self.unvault_desc = unvault_desc
self.cpfp_desc = cpfp_desc
self.emer_addr = emer_addr
# The data is stored in a per-network directory. We need to create it
# in order to write the Noise private key
self.datadir_with_network = os.path.join(datadir, "regtest")
os.makedirs(self.datadir_with_network, exist_ok=True)
self.conf_file = os.path.join(datadir, "config.toml")
self.cmd_line = [MIRADORD_PATH, "--conf", f"{self.conf_file}"]
self.noise_secret_file = os.path.join(self.datadir_with_network, "noise_secret")
with open(self.noise_secret_file, "wb") as f:
f.write(noise_priv)
wt_noise_key = bytes(Curve25519Private(noise_priv).public_key)
logging.debug(
f"Watchtower Noise key: {wt_noise_key.hex()}, Stakeholder Noise key: {stk_noise_key}"
)
with open(self.conf_file, "w") as f:
f.write(f"data_dir = '{datadir}'\n")
f.write("daemon = false\n")
f.write(f"log_level = '{LOG_LEVEL}'\n")
f.write(f'stakeholder_noise_key = "{stk_noise_key}"\n')
f.write(f'coordinator_host = "127.0.0.1:{coordinator_port}"\n')
f.write(f'coordinator_noise_key = "{coordinator_noise_key}"\n')
f.write("coordinator_poll_seconds = 5\n")
f.write(f'listen = "127.0.0.1:{listen_port}"\n')
f.write("[scripts_config]\n")
f.write(f'deposit_descriptor = "{deposit_desc}"\n')
f.write(f'unvault_descriptor = "{unvault_desc}"\n')
f.write(f'cpfp_descriptor = "{cpfp_desc}"\n')
f.write(f'emergency_address = "{emer_addr}"\n')
f.write("[bitcoind_config]\n")
f.write('network = "regtest"\n')
f.write(f"cookie_path = '{bitcoind_cookie}'\n")
f.write(f"addr = '127.0.0.1:{bitcoind_rpcport}'\n")
f.write("poll_interval_secs = 5\n")
f.write(f"\n{toml.dumps({'plugins': plugins})}\n")
def start(self):
TailableProc.start(self)
self.wait_for_logs(
["bitcoind now synced", "Listener thread started", "Started miradord."]
)
def stop(self, timeout=10):
return TailableProc.stop(self)
def cleanup(self):
try:
self.stop()
except Exception:
self.proc.kill()
def add_plugins(self, plugins):
"""Takes a list of dict representing plugin config to add to the watchtower and
restarts it."""
self.stop()
conf = toml.loads(open(self.conf_file, "r").read())
if "plugins" not in conf:
conf["plugins"] = []
conf["plugins"] += plugins
open(self.conf_file, "w").write(toml.dumps(conf))
self.start()
def remove_plugins(self, plugins_paths):
self.stop()
conf = toml.loads(open(self.conf_file, "r").read())
conf["plugins"] = [p for p in conf["plugins"] if p["path"] not in plugins_paths]
open(self.conf_file, "w").write(toml.dumps(conf))
self.start()
|
507235 | from django.conf import settings
from django.http import HttpResponseRedirect
# We need to pull this middleware in here since it's old and no longer maintained, so we have to take care
# of it ourselves. Please note that this middleware is not exactly the same as the published one.
class BrowscapParser(object):
DEFAULT_UA_STRINGS = (
'Android',
'BlackBerry',
'IEMobile',
'Maemo',
'Opera Mini',
'SymbianOS',
'WebOS',
'Windows Phone',
'iPhone',
'iPod',
)
def __init__(self):
self._cache = {}
def detect_mobile(self, user_agent):
try:
return self._cache[user_agent]
except KeyError:
for lookup in BrowscapParser.DEFAULT_UA_STRINGS:
if lookup in user_agent:
self._cache[user_agent] = True
break
else:
self._cache[user_agent] = False
return self._cache[user_agent]
browser = BrowscapParser()
class MobileRedirectMiddleware(object):
def process_request(self, request):
user_agent = request.META.get('HTTP_USER_AGENT', '')
is_mobile = browser.detect_mobile(user_agent)
request.is_mobile = is_mobile
request_host = request.META.get('HTTP_HOST', '')
if is_mobile and request_host != settings.SITE_INFO['mobile_host']:
jump_url = "http://%s%s" % (settings.SITE_INFO['mobile_host'], request.path)
return HttpResponseRedirect(jump_url)
|
507237 | from django.http import HttpResponse
from django.views.generic import View
from final.utils import render_to_pdf
from bus_board.views import bus_details
from bus_board.models import Ticket
# class GeneratePdf(View):
# def get(self, request, *args, **kwargs):
# # data = {
# # 'today': datetime.date.today(),
# # 'amount': 39.99,
# # 'customer_name': '<NAME>',
# # 'order_id': 1233434,
# # }
# # print(Ticket.get_single_ticket(ticket_id))
# pdf = render_to_pdf('pdf/ticket.html')
# return HttpResponse(pdf, content_type='application/pdf')
def generate_view(request, ticket_id):
# data = {
# 'today': datetime.date.today(),
# 'amount': 39.99,
# 'customer_name': '<NAME>',
# 'order_id': 1233434,
# }
gotten_ticket = Ticket.get_single_ticket(ticket_id)
pdf = render_to_pdf('pdf/ticket.html', {'gotten_ticket':gotten_ticket})
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
filename = "Ticket_%s.pdf" %(gotten_ticket.ticket_number)
content = "inline; filename='%s'"%(filename)
download = request.GET.get("download")
if download:
content = "attachment; filename='%s'"%(filename)
response['Content-Disposition'] = content
return response
return HttpResponse('Not Found')
|
507294 | import pytest
import json
class TestRemoveSwitchPartitionMember:
SWITCH_PARTITION_MEMBER_TEST_DATA = [
['default', '00:00:00:00:00:00:00:00', '', '', '', 'add_default_member_output.json'],
['default', '', 'backend-0-0', 'ib0', '', 'add_default_member_output.json'],
['Default', '', 'backend-0-0', 'ib0', 'limited', 'add_default_member_output.json'],
['default', '', 'backend-0-0', 'ib0', 'full', 'add_default_member_full_output.json'],
['aaa', '', 'backend-0-0', 'ib0', '', 'add_nondefault_member_output.json'],
['AaA', '', 'backend-0-0', 'ib0', '', 'add_nondefault_member_output.json'],
['AaA', '', 'backend-0-0', 'ib0', 'limited', 'add_nondefault_member_output.json'],
['0x0aaa', '00:00:00:00:00:00:00:00', '', '', 'full', 'add_nondefault_member_full_output.json'],
]
SWITCH_PARTITION_MEMBER_NEGATIVE_TEST_DATA = [
['0xfred', '', 'backend-0-0', 'ib0'],
['default', '', 'no-such-host', 'ib0'],
['Default', '', 'backend-0-0', 'fake_iface'],
['0x0aaa', '00:00:00:00:00:00:00:00', 'backend-0-0', ''],
['0x0aaa', '00:00:00:00:00:00:00:00', '', 'ib0'],
['0x0aaa', 'fake:guid', '', ''],
]
@pytest.mark.parametrize("partition_name,guid,hostname,interface,membership,output_file", SWITCH_PARTITION_MEMBER_TEST_DATA)
def test_behavior(self, host, add_ib_switch, add_ib_switch_partition, add_host_with_interface,
partition_name, guid, hostname, interface, membership, output_file, test_file):
with open(test_file(f'add/{output_file}')) as f:
expected_output = f.read()
result = host.run(f'stack add host interface backend-0-0 interface=ib0 mac=00:00:00:00:00:00:00:00')
assert result.rc == 0
if partition_name.lower() != 'default':
add_ib_switch_partition('switch-0-0', partition_name, None)
# command can be called with guid or with hostname+iface
cmd = [f'stack set switch partition membership switch-0-0 name={partition_name}']
params = []
if guid:
params.append(f'guid={guid}')
elif hostname and interface:
params.append(f'member={hostname} interface={interface}')
if membership:
params.append(f'membership={membership}')
result = host.run(' '.join(cmd + params))
assert result.rc == 0
# list switch partition member does not list partitions which have no members
result = host.run('stack list switch partition member switch-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == json.loads(expected_output)
# command can be called with guid or with hostname+iface
cmd = [f'stack remove switch partition member switch-0-0 name={partition_name}']
result = host.run(' '.join(cmd + params))
assert result.rc == 0
result = host.run('stack list switch partition member switch-0-0 output-format=json')
assert result.rc == 0
assert result.stdout.strip() == ''
def test_negative_behavior(self, host, add_host_with_interface, add_ib_switch, add_ib_switch_partition, test_file):
with open(test_file('add/add_default_member_output.json')) as f:
expected_output = f.read()
# add a host...
partition_name = 'default'
guid = '00:00:00:00:00:00:00:00'
result = host.run(f'stack add host interface backend-0-0 interface=ib0 mac={guid}')
assert result.rc == 0
# should be able to add
result = host.run(f'stack add switch partition member switch-0-0 name=Default guid={guid}')
assert result.rc == 0
# should error on invalid name
result = host.run(f'stack remove switch partition member switch-0-0 name=fake guid={guid}')
assert result.rc != 0
assert result.stderr.strip() != ''
# should error on valid but non-existing partition
result = host.run(f'stack remove switch partition member switch-0-0 name=aaa guid={guid}')
assert result.rc != 0
assert result.stderr.strip() != ''
# bad remove should leave db same
result = host.run('stack list switch partition member switch-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == json.loads(expected_output)
# should not error on valid, existing name with non-existing guid
result = host.run(f'stack remove switch partition member switch-0-0 name=default guid=5')
assert result.rc == 0
assert result.stderr.strip() == ''
assert result.stdout.strip() == ''
# ... but it also shouldn't do anything.
result = host.run('stack list switch partition member switch-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == json.loads(expected_output)
@pytest.mark.parametrize("partition_name,guid,hostname,interface", SWITCH_PARTITION_MEMBER_NEGATIVE_TEST_DATA)
def test_bad_input(self, host, add_ib_switch, add_ib_switch_partition, add_host_with_interface,
partition_name, guid, hostname, interface, test_file):
with open(test_file('add/add_default_member_output.json')) as f:
expected_output = f.read()
# add a host...
host_guid = '00:00:00:00:00:00:00:00'
result = host.run(f'stack add host interface backend-0-0 interface=ib0 mac={host_guid}')
assert result.rc == 0
result = host.run(f'stack add switch partition member switch-0-0 name=default guid={host_guid}')
assert result.rc == 0
# command can be called with guid or with hostname+iface
cmd = [f'stack remove switch partition member switch-0-0 name={partition_name}']
params = []
if guid:
params.append(f'guid={guid}')
if hostname:
params.append(f'member={hostname}')
if interface:
params.append(f'interface={interface}')
result = host.run(' '.join(cmd + params))
assert result.rc != 0
assert result.stderr.strip() != ''
assert result.stdout.strip() == ''
# list switch partition member does not list partitions which have no members
result = host.run('stack list switch partition member switch-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == json.loads(expected_output)
def test_passed_no_args(self, host, add_ib_switch):
result = host.run(f'stack remove switch partition member name=default')
assert result.rc != 0
assert result.stderr.strip() != ''
def test_can_remove_twice(self, host, add_host_with_interface, add_ib_switch, add_ib_switch_partition, test_file):
with open(test_file('add/add_default_member_output.json')) as f:
expected_output = f.read()
partition_name = 'default'
guid = '00:00:00:00:00:00:00:00'
result = host.run(f'stack add host interface backend-0-0 interface=ib0 mac={guid}')
assert result.rc == 0
result = host.run(f'stack add switch partition member switch-0-0 name={partition_name} guid={guid}')
assert result.rc == 0
result = host.run('stack list switch partition member switch-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == json.loads(expected_output)
# should be able to remove all day long
for i in range(2):
result = host.run(f'stack remove switch partition switch-0-0 name={partition_name} guid={guid}')
assert result.rc == 0
assert result.stdout.strip() == ''
result = host.run('stack list switch partition member switch-0-0 output-format=json')
assert result.rc == 0
assert result.stdout.strip() == ''
assert result.stderr.strip() == ''
@pytest.mark.skip()
def test_can_remove_names_that_resolve_same(self, host, add_ib_switch, test_file):
with open(test_file('add/add_nondefault_partition_output.json')) as f:
expected_output = f.read()
same_parts = ['aaa', '0xaaa', '0x0aaa', 'AAA']
for partition_name in same_parts[1:]:
result = host.run(f'stack add switch partition switch-0-0 name=aaa')
assert result.rc == 0
result = host.run('stack list switch partition switch-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == json.loads(expected_output)
result = host.run(f'stack remove switch partition switch-0-0 name={partition_name}')
assert result.rc == 0
result = host.run('stack list switch partition switch-0-0 output-format=json')
assert result.rc == 0
assert result.stdout.strip() == ''
def test_cannot_remove_from_non_ib(self, host, add_switch):
result = host.run(f'stack remove switch partition member switch-0-0 name=Default')
assert result.rc != 0
assert result.stderr.strip() != ''
def test_cannot_remove_with_enforce_sm(self, host, add_ib_switch):
# by design this should fail if there's no actual switch to talk to.
result = host.run(f'stack remove switch partition member switch-0-0 name=Default enforce_sm=true')
assert result.rc != 0
assert result.stderr.strip() != ''
@pytest.mark.skip()
@pytest.mark.parametrize("partition_name,guid,hostname,interface,membership,output_file", SWITCH_PARTITION_MEMBER_TEST_DATA)
def test_two_switches_same_partition_name(self, host, add_ib_switch,
partition_name, guid, hostname, interface, membership, output_file, test_file):
with open(test_file(f'add/{output_file}')) as f:
expected_output = f.read()
# add second switch
add_ib_switch('switch-0-1', '0', '1', 'switch', 'Mellanox', 'm7800', 'infiniband')
result = host.run(f'stack add switch partition switch-0-0 name={partition_name} options="{options}"')
assert result.rc == 0
result = host.run(f'stack add switch partition switch-0-1 name={partition_name} options="{options}"')
assert result.rc == 0
result = host.run('stack list switch partition switch-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == json.loads(expected_output)
# output here should be same as the output for switch-0-0, except for the name of the switch
result = host.run('stack list switch partition switch-0-1 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout.strip().replace('switch-0-1', 'switch-0-0')) == json.loads(expected_output)
result = host.run('stack remove switch partition switch-0-0 switch-0-1 output-format=json')
assert result.rc == 0
result = host.run('stack list switch partition switch-0-1 output-format=json')
assert result.rc == 0
assert result.stdout.strip() == ''
def test_remove_everything(self, host, add_host_with_interface, add_ib_switch, add_ib_switch_partition):
add_host_with_interface('backend-0-1', '0', '1', 'backend', 'eth0')
# add hosts with ib interfaces
for i in range(2):
result = host.run(f'stack add host interface backend-0-{i} interface=ib0 mac=00:00:00:00:00:00:00:0{i}')
assert result.rc == 0
# add second switch
add_ib_switch('switch-0-1', '0', '1', 'switch', 'Mellanox', 'm7800', 'infiniband')
add_ib_switch_partition('switch-0-1', 'default', None)
add_ib_switch_partition('switch-0-0', 'aaa', None)
add_ib_switch_partition('switch-0-1', 'aaa', None)
for i in range(2):
cmd = f'stack set switch partition membership switch-0-{i} name=default guid=00:00:00:00:00:00:00:0{i}'
result = host.run(cmd)
assert result.rc == 0
cmd = f'stack set switch partition membership switch-0-{i} name=aaa guid=00:00:00:00:00:00:00:0{i}'
result = host.run(cmd)
assert result.rc == 0
result = host.run('stack list switch partition member switch-0-0 switch-0-1 output-format=json')
assert result.rc == 0
assert len(json.loads(result.stdout.strip())) == 4
result = host.run('stack remove switch partition member switch-0-0')
assert result.rc == 0
result = host.run('stack list switch partition member switch-0-0 switch-0-1 output-format=json')
assert result.rc == 0
assert len(json.loads(result.stdout.strip())) == 2
result = host.run('stack remove switch partition member switch-0-1')
assert result.rc == 0
result = host.run('stack list switch partition member switch-0-0 switch-0-1 output-format=json')
assert result.rc == 0
assert result.stdout.strip() == ''
|
507326 | import tempfile
from dagster import ResourceDefinition, fs_io_manager, mem_io_manager
from dagster_pyspark import pyspark_resource
from hacker_news_assets.pipelines.download_pipeline import download_comments_and_stories_dev
from hacker_news_assets.resources.hn_resource import hn_snapshot_client
from hacker_news_assets.resources.parquet_io_manager import partitioned_parquet_io_manager
def test_download():
with tempfile.TemporaryDirectory() as temp_dir:
result = download_comments_and_stories_dev.graph.execute_in_process(
run_config={
"resources": {
"partition_start": {"config": "2020-12-30 00:00:00"},
"partition_end": {"config": "2020-12-30 01:00:00"},
"parquet_io_manager": {"config": {"base_path": temp_dir}},
}
},
resources={
"io_manager": fs_io_manager,
"partition_start": ResourceDefinition.string_resource(),
"partition_end": ResourceDefinition.string_resource(),
"parquet_io_manager": partitioned_parquet_io_manager,
"warehouse_io_manager": mem_io_manager,
"pyspark": pyspark_resource,
"hn_client": hn_snapshot_client,
},
)
assert result.success
|
507350 | from setuptools import setup
setup(
name='nrgpy_docs',
version='0.0.1',
description='nrgpy formatting for readthedocs',
url='https://github.com/nrgpy/nrgpy',
author='NRG Systems, Inc.',
author_email='<EMAIL>',
keywords='nrg systems rld symphonie symphoniepro wind data spidar remote sensor lidar',
packages=[
'nrgpy_docs'
],
install_requires=[
'sphinx',
],
python_requires='>=3.0',
zip_safe=False,
classifiers=[
'License :: OSI Approved :: MIT License'
]
)
|
507356 | import sys
from functools import wraps
def validate(*types, **kwargs):
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
return decorator
def py3_to_bytes(bytes_or_str):
if sys.version_info[0] > 2 and isinstance(bytes_or_str, str):
return bytes_or_str.encode('utf-8')
return bytes_or_str
|
507442 | from . import couple
from onix.passport import *
from onix.sequence import *
from onix.cell import *
from onix.system import *
from onix.passlist import *
from onix.standalone import *
|
507444 | import json
from datetime import datetime, timedelta
from app.api.now_applications.models.now_application_status import NOWApplicationStatus
from tests.now_application_factories import NOWApplicationIdentityFactory, NOWApplicationFactory
from tests.factories import MineFactory
class TestNOWApplicationStatus:
"""GET /now-applications/application-status-codes"""
def test_get_application_status_codes(self, test_client, db_session, auth_headers):
"""Should return the correct number of records with a 200 response code"""
get_resp = test_client.get(
f'/now-applications/application-status-codes', headers=auth_headers['full_auth_header'])
get_data = json.loads(get_resp.data.decode())
assert get_resp.status_code == 200
assert len(get_data['records']) == len(NOWApplicationStatus.get_all())
"""PUT /now_applications/ID/status"""
def test_put_application_status(self, test_client, db_session, auth_headers):
mine = MineFactory(major_mine_ind=True, mine_permit_amendments=1)
now_application = NOWApplicationFactory(application_progress=None)
now_application_identity = NOWApplicationIdentityFactory(
now_application=now_application, mine=mine)
put_resp = test_client.put(
f'/now-applications/{now_application_identity.now_application_guid}/status',
json={
'issue_date': datetime.now().isoformat(),
'auth_end_date': (datetime.now() + timedelta(days=30)).isoformat(),
'now_application_status_code': 'REJ'
},
headers=auth_headers['full_auth_header'])
assert put_resp.status_code == 200, put_resp.response
def test_put_application_status_WDN(self, test_client, db_session, auth_headers):
mine = MineFactory(major_mine_ind=True, mine_permit_amendments=1)
now_application = NOWApplicationFactory(application_progress=None)
now_application_identity = NOWApplicationIdentityFactory(
now_application=now_application, mine=mine)
put_resp = test_client.put(
f'/now-applications/{now_application_identity.now_application_guid}/status',
json={
'issue_date': datetime.now().isoformat(),
'auth_end_date': (datetime.now() + timedelta(days=30)).isoformat(),
'now_application_status_code': 'WDN'
},
headers=auth_headers['full_auth_header'])
assert put_resp.status_code == 200, put_resp.response
|
507454 | from django.utils.translation import ugettext as _
from django.db import models
# Create your models here.
MESSAGES = (
_("Hello!"),
_('Bye!'),
_('Thank you!'),
)
|
507474 | import logging
from polyswarmclient.parameters import Parameters
logger = logging.getLogger(__name__)
class StakingClient(object):
def __init__(self, client):
self.__client = client
self.parameters = {}
async def fetch_parameters(self, chain, api_key=None):
"""Get staking parameters from polyswarmd
Args:
chain (str): Which chain to operate on
api_key (str): Override default API key
Returns:
Response JSON parsed from polyswarmd containing staking parameters
"""
result = {
'minimum_stake': 0,
'maximum_stake': 0,
'vote_ratio_numerator': 9,
'vote_ratio_denominator': 10
}
self.parameters[chain] = Parameters(result)
async def get_total_balance(self, chain, api_key=None):
"""Get total staking balance from polyswarmd
Args:
chain (str): Which chain to operate on
api_key (str): Override default API key
Returns:
Response JSON parsed from polyswarmd containing staking balance
"""
return 0
async def get_withdrawable_balance(self, chain, api_key=None):
"""Get withdrawable staking balance from polyswarmd
Args:
chain (str): Which chain to operate on
api_key (str): Override default API key
Returns:
Response JSON parsed from polyswarmd containing staking balance
"""
return 0
async def post_deposit(self, amount, chain, api_key=None):
"""Post a deposit to the staking contract
Args:
amount (int): The amount to stake
chain (str): Which chain to operate on
api_key (str): Override default API key
Returns:
Response JSON parsed from polyswarmd containing emitted events
"""
pass
async def post_withdraw(self, amount, chain, api_key=None):
"""Post a withdrawal to the staking contract
Args:
amount (int): The amount to withdraw
chain (str): Which chain to operate on
api_key (str): Override default API key
Returns:
Response JSON parsed from polyswarmd containing emitted events
"""
pass
|
507506 | from base import DataSourceBase
from datasource_exceptions import InvalidDataSourceFormatException
__all__ = ['FileDataSource', 'File64DataSource']
class FileDataSource(DataSourceBase):
datasource_name = 'file'
def __init__(self, data_source):
super(FileDataSource, self).__init__(data_source)
if not ':' in data_source:
raise InvalidDataSourceFormatException("FileDataSource must be in name:path_to_file format")
name, path = data_source.split(':', 1)
with open(path) as f:
self.data = {name: f.read(-1)}
class File64DataSource(DataSourceBase):
datasource_name = 'file64'
def __init__(self, data_source):
super(File64DataSource, self).__init__(data_source)
if not ':' in data_source:
raise InvalidDataSourceFormatException("File64DataSource must be in name:path_to_file format")
name, path = data_source.split(':', 1)
with open(path) as f:
self.data = {name: f.read(-1).encode('base64')}
|
507508 | from app import application as app
import app.protobufs.decisiontrees_pb2 as pb
from flask.ext.pymongo import PyMongo
from protobuf_to_dict import protobuf_to_dict
import fake_data
import json
import unittest
class FlaskrTestCase(unittest.TestCase):
def setUp(self):
app.config['TEST_DBNAME'] = 'ui_test'
try:
app.mongo = PyMongo(app, config_prefix='TEST')
except Exception:
pass
self.client = app.test_client()
num_trees, height = (5, 5)
self.row = pb.TrainingRow(
forestConfig=fake_data.fake_config(num_trees),
forest=fake_data.fake_forest(height, num_trees),
)
with app.test_request_context():
self._id = str(app.mongo.db.decisiontrees.insert(
protobuf_to_dict(self.row))
)
def test_decision_tree_list(self):
rv = self.client.get('/api/decisiontrees/')
result = json.loads(rv.data)
self.assertEqual(len(result), 1)
self.assertDecisionTreeEqual(result[0])
def assertDecisionTreeEqual(self, response):
self.assertEqual(response["_id"], self._id)
self.assertEqual(
response["forestConfig"],
protobuf_to_dict(self.row.forestConfig)
)
self.assertEqual(response["forest"], protobuf_to_dict(self.row.forest))
def test_decision_tree_detail(self):
rv = self.client.get('/api/decisiontrees/{0}'.format(self._id))
result = json.loads(rv.data)
self.assertDecisionTreeEqual(result)
def test_decision_tree_nonexistent(self):
rv = self.client.get('/api/decisiontrees/{0}'.format(0))
result = json.loads(rv.data)
self.assertEqual(result['status'], 500)
def tearDown(self):
with app.test_request_context():
app.mongo.db.decisiontrees.remove()
if __name__ == '__main__':
unittest.main()
|
507531 | import re
import shutil
import subprocess
import time
from mutagen.id3 import APIC, ID3, TIT2, TPE1, error
from mutagen.mp3 import MP3
from requests import Session
# Characters on which a message can be split
message_breakers = ["\n", ", "]
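# Persist a single option back to the INI config file and return the value that was written.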
def update_parameter(config, section, name, num, config_path="../config.ini") -> int:
config.set(section, name, str(num))
with open(config_path, "w", encoding="utf-8") as f:
config.write(f)
return num
def split(text: str, max_message_length: int = 4091) -> list:
"""Разделение текста на части
:param text: Разбиваемый текст
:param max_message_length: Максимальная длина разбитой части текста
"""
if len(text) >= max_message_length:
last_index = max(map(lambda separator: text.rfind(separator, 0, max_message_length), message_breakers))
good_part = text[:last_index]
bad_part = text[last_index + 1 :]
return [good_part] + split(bad_part, max_message_length)
else:
return [text]
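# Hypothetical example: each returned chunk stays within max_message_length characters.
# parts = split("word, " * 2000)
# assert all(len(p) <= 4091 for p in parts)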
def list_splitter(lst: list, n: int) -> list:
return [lst[i : i + n] for i in range(0, len(lst), n)]
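# For example, list_splitter([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]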
def build_menu(buttons, n_cols, header_buttons=None, footer_buttons=None):
menu = [buttons[i : i + n_cols] for i in range(0, len(buttons), n_cols)]
if header_buttons:
menu.insert(0, header_buttons)
if footer_buttons:
menu.append(footer_buttons)
return menu
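# For example, build_menu(['a', 'b', 'c'], n_cols=2) -> [['a', 'b'], ['c']]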
def start_process(command: list) -> int:
process = subprocess.Popen(command)
while process.poll() is None:
time.sleep(1)
return process.returncode
def add_audio_tags(filename, artist, title, track_cover):
audio = MP3(filename, ID3=ID3)
# add ID3 tag if it doesn't exist
try:
audio.add_tags()
except error as e:
if str(e) != "an ID3 tag already exists":
return False
audio.clear()
if track_cover:
audio.tags.add(
APIC(
encoding=3, # 3 is for utf-8
mime="image/png", # image/jpeg or image/png
type=3, # 3 is for the cover image
desc=u"Cover",
data=open(track_cover, "rb").read(),
)
)
audio.tags.add(TIT2(encoding=3, text=title))
audio.tags.add(TPE1(encoding=3, text=artist))
audio.save()
return True
def download_video(session: Session, link: str):
filereq = session.get(link, stream=True)
res = re.findall(r"id=(\d*)(&type)?", link)
if res:
file = res[0][0] + ".mp4"
else:
file = re.findall(r"\/(.*)\/(.*)\?", link)[0][1]
with open(file, "wb") as receive:
shutil.copyfileobj(filereq.raw, receive)
del filereq
return file
|