hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (sequencelengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (sequencelengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (sequencelengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4d5194e0e10a84e11cc1527894e07b73e9d9e2bc | 5,691 | py | Python | tutel/examples/helloworld_deepspeed.py | EricWangCN/tutel | f1bfee19e16348b747702c38c871256a420828b0 | ["MIT"] | 1 | 2022-02-23T11:17:00.000Z | 2022-02-23T11:17:00.000Z | tutel/examples/helloworld_deepspeed.py | EricWangCN/tutel | f1bfee19e16348b747702c38c871256a420828b0 | ["MIT"] | null | null | null | tutel/examples/helloworld_deepspeed.py | EricWangCN/tutel | f1bfee19e16348b747702c38c871256a420828b0 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import time
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.distributed as dist
from torch import nn
import argparse
import deepspeed
import logging
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, default=-1)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--num_tokens', type=int, default=1024)
parser.add_argument('--model_dim', type=int, default=2048)
parser.add_argument('--hidden_size', type=int, default=2048)
parser.add_argument('--num_local_experts', type=int, default=2)
parser.add_argument('--dtype', type=str, default='float32')
parser.add_argument('--fp32_gate', default=False, action='store_true')
parser.add_argument('--top', type=int, default=2)
parser.add_argument('--use_tutel', default=False, action='store_true')
parser.add_argument('--num_steps', type=int, default=100)
args = parser.parse_args()
try:
if dist.is_available():
dist.init_process_group('nccl')
dist_rank = dist.get_rank()
dist_world_size = dist.get_world_size()
def dist_print(*args):
if dist_rank == 0:
print(*args)
except:
dist_rank = 0
dist_world_size = 1
dist_print = print
args.local_rank = args.local_rank if args.local_rank >= 0 else int(os.environ.get('LOCAL_RANK', 0))
torch.cuda.set_device(args.local_rank)
batch_size = args.batch_size
num_tokens = args.num_tokens
model_dim = args.model_dim
hidden_size = args.hidden_size
num_local_experts = args.num_local_experts
top_value = args.top
local_rank = args.local_rank
device = torch.device('cuda', args.local_rank)
if args.dtype == 'float32':
torch.set_default_dtype(torch.float32)
elif args.dtype == 'float64':
torch.set_default_dtype(torch.float64)
elif args.dtype == 'float16':
torch.set_default_dtype(torch.float16)
elif args.dtype == 'bfloat16':
torch.set_default_dtype(torch.bfloat16)
else:
raise Exception('Unrecognized data type specified: %s' % args.dtype)
assert deepspeed.version == '0.5.6'
torch.manual_seed(0)
deepspeed.init_distributed()
deepspeed.utils.groups.initialize(ep_size=dist_world_size)
class ExpertModel(torch.nn.Module):
def __init__(self, model_dim, hidden_size, activation_fn):
super().__init__()
self.fc1 = torch.nn.Linear(model_dim, hidden_size, bias=True)
self.fc2 = torch.nn.Linear(hidden_size, model_dim, bias=True)
self.activation_fn = activation_fn
def forward(self, x):
x = self.fc1(x)
x = self.activation_fn(x)
x = self.fc2(x)
return x
class ExampleModel(torch.nn.Module):
def __init__(self):
super().__init__()
self._moe_layer = deepspeed.moe.layer.MoE(
hidden_size = hidden_size,
expert = ExpertModel(model_dim, hidden_size, lambda x: F.relu(x)),
num_experts = num_local_experts * dist_world_size,
k = top_value,
use_tutel = args.use_tutel
)
for name, param in self._moe_layer.named_parameters():
if '.experts.' in name:
setattr(param, 'skip_allreduce', True)
# Summary of different parameter types: gate, local_experts
local_count = sum([torch.numel(param) for name, param in self._moe_layer.named_parameters() if '.experts.' in name])
shared_count = sum([torch.numel(param) for name, param in self._moe_layer.named_parameters() if '.gate.' in name])
dist_print('[Statistics] param count for MoE local_experts = %s, param count for MoE gate = %s.\n' % (local_count, shared_count))
def forward(self, input):
result, _, _ = self._moe_layer(input)
result = F.log_softmax(torch.sum(result, dim=2), dim=1)
return result
model = ExampleModel().to(device)
dist_print(model)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-5)
torch.manual_seed(0)
x = torch.tensor(torch.randn([batch_size, num_tokens, model_dim], dtype=torch.float32, device='cpu').detach().numpy(), dtype=torch.get_default_dtype(), requires_grad=False, device=device)
y = torch.LongTensor(batch_size).random_(1).to(device)
tuples = (dist_world_size, args.dtype, model_dim, hidden_size, batch_size * num_tokens, num_local_experts, top_value, device)
dist_print('[Benchmark] world_size = %s, dtype = %s, model_dim = %s, hidden_size = %s, samples = %s, num_local_experts = %s, topK = %s, device = `%s`' % tuples)
average_time, num_steps = 0, args.num_steps
params_for_all_reduce = [p for p in model.parameters() if not hasattr(p, 'skip_allreduce') and getattr(p, 'requires_grad', False)]
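# Benchmark loop: forward pass, NLL loss, backward pass, manual all-reduce of the
# shared (non-expert) parameter gradients, optimizer step; each iteration is timed
# around torch.cuda.synchronize() calls.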
for i in range(num_steps):
torch.cuda.synchronize()
t_start = time.time()
optimizer.zero_grad()
output = model(x)
loss = F.nll_loss(output, y)
loss.backward()
if dist_world_size > 1:
for p in params_for_all_reduce:
p.grad /= dist_world_size
dist.all_reduce(p.grad)
optimizer.step()
torch.cuda.synchronize()
t_stop = time.time()
num_global_experts = num_local_experts * dist_world_size
args.top = min(args.top, num_global_experts)
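    # Rough throughput estimate: 4*B*T*M*H flops for the two expert GEMMs, scaled by
    # the top-k routing fan-out and by 3 to approximate forward plus backward work.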
tflops = (batch_size * num_tokens * model_dim * hidden_size) * 4 * args.top * 3 * 1e-12 / (t_stop - t_start)
dist_print('STEP-%s: loss = %.5f, step_time = %.6f sec, perf = %.2f tflops.' % (i, float(loss.data), t_stop - t_start, tflops))
if i + 10 >= num_steps:
average_time += t_stop - t_start
average_time /= 10
dist_print('\n[Summary] Average synchronized step_time = %s sec.' % average_time)
| 35.12963 | 187 | 0.69689 |
c77263ac509df2aaa5d740e70b3e6bdba7f8d414 | 2,489 | py | Python | dlapi.py | omnibus661/omnidl | 4e4af09fa12e5d089e60ded05e4c276ef73168c9 | ["MIT"] | null | null | null | dlapi.py | omnibus661/omnidl | 4e4af09fa12e5d089e60ded05e4c276ef73168c9 | ["MIT"] | null | null | null | dlapi.py | omnibus661/omnidl | 4e4af09fa12e5d089e60ded05e4c276ef73168c9 | ["MIT"] | null | null | null |
import json
import sys
import os
import logging
import asyncio
from threading import Thread
from time import sleep
from flask import Flask, abort, request, Response, send_from_directory
from flask_restful import Resource, Api
from flask_jsonpify import jsonify
from flask_cors import CORS
import youtube_dl as yt
import glob
import shutil
USERS = [
"omni",
"omnibus"
]
HOST = "omnijunk.xyz"
PORT = 66
MAINDIR = "/scripts/ytapi"
# flask base
app = Flask(__name__)
api = Api(app)
CORS(app)
def ytdl(vid):
urls = []
ydl_opts = {
'format': 'mp4',
}
if "https://youtu.be/" in vid:
urls.append(vid)
elif "https://youtube.com/" in vid:
        # Normalize a full YouTube watch URL to the short youtu.be form of the video id
        modid = vid.replace("https://www.youtube.com/watch?v=", "")
        modid = "https://youtu.be/" + str(modid)
urls.append(modid)
else:
modid = "https://youtu.be/" + str(vid)
urls.append(modid)
with yt.YoutubeDL(ydl_opts) as ydl:
ydl.download(urls)
print("done downloading.")
sleep(1)
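# REST resource: GET expects ?id=<video>&u=<user>&dl=<y|n>, downloads the video in a
# worker thread, then either returns the resulting mp4 as an attachment or reports success.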
class base(Resource):
def post(self):
return {"RESPONSE":405}
def get(self):
vid = request.args['id']
user = request.args['u']
conf = request.args['dl']
if vid != "" or user != "":
if user in USERS:
#print(str(vid))
thread = Thread(target = ytdl, args = (vid, ))
thread.start()
thread.join()
globstr = "*"+ str(vid)+".mp4"
files = glob.glob(str(globstr))
print(files)
filename = files[0]
if conf == "y":
return send_from_directory(MAINDIR,filename, as_attachment = True)
else:
return {"RESPONSE":200}
else:
return {"RESPONSE":401}
else:
return {"RESPONSE":400}
if __name__ == '__main__':
import logging
logging.basicConfig(filename='DB_Server.log',level=logging.ERROR)
if sys.platform == "linux":
os.system('clear')
elif sys.platform == "win32":
os.system('cls')
else:
print("Now running on port "+str(PORT))
api.add_resource(base,'/') # send raw request data to database
#api.add_resource(view,'/dl')
#api.add_resource(QueryDB,'/query/')
app.run(host=HOST,port=PORT,debug=False)
| 25.397959 | 87 | 0.543994 |
5152cc69d0033c9518cd985779d3405856a3d05c | 1,074 | py | Python | hood/urls.py | fuaad001/Neighbourhood | 8c1489ce490540c71b319d9e539fe8c2cdd312a4 | ["MIT"] | null | null | null | hood/urls.py | fuaad001/Neighbourhood | 8c1489ce490540c71b319d9e539fe8c2cdd312a4 | ["MIT"] | 4 | 2021-03-19T00:58:02.000Z | 2021-09-08T01:01:19.000Z | hood/urls.py | fuaad001/Neighbourhood | 8c1489ce490540c71b319d9e539fe8c2cdd312a4 | ["MIT"] | null | null | null |
from django.conf.urls import url
from django.conf.urls.static import static
from django.conf import settings
from . import views
urlpatterns=[
url('^$', views.index, name = 'index'),
url(r'^home$', views.home, name = 'home'),
url(r'^accounts/profile/$', views.profile, name = 'profile'),
url(r'^services$', views.services, name = 'services'),
url(r'^business$', views.business, name = 'business'),
url(r'^about$', views.about, name = 'about'),
url(r'^user_admin$', views.user_admin, name = 'user_admin'),
url(r'^category$', views.category, name = 'category'),
url(r'^newneighbourhood$', views.newneighbourhood, name = 'newneighbourhood'),
url(r'^newservice$', views.newservice, name = 'newservice'),
url(r'^newbusiness$', views.newbusiness, name = 'newbusiness'),
url(r'^search$', views.search, name = 'search'),
url(r'^change$', views.change, name = 'change'),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| 46.695652 | 86 | 0.633147 |
28b59269b21730d53fc4b78318f2b50466f5c4fc | 1,995 | py | Python | test/nanoid_test.py | etrepum/py-nanoid | 2fc593ed3174c16f22dccdca716c15ce9a3e9af5 | ["MIT"] | 256 | 2017-09-20T11:40:48.000Z | 2022-03-31T23:02:56.000Z | test/nanoid_test.py | sthagen/py-nanoid | 99e5b478c450f42d713b6111175886dccf16f156 | ["MIT"] | 19 | 2017-09-21T08:36:01.000Z | 2022-02-01T06:16:30.000Z | test/nanoid_test.py | sthagen/py-nanoid | 99e5b478c450f42d713b6111175886dccf16f156 | ["MIT"] | 20 | 2017-09-20T15:21:22.000Z | 2022-03-12T05:19:23.000Z |
# coding: utf-8
from sys import maxsize
from unittest import TestCase
from nanoid import generate, non_secure_generate
from nanoid.resources import alphabet
class TestNanoID(TestCase):
def test_flat_distribution(self):
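        # Generate 100k IDs over a 26-letter alphabet and verify every character is
        # used with roughly uniform frequency (normalized spread of at most 0.05).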
count = 100 * 1000
length = 5
alphabet = 'abcdefghijklmnopqrstuvwxyz'
chars = {}
for _ in range(count):
id = generate(alphabet, length)
for j in range(len(id)):
char = id[j]
if not chars.get(char):
chars[char] = 0
chars[char] += 1
self.assertEqual(len(chars.keys()), len(alphabet))
max = 0
min = maxsize
for k in chars:
distribution = (chars[k] * len(alphabet)) / float((count * length))
if distribution > max:
max = distribution
if distribution < min:
min = distribution
self.assertLessEqual(max - min, 0.05)
def test_generates_url_friendly_id(self):
for _ in range(10):
id = generate()
self.assertEqual(len(id), 21)
for j in range(len(id)):
self.assertIn(id[j], alphabet)
def test_has_no_collisions(self):
count = 100 * 1000
used = {}
for _ in range(count):
id = generate()
self.assertIsNotNone(id in used)
used[id] = True
def test_has_options(self):
self.assertEqual(generate('a', 5), 'aaaaa')
def test_non_secure_ids(self):
for i in range(10000):
nanoid = non_secure_generate()
self.assertEqual(len(nanoid), 21)
def test_non_secure_short_ids(self):
for i in range(10000):
nanoid = non_secure_generate("12345a", 3)
self.assertEqual(len(nanoid), 3)
def test_short_secure_ids(self):
for i in range(10000):
nanoid = generate("12345a", 3)
self.assertEqual(len(nanoid), 3)
| 28.913043 | 79 | 0.557393 |
44c1c0f67e0582280dbbfa81281478c920552634 | 9,565 | py | Python | Algorithms/ProtoAlgorithm/ProtoAlgorithm.py | icsa-caps/HieraGen | 4026c1718878d2ef69dd13d3e6e10cab69174fda | ["MIT"] | 6 | 2020-07-07T15:45:13.000Z | 2021-08-29T06:44:29.000Z | Algorithms/ProtoAlgorithm/ProtoAlgorithm.py | icsa-caps/HieraGen | 4026c1718878d2ef69dd13d3e6e10cab69174fda | ["MIT"] | null | null | null | Algorithms/ProtoAlgorithm/ProtoAlgorithm.py | icsa-caps/HieraGen | 4026c1718878d2ef69dd13d3e6e10cab69174fda | ["MIT"] | null | null | null |
import time
from typing import Tuple, List, Dict
from Algorithms.General.GenStateSets import extract_states_from_sets, StateSet, StateSets
from Algorithms.General.MergeStates import MergeStates
from Algorithms.General.Tracing.TraceTree import Trace
from DataObjects.ClassLevel import Level
from DataObjects.ClassArchitecture import Architecture
from Monitor.ProtoCCTable import *
from Graphv.ProtoCCGraph import ProtoCCGraph
from Algorithms.ProtoAlgorithm.ProtoStalling import ProtoStalling
from Algorithms.ProtoAlgorithm.ProtoNonStalling import ProtoNonStalling
from Algorithms.ProtoAlgorithm.ProtoDir import ProtoDir
from Algorithms.ProtoAlgorithm.ProtoAccessAssign import ProtoAccessAssign
from Monitor.ClassDebug import Debug
class ProtoAlgorithm(ProtoStalling, ProtoNonStalling, ProtoDir, ProtoAccessAssign, Debug):
def __init__(self, level: Level, config, dbg_term: bool = False, dbg_graph: bool = False):
ProtoStalling.__init__(self)
ProtoNonStalling.__init__(self)
ProtoDir.__init__(self)
ProtoAccessAssign.__init__(self)
Debug.__init__(self, dbg_term)
self.dbg_graph = dbg_graph
self.debug_all_generated_states = []
self.level = level
self.parser = level.parser
self.datamsgs = level.parser.getDataMsgTypes()
self.access = level.parser.getAccess()
self.evict = level.parser.getEvict()
self.archProtoGen = {}
self.renamedMessages = level.renamedMessages
self.hiddenChangeStates = level.hiddenChangeStates
self.cacheStateSets = []
self.progressMessages = level.progressMessages
""" PROTOGEN OPTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# Options Cache
self.CCconservativeInv = config.CCconservativeInv
self.nonstalling = config.nonstalling
self.maxNestingDepthCC = config.maxNestingDepthCC
# Options Directory
self.DCconservativeInv = config.DCconservativeInv
self.maxNestingDepthDC = config.maxNestingDepthDC
# Options Access Assignment
self.stableStatesOnly = config.stableStatesOnly
self.conservativeAccess = config.conservativeAccess
self.ignoreDeferedStates = config.ignoreDeferedStates
self.maxagressiveAccess = config.maxagressiveAccess
# Options Merging
self.enableStateMerging = config.enableStateMerging
self.maxMergingIter = config.maxMergingIter
self.MergeStates = MergeStates(self.maxMergingIter, self.access, self.evict)
self._ProcessArch()
def _ProcessArch(self):
self.pheader("Caches")
self.pdebug(str(self.level.cache))
self.pheader("Directories")
self.pdebug(str(self.level.directory))
# Cache
arch_name = str(self.level.cache)
talgo = time.time()
self.pheader("\nArchitecture: " + arch_name)
stablestates = [str(state) for state in self.level.cache.stable_states]
state_sets = self.level.cache.state_sets
self._ProtoGenV2(self.level.cache, state_sets, self.maxNestingDepthCC)
self._AssignAccess(state_sets, stablestates)
if self.enableStateMerging:
print("State reduction function enabled")
self.MergeStates.merge_states(state_sets)
else:
print("State reduction function disabled")
self.pdebug("Runtime: " + arch_name + " = " + str(time.time() - talgo))
self.level.cache.update_transitions()
statedict = extract_states_from_sets(state_sets)
self._pTransitions(arch_name, statedict)
# Run ProtoGen
if self.dbg_graph:
self._dArch(arch_name, statedict)
self.cacheStateSets += list(state_sets.values())
self.archProtoGen.update({arch_name: statedict})
self.level.cache.update_transitions()
# Directory
arch_name = str(self.level.directory)
talgo = time.time()
self.pheader("\nArchitecture: " + arch_name)
# General directory pre processing
stablestates = [str(state) for state in self.level.directory.stable_states]
state_sets = self.level.directory.state_sets
# ProtoGen
self._ProtoGenAlgorithm(self.level.directory, state_sets, stablestates, self.maxNestingDepthDC, self._DirectoryDefer)
#self.merge_states(state_sets)
self.pdebug("Runtime: " + arch_name + " = " + str(time.time() - talgo))
self.level.directory.update_transitions()
statedict = extract_states_from_sets(state_sets)
self._pTransitions(arch_name, statedict)
if self.dbg_graph:
self._dArch(arch_name, statedict)
self.archProtoGen.update({arch_name: statedict})
self.pdebug("")
def _ProtoGenAlgorithm(self, arch: Architecture, state_sets, stablestates, maxdepth, deferfunc):
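        # Keep expanding transient states until no new states are produced or the
        # maximum nesting depth is reached, refreshing the traces after every pass.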
newstates = 1
nestingdepth = 0
while newstates and nestingdepth < maxdepth:
newstates = self._GenerateTransientStates(arch, state_sets, stablestates, deferfunc)
# Update the traces
arch.update_traces()
nestingdepth += 1
#self._AppendDefferedMessages(state_sets, stablestates)
########################################################################################################################
# PUBLIC FUNCTIONS
########################################################################################################################
def getArchStates(self):
return self.archProtoGen
def getCacheStates(self):
return self.level.getCacheStates()
def getDirStates(self):
return self.level.getDirStates()
def getMaxNestingDepth(self):
return 0
def getDCconservativeInv(self):
return self.DCconservativeInv
########################################################################################################################
# DEBUG
########################################################################################################################
def _dArch(self, arch, statedict):
if self.dbg:
ProtoCCGraph("Spec: " + arch, self._pGetTransitions(statedict))
def _pTransitions(self, arch, statedict):
if self.dbg:
transitions = self._pGetTransitions(statedict)
ProtoCCTablePrinter().ptransitiontable(transitions)
def _pGetTransitions(self, statedict):
transitions = []
for state in statedict:
transitions += statedict[state].gettransitions()
transitions = sorted(transitions, key=lambda transition: (transition.getstartstate().getstatename(),
transition.getfinalstate().getstatename(),
transition.getguard(),
transition.getcond()))
return transitions
########################################################################################################################
# ProtoGen V2
########################################################################################################################
def stable_state_trace_map(self, arch: Architecture, state_sets: StateSets) -> \
Tuple[Dict[StateSet, List[Trace]], Dict[StateSet, List[Trace]]]:
start_state_set_to_access_trace_list_map: Dict[StateSet, List[Trace]] = {}
start_state_set_to_remote_trace_list_map: Dict[StateSet, List[Trace]] = {}
for state_set in state_sets:
access_traces, remote_traces = self.classify_traces_start_state(arch, state_sets[state_set])
start_state_set_to_access_trace_list_map[state_sets[state_set]] = access_traces
start_state_set_to_remote_trace_list_map[state_sets[state_set]] = remote_traces
return start_state_set_to_access_trace_list_map, start_state_set_to_remote_trace_list_map
def _ProtoGenV2(self, arch: Architecture, state_sets: StateSets, maxdepth: int):
# Make trace trees
start_state_set_to_access_trace_list_map, start_state_set_to_remote_trace_list_map = \
self.stable_state_trace_map(arch, state_sets)
# Stalling
newstates = True
nestingdepth = 0
while newstates and nestingdepth < 2:
newstates = False
for state_set in state_sets:
ret_val = self.concurrent_start_state_set_states(arch,
state_sets[state_set],
start_state_set_to_remote_trace_list_map)
newstates = newstates or ret_val
nestingdepth += 1
arch.update_traces()
# Non-stalling
enabled = False
newstates = True
nestingdepth = 0
while newstates and nestingdepth < maxdepth and (self.nonstalling or enabled):
newstates = False
for state_set in state_sets:
ret_val = self.concurrent_end_state_set_states(arch,
state_sets[state_set],
start_state_set_to_remote_trace_list_map)
newstates = newstates or ret_val
nestingdepth += 1
arch.update_traces()
| 36.930502 | 125 | 0.596968 |
3245f829c19fd5641d31efaaf7489cb8163d5905 | 928 | py | Python | tests/integration/ssh/test_mine.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | ["Apache-2.0"] | 19 | 2016-01-29T14:37:52.000Z | 2022-03-30T18:08:01.000Z | tests/integration/ssh/test_mine.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | ["Apache-2.0"] | 223 | 2016-03-02T16:39:41.000Z | 2022-03-03T12:26:35.000Z | tests/integration/ssh/test_mine.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | ["Apache-2.0"] | 64 | 2016-02-04T19:45:26.000Z | 2021-12-15T02:02:31.000Z |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import shutil
import salt.utils.platform
from tests.support.case import SSHCase
from tests.support.helpers import slowTest
from tests.support.unit import skipIf
@skipIf(salt.utils.platform.is_windows(), "salt-ssh not available on Windows")
class SSHMineTest(SSHCase):
"""
testing salt-ssh with mine
"""
@slowTest
def test_ssh_mine_get(self):
"""
test salt-ssh with mine
"""
ret = self.run_function("mine.get", ["localhost test.arg"], wipe=False)
self.assertEqual(ret["localhost"]["args"], ["itworked"])
def tearDown(self):
"""
make sure to clean up any old ssh directories
"""
salt_dir = self.run_function("config.get", ["thin_dir"], wipe=False)
if os.path.exists(salt_dir):
shutil.rmtree(salt_dir)
| 27.294118 | 79 | 0.658405 |
1e51c2a1e757879421f2d79e73dba116d4768c2c | 393 | py | Python | pythreejs/_example_helper.py | aliddell/pythreejs | 8a2dc2c8fe9a8adb6affc8214c224e5c9d98fea1 | ["BSD-3-Clause"] | 451 | 2018-04-20T17:44:49.000Z | 2022-03-26T21:53:05.000Z | pythreejs/_example_helper.py | aliddell/pythreejs | 8a2dc2c8fe9a8adb6affc8214c224e5c9d98fea1 | ["BSD-3-Clause"] | 164 | 2018-04-19T08:34:57.000Z | 2022-03-31T12:58:53.000Z | pythreejs/_example_helper.py | aliddell/pythreejs | 8a2dc2c8fe9a8adb6affc8214c224e5c9d98fea1 | ["BSD-3-Clause"] | 119 | 2018-04-23T16:01:02.000Z | 2022-03-26T03:28:59.000Z |
def example_id_gen(max_n=1000):
for i in range(1, max_n):
yield 'pythree_example_model_%03d' % (i,)
def use_example_model_ids():
from ipywidgets import Widget
old_init = Widget.__init__
id_gen = example_id_gen()
def new_init(self, *args, **kwargs):
kwargs['model_id'] = next(id_gen)
old_init(self, *args, **kwargs)
Widget.__init__ = new_init
| 26.2 | 49 | 0.659033 |
8ed45f38ccc763aff3c6984958c261e2c9f6ba94 | 8,618 | py | Python | ptsr/utils/utility.py | prateek-77/rcan-it | 587904556d8127bca83690deaaa26e34e051a576 | ["MIT"] | 57 | 2022-01-28T04:44:42.000Z | 2022-03-31T13:26:35.000Z | ptsr/utils/utility.py | chisyliu/rcan-it | eb1794777ffef4eadd8a6a06f4419380a0b17435 | ["MIT"] | 6 | 2022-02-08T11:17:19.000Z | 2022-03-27T07:40:18.000Z | ptsr/utils/utility.py | chisyliu/rcan-it | eb1794777ffef4eadd8a6a06f4419380a0b17435 | ["MIT"] | 10 | 2022-01-28T07:31:12.000Z | 2022-03-15T01:35:03.000Z |
from typing import Optional
import torch.optim.lr_scheduler as lrs
import torch.optim as optim
import torch
import imageio
import numpy as np
import matplotlib.pyplot as plt
import os
from math import log10, sqrt
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue
import matplotlib
matplotlib.use('Agg')
class timer():
def __init__(self):
self.acc = 0
self.tic()
def tic(self):
self.t0 = time.time()
def toc(self, restart=False):
diff = time.time() - self.t0
if restart:
self.t0 = time.time()
return diff
def hold(self):
self.acc += self.toc()
def release(self):
ret = self.acc
self.acc = 0
return ret
def reset(self):
self.acc = 0
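# Experiment bookkeeping: output directories, text/PSNR logs, checkpoint save/load,
# PSNR plotting, and background workers that write result images to disk.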
class checkpoint():
def __init__(self, cfg):
self.cfg = cfg
self.ok = True
self.log = torch.Tensor()
now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
self.datatest = []
if (self.cfg.SOLVER.TEST_EVERY and not self.cfg.SOLVER.TEST_ONLY):
self.datatest = self.cfg.DATASET.DATA_VAL
elif (self.cfg.SOLVER.TEST_ONLY):
self.datatest = self.cfg.DATASET.DATA_TEST
time_stamp = datetime.datetime.now().strftime("_%b%d%y_%H%M")
if not cfg.LOG.LOAD:
if not cfg.LOG.SAVE:
cfg.LOG.SAVE = now
self.dir = os.path.join('outputs', cfg.LOG.SAVE + time_stamp)
else:
self.dir = os.path.join('outputs', cfg.LOG.LOAD + time_stamp)
if os.path.exists(self.dir):
self.log = torch.load(self.get_path('psnr_log.pt'))
print('Continue from epoch {}...'.format(
len(self.log)*self.cfg.SOLVER.TEST_EVERY))
else:
cfg.LOG.LOAD = ''
os.makedirs(self.dir, exist_ok=True)
os.makedirs(self.get_path('model'), exist_ok=True)
for d in self.datatest:
os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)
open_type = 'a' if os.path.exists(self.get_path('log.txt')) else 'w'
self.log_file = open(self.get_path('log.txt'), open_type)
with open(self.get_path('config.yaml'), open_type) as f:
print(cfg, file=f)
self.n_processes = 8
def get_path(self, *subdir):
return os.path.join(self.dir, *subdir)
def save(self, trainer, iteration, is_best=False, iter_start=0, is_swa=False, iter_suffix=False):
self.save_model(self.get_path('model'), trainer, iteration, is_best, is_swa, iter_suffix)
self.plot_psnr(iteration, iter_start)
torch.save(self.log, self.get_path('psnr_log.pt'))
def save_model(self, apath, trainer, iteration: int, is_best: bool = False,
is_swa: bool = False, iter_suffix: bool = False):
save_dirs = [os.path.join(apath, 'model_latest.pth.tar')]
if is_best:
save_dirs.append(os.path.join(apath, 'model_best.pth.tar'))
elif iter_suffix:
save_dirs.append(os.path.join(apath, 'model_%06d.pth.tar' % iteration))
if is_swa:
state_dict = trainer.swa_model.module.module.model.state_dict()
else:
state_dict = trainer.model.module.model.state_dict() # DP, DDP
state = {'iteration': iteration,
'state_dict': state_dict,
'optimizer': trainer.optimizer.state_dict(),
'lr_scheduler': trainer.lr_scheduler.state_dict()}
if hasattr(trainer, 'mixed_fp') and trainer.mixed_fp:
state['scaler'] = trainer.scaler.state_dict()
for filename in save_dirs:
torch.save(state, filename)
def load_model(self, pre_train, trainer, device, restart: bool = False,
test_mode: bool = False, strict: bool = True,
ignore: Optional[str] = None):
if pre_train is None:
return
state = torch.load(pre_train, map_location=device)
if isinstance(state['state_dict'], tuple):
trainer.model.module.model.load_state_dict(
state['state_dict'][0], strict=strict)
else:
pretrained_dict = state['state_dict']
if ignore is not None:
pretrained_dict = {k: v for k, v in pretrained_dict.items() if ignore not in k}
trainer.model.module.model.load_state_dict(pretrained_dict, strict=strict)
if not restart and not test_mode:
trainer.optimizer.load_state_dict(state['optimizer'])
trainer.lr_scheduler.load_state_dict(state['lr_scheduler'])
trainer.iter_start = state['iteration']
if hasattr(trainer, 'mixed_fp') and trainer.mixed_fp and 'scaler' in state:
trainer.scaler.load_state_dict(state['scaler'])
del state # release GPU memory
def add_log(self, log):
self.log = torch.cat([self.log, log])
def write_log(self, log, refresh=False):
print(log)
self.log_file.write(log + '\n')
if refresh:
self.log_file.close()
self.log_file = open(self.get_path('log.txt'), 'a')
def plot_psnr(self, iteration, iter_start=0):
intervel = self.cfg.SOLVER.TEST_EVERY
num_points = (iteration + 1 - iter_start) // intervel
axis = list(range(1, num_points+1))
axis = np.array(axis) * intervel + iter_start
for idx_data, d in enumerate(self.datatest):
label = 'SR on {}'.format(d)
fig = plt.figure()
plt.title(label)
for idx_scale, scale in enumerate(self.cfg.DATASET.DATA_SCALE):
plt.plot(
axis,
self.log[:, idx_data, idx_scale].numpy(),
label='Scale {}'.format(scale)
)
plt.legend()
plt.xlabel('Iterations')
plt.ylabel('PSNR')
plt.grid(True)
plt.savefig(self.get_path('test_{}.pdf'.format(d)))
plt.close(fig)
def begin_background(self):
self.queue = Queue()
def bg_target(queue):
while True:
if not queue.empty():
filename, tensor = queue.get()
if filename is None:
break
imageio.imwrite(filename, tensor.numpy())
self.process = [
Process(target=bg_target, args=(self.queue,))
for _ in range(self.n_processes)
]
for p in self.process:
p.start()
def end_background(self):
for _ in range(self.n_processes):
self.queue.put((None, None))
while not self.queue.empty():
time.sleep(1)
for p in self.process:
p.join()
def save_results(self, dataset, filename, save_list, scale):
if self.cfg.LOG.SAVE_RESULTS:
filename = self.get_path(
'results-{}'.format(dataset.dataset.name),
'{}_x{}_'.format(filename, scale)
)
postfix = ('SR', 'LR', 'HR')
for v, p in zip(save_list, postfix):
normalized = v[0].mul(255 / self.cfg.DATASET.RGB_RANGE)
tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
self.queue.put(('{}{}.png'.format(filename, p), tensor_cpu))
def done(self):
self.log_file.close()
def quantize(img, rgb_range):
pixel_range = 255 / rgb_range
return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)
def calc_psnr_torch(sr, hr, scale, rgb_range: float = 255.0,
use_gray_coeffs: bool = True):
# Input images should be in (B, C, H, W) format.
if hr.nelement() == 1:
return 0
diff = (sr - hr) / rgb_range
diff = diff[..., scale:-scale, scale:-scale]
if diff.size(1) > 1 and use_gray_coeffs:
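        # Reduce the RGB difference to a single luminance channel (ITU-R BT.601 weights).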
gray_coeffs = [65.738, 129.057, 25.064]
convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / rgb_range
diff = diff.mul(convert).sum(dim=1)
mse = diff.pow(2).mean()
if mse == 0: # PSNR have no importance.
return 100
return -10 * log10(mse)
def calc_psnr_numpy(sr, hr, scale, rgb_range: float = 255.0):
# Input images should be in (H, W, C) format.
diff = (sr - hr)
diff = diff[scale:-scale, scale:-scale]
mse = np.mean(diff ** 2)
if mse == 0: # PSNR have no importance.
return 100
psnr = 20 * log10(rgb_range / sqrt(mse))
return psnr
| 33.929134 | 101 | 0.57554 |
ae27bd9490194d6aa55dc3e61c22bca6aa7fe21e | 2,239 | py | Python | run_tests.py | bpuderer/python-test-env | ed5d1f1b977560cc9bc52952500b93060fc11e6a | ["MIT"] | null | null | null | run_tests.py | bpuderer/python-test-env | ed5d1f1b977560cc9bc52952500b93060fc11e6a | ["MIT"] | 1 | 2021-03-31T18:50:05.000Z | 2021-03-31T18:50:05.000Z | run_tests.py | bpuderer/python-test-env | ed5d1f1b977560cc9bc52952500b93060fc11e6a | ["MIT"] | null | null | null |
"""Wrapper for nose2"""
import argparse
import glob
import os
import shlex
import subprocess
def main():
parser = argparse.ArgumentParser(description='nose2 wrapper script',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('tests', nargs='*',
help='Only run specified tests. Ex. tests.test_example \
Ex. tests.test_example.ExampleTestCase \
Ex. tests.test_example.ExampleTestCase.test_env1_config')
parser.add_argument('--testenv', '-te', default='DEFAULT',
help='Case sensitive section in test_settings.cfg')
parser.add_argument('--attr', '-A', nargs='+', help='Select tests by attribute. \
Args are logically OR\'d. Arg with comma delimeter(s) is AND\'d. \
Ex. slow tags=tag2 Ex. slow,tags=tag2')
parser.add_argument('--quiet', '-q', action='store_true', default=False)
parser.add_argument('--collect_only', '-c', action='store_true',
default=False, help='Collect and output test names, don\'t run tests')
parser.add_argument('--xml', action='store_true',
default=False, help='Write test results xUnit XML')
args = parser.parse_args()
os.environ['PY_TEST_ENV'] = args.testenv
# cleanup previous run
for f in glob.glob('reports/*.xml'):
os.remove(f)
for f in glob.glob('log/*'):
os.remove(f)
for f in glob.glob('screenshots/*'):
os.remove(f)
cmd = 'python -m nose2 --config framework/nose2.cfg'
if args.tests:
cmd += ' ' + ' '.join(args.tests)
if not args.quiet:
cmd += ' -v'
if args.collect_only:
cmd += ' --collect-only'
if args.attr:
cmd += ' -A ' + ' -A '.join(args.attr)
if args.xml:
cmd += ' --junit-xml'
# to combine stdout and stderr
#cp = subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
cp = subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(cp.stdout.decode())
print(cp.stderr.decode())
if __name__ == "__main__":
main()
| 33.924242 | 94 | 0.595355 |
5cc1f3013cc110703d1c49adcf3b02c5e72cf4d1 | 424 | py | Python | soluciones/problema9.py | hernan-erasmo/project-euler | d68aa90c5fe2bf733bec54af5a786a2c144783bc | ["Unlicense"] | null | null | null | soluciones/problema9.py | hernan-erasmo/project-euler | d68aa90c5fe2bf733bec54af5a786a2c144783bc | ["Unlicense"] | null | null | null | soluciones/problema9.py | hernan-erasmo/project-euler | d68aa90c5fe2bf733bec54af5a786a2c144783bc | ["Unlicense"] | null | null | null |
def main():
r = 2
encontrada = False
while not encontrada:
for t in mf.dickson_triples(r):
            print("r = " + str(r) + " - trying ", t, " (sum: " + str(t[0] + t[1] + t[2]) + ")")
            if (t[0] + t[1] + t[2]) == 1000:
                print("The triple is: ", t, " and the product of its components is: " + str(t[0]*t[1]*t[2]))
encontrada = True
break
r += 2
if __name__ == '__main__':
import mis_funciones as mf
main()
| 24.941176 | 92 | 0.54717 |
be1f456e38313545306bea88aaa18675b0dc097a | 21,733 | py | Python | src/genie/libs/parser/nxos/tests/test_show_arp.py | tylersiemers/genieparser | f18d49f9ca268d23df34faaf8b2212ca29b6882e | ["Apache-2.0"] | null | null | null | src/genie/libs/parser/nxos/tests/test_show_arp.py | tylersiemers/genieparser | f18d49f9ca268d23df34faaf8b2212ca29b6882e | ["Apache-2.0"] | 1 | 2019-04-02T16:51:56.000Z | 2019-04-02T16:51:56.000Z | src/genie/libs/parser/nxos/tests/test_show_arp.py | tylersiemers/genieparser | f18d49f9ca268d23df34faaf8b2212ca29b6882e | ["Apache-2.0"] | 1 | 2021-01-29T17:31:33.000Z | 2021-01-29T17:31:33.000Z |
# Python
import unittest
from unittest.mock import Mock
# ATS
from ats.topology import Device
# Metaparset
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
# Parser
from genie.libs.parser.nxos.show_arp import ShowIpArpDetailVrfAll, \
ShowIpArpSummaryVrfAll, \
ShowIpArpstatisticsVrfAll
#=========================================================
# Unit test for show ip arp detail vrf all
#=========================================================
class test_show_ip_arp_detail_vrf_all(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
'interfaces': {
'Ethernet1/1': {
'ipv4': {
'neighbors': {
'10.1.3.5': {
'age': '-',
'ip': '10.1.3.5',
'link_layer_address': 'aaaa.bbbb.cccc',
'origin': 'static',
'physical_interface': 'Ethernet1/1'}
}
}
},
'Ethernet1/1.1': {
'ipv4': {
'neighbors': {
'192.168.4.2': {
'age': '00:01:53',
'ip': '192.168.4.2',
'link_layer_address': '000c.292a.1eaf',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/1.1'}
}
}
},
'Ethernet1/1.2': {
'ipv4': {
'neighbors': {
'192.168.154.2': {
'age': '00:00:47',
'ip': '192.168.154.2',
'link_layer_address': '000c.292a.1eaf',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/1.2'}
}
}
},
'Ethernet1/1.4': {
'ipv4': {
'neighbors': {
'192.168.106.2': {
'age': '00:08:42',
'ip': '192.168.106.2',
'link_layer_address': '000c.292a.1eaf',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/1.4'}
}
}
},
'Ethernet1/2.1': {
'ipv4': {
'neighbors': {
'192.168.154.2': {
'age': '00:18:24',
'ip': '192.168.154.2',
'link_layer_address': '000c.2904.5840',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/2.1'}
}
}
},
'Ethernet1/2.2': {
'ipv4': {
'neighbors': {
'192.168.51.2': {
'age': '00:05:21',
'ip': '192.168.51.2',
'link_layer_address': '000c.2904.5840',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/2.2'}
}
}
},
'Ethernet1/2.4': {
'ipv4': {
'neighbors': {
'192.168.9.2': {
'age': '00:10:51',
'ip': '192.168.9.2',
'link_layer_address': '000c.2904.5840',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/2.4'}
}
}
},
'Ethernet1/4.100': {
'ipv4': {
'neighbors': {
'10.51.1.101': {
'age': '00:01:28',
'ip': '10.51.1.101',
'link_layer_address': '0000.71c7.6e61',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/4.100'}
}
}
},
'Ethernet1/4.101': {
'ipv4': {
'neighbors': {
'10.154.1.101': {
'age': '00:01:28',
'ip': '10.154.1.101',
'link_layer_address': '0000.71c7.75c1',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/4.101'}
}
}
},
'Ethernet1/4.200': {
'ipv4': {
'neighbors': {
'10.76.1.101': {
'age': '00:01:28',
'ip': '10.76.1.101',
'link_layer_address': '0000.0068.ce6f',
'origin': 'dynamic',
'physical_interface': 'Ethernet1/4.200'}
}
}
},
'mgmt0': {
'ipv4': {
'neighbors': {
'10.1.7.1': {
'age': '00:17:15',
'ip': '10.1.7.1',
'link_layer_address': '0012.7f57.ac80',
'origin': 'dynamic',
'physical_interface': 'mgmt0'},
'10.1.7.250': {
'age': '00:14:24',
'ip': '10.1.7.250',
'link_layer_address': '0050.5682.7915',
'origin': 'dynamic',
'physical_interface': 'mgmt0'},
'10.1.7.253': {
'age': '00:10:22',
'ip': '10.1.7.253',
'link_layer_address': '0050.56a4.a9fc',
'origin': 'dynamic',
'physical_interface': 'mgmt0'}
}
}
}
}
}
golden_output = {'execute.return_value': '''
N95_1# show ip arp detail vrf all
Flags: * - Adjacencies learnt on non-active FHRP router
+ - Adjacencies synced via CFSoE
# - Adjacencies Throttled for Glean
CP - Added via L2RIB, Control plane Adjacencies
PS - Added via L2RIB, Peer Sync
RO - Re-Originated Peer Sync Entry
IP ARP Table for all contexts
Total number of entries: 12
Address Age MAC Address Interface Physical Interface Flags
10.1.7.1 00:17:15 0012.7f57.ac80 mgmt0 mgmt0
10.1.7.250 00:14:24 0050.5682.7915 mgmt0 mgmt0
10.1.7.253 00:10:22 0050.56a4.a9fc mgmt0 mgmt0
10.1.3.5 - aaaa.bbbb.cccc Ethernet1/1 Ethernet1/1
192.168.4.2 00:01:53 000c.292a.1eaf Ethernet1/1.1 Ethernet1/1.1
192.168.154.2 00:00:47 000c.292a.1eaf Ethernet1/1.2 Ethernet1/1.2
192.168.106.2 00:08:42 000c.292a.1eaf Ethernet1/1.4 Ethernet1/1.4
192.168.154.2 00:18:24 000c.2904.5840 Ethernet1/2.1 Ethernet1/2.1
192.168.51.2 00:05:21 000c.2904.5840 Ethernet1/2.2 Ethernet1/2.2
192.168.9.2 00:10:51 000c.2904.5840 Ethernet1/2.4 Ethernet1/2.4
10.51.1.101 00:01:28 0000.71c7.6e61 Ethernet1/4.100 Ethernet1/4.100
10.154.1.101 00:01:28 0000.71c7.75c1 Ethernet1/4.101 Ethernet1/4.101
10.76.1.101 00:01:28 0000.0068.ce6f Ethernet1/4.200 Ethernet1/4.200
'''
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowIpArpDetailVrfAll(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.maxDiff = None
self.device = Mock(**self.golden_output)
obj = ShowIpArpDetailVrfAll(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
#=========================================================
# Unit test for show ip arp summary vrf all
#=========================================================
class test_show_ip_arp_summary_vrf_all(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
'incomplete': 0,
'throttled': 0,
'resolved': 12,
'total': 12,
'unknown': 0}
golden_output = {'execute.return_value': '''
N95_1# show ip arp summary
IP ARP Table - Adjacency Summary
Resolved : 12
Incomplete : 0 (Throttled : 0)
Unknown : 0
Total : 12
'''
}
golden_parsed_output_1 = {
'incomplete': 0,
'throttled': 0,
'resolved': 12,
'total': 12,
'unknown': 0}
golden_output_1 = {'execute.return_value': '''
N95_1# show ip arp summary vrf all
IP ARP Table - Adjacency Summary
Resolved : 12
Incomplete : 0 (Throttled : 0)
Unknown : 0
Total : 12
'''
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowIpArpSummaryVrfAll(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.maxDiff = None
self.device = Mock(**self.golden_output)
obj = ShowIpArpSummaryVrfAll(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
def test_golden_1(self):
self.maxDiff = None
self.device = Mock(**self.golden_output_1)
obj = ShowIpArpSummaryVrfAll(device=self.device)
parsed_output = obj.parse(vrf='all')
self.assertEqual(parsed_output, self.golden_parsed_output_1)
#=========================================================
# Unit test for show ip arp statistics vrf all
#=========================================================
class test_show_ip_arp_statistics_vrf_all(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
'statistics': {
'adjacency': {
'adjacency_adds': 43,
'adjacency_deletes': 12,
'adjacency_timeouts': 12,
'failed_due_to_limits': 0},
'received': {
'anycast_proxy_arp': 0,
'dropped': 28218,
'dropped_server_port': 0,
'drops_details': {
'appeared_on_a_wrong_interface': 0,
'arp_refresh_requests_received_from_clients': 0,
'context_not_created': 0,
'directed_broadcast_source': 0,
'dropping_due_to_tunneling_failures': 0,
'glean_requests_recv_count': 71,
'grat_arp_received_on_proxy': 0,
'incorrect_length': 0,
'invalid_context': 0,
'invalid_destination_ip_address': 0,
'invalid_hardwaretype': 0,
'invalid_layer2_address_length': 0,
'invalid_layer3_address_length': 0,
'invalid_protocol_packet': 0,
'invalid_source_ip_address': 28,
'invalid_source_mac_address': 0,
'l2_packet_on_untrusted_l2_port': 0,
'l2fm_query_failed_for_a_l2address': 0,
'no_mem_to_create_per_intf_structure': 0,
'non_active_fhrp_dest_ip': 0,
'non_local_destination_ip_address': 20421,
'number_of_signals_received_from_l2rib': 0,
'packet_with_vip_on_standby_fhrp': 0,
'received_before_arp_initialization': 0,
'requests_came_for_exising_entries': 15,
'requests_came_on_a_l2_interface': 0,
'source_address_mismatch_with_subnet': 0,
'source_mac_address_is_our_own': 0},
'enhanced_proxy_arp': 0,
'fastpath': 0,
'l2_port_track_proxy_arp': 0,
'l2_replies': 0,
'l2_requests': 0,
'local_proxy_arp': 0,
'proxy_arp': 0,
'replies': 6582,
'requests': 22632,
'snooped': 0,
'total': 0,
'tunneled': 0},
'sent': {
'dropped': 0,
'drops_details': {
'adjacency_couldnt_be_added': 0,
'arp_refresh_skipped_over_core_and_flooded': 0,
'client_enqueue_failed': 0,
'context_not_created': 0,
'dest_not_reachable_for_proxy_arp': 0,
'dest_unreachable_for_enhanced_proxy': 0,
'destnination_is_our_own_ip': 26,
'destnination_on_l2_port_tracked': 0,
'invalid_context': 0,
'invalid_dest_ip': 0,
'invalid_ifindex': 0,
'invalid_local_proxy_arp': 0,
'invalid_proxy_arp': 0,
'invalid_src_ip': 0,
'mbuf_operation_failed': 0,
'null_source_ip': 0,
'null_source_mac': 0,
'unattached_ip': 0,
'vip_is_not_active': 0},
'gratuitous': 58,
'l2_replies': 0,
'l2_requests': 0,
'replies': 998,
'requests': 2102,
'total': 3158,
'tunneled': 0}
}
}
golden_output = {'execute.return_value': '''
N95_1# show ip arp statistics vrf all
ARP State Machine Stats
ARP packet statistics for all contexts
Sent:
Total 3158, Requests 2102, Replies 998, Requests on L2 0, Replies on L2 0,
Gratuitous 58, Tunneled 0, Dropped 0
Send packet drops details:
MBUF operation failed : 0
Context not yet created : 0
Invalid context : 0
Invalid ifindex : 0
Invalid SRC IP : 0
Invalid DEST IP : 0
Destination is our own IP : 26
Unattached IP : 0
Adjacency Couldn't be added : 0
Null Source IP : 0
Null Source MAC : 0
Client Enqueue Failed : 0
Dest. not reachable for proxy arp : 0
Dest. unreachable for enhanced proxy : 0
Dest. on L2 port being tracked : 0
Invalid Local proxy arp : 0
Invalid proxy arp : 0
VIP is not active : 0
ARP refresh skipped over core and flooded on server : 0
Received:
Total 0, Requests 22632, Replies 6582, Requests on L2 0, Replies on L2 0
Proxy arp 0, Local-Proxy arp 0, Enhanced Proxy arp 0, Anycast proxy Proxy arp 0, L2 Port-track Proxy arp 0, Tunneled 0, Fastpath 0, Snooped 0, Dropped 28218 on Server Port 0
Received packet drops details:
Appeared on a wrong interface : 0
Incorrect length : 0
Invalid protocol packet : 0
Invalid Hardware type : 0
Invalid context : 0
Context not yet created : 0
Invalid layer 2 address length : 0
Invalid layer 3 address length : 0
Invalid source IP address : 28
Source IP address is our own : 0
No mem to create per intf structure : 0
Source address mismatch with subnet : 0
Directed broadcast source : 0
Invalid destination IP address : 0
Non-local destination IP address : 20421
Non-active FHRP dest IP address. Learn and drop
: 0
Invalid source MAC address : 0
Source MAC address is our own : 0
Received before arp initialization : 0
L2 packet on proxy-arp-enabled interface
: 0
L2 packet on untrusted L2 port : 0
Packet with VIP on standby FHRP : 0
Grat arp received on proxy-arp-enabled interface
: 0
Requests came for exising entries : 15
Requests came on a L2 interface : 0
L2FM query failed for a L2 Address : 0
Dropping due to tunneling failures : 0
Glean requests recv count : 71
ARP refresh requests received from clients: 0
Number of Signals received from L2rib : 0
ARP adjacency statistics
Adds 43, Deletes 12, Timeouts 12
Failed due to limits: 0
'''
}
golden_parsed_output_1 = {'statistics': {'adjacency': {'adjacency_adds': 5,
'adjacency_deletes': 0,
'adjacency_timeouts': 0},
'received': {'anycast_proxy_arp': 0,
'dropped': 7,
'dropped_server_port': 0,
'drops_details': {'appeared_on_a_wrong_interface': 0,
'context_not_created': 0,
'directed_broadcast_source': 0,
'dropping_due_to_tunneling_failures': 0,
'grat_arp_received_on_proxy': 0,
'incorrect_length': 0,
'invalid_context': 0,
'invalid_destination_ip_address': 0,
'invalid_hardwaretype': 0,
'invalid_layer2_address_length': 0,
'invalid_layer3_address_length': 0,
'invalid_protocol_packet': 0,
'invalid_source_ip_address': 0,
'invalid_source_mac_address': 0,
'l2_packet_on_untrusted_l2_port': 0,
'l2fm_query_failed_for_a_l2address': 0,
'no_mem_to_create_per_intf_structure': 0,
'non_active_fhrp_dest_ip': 0,
'non_local_destination_ip_address': 7,
'packet_with_vip_on_standby_fhrp': 0,
'received_before_arp_initialization': 0,
'requests_came_for_exising_entries': 0,
'requests_came_on_a_l2_interface': 0,
'source_address_mismatch_with_subnet': 0,
'source_mac_address_is_our_own': 0},
'enhanced_proxy_arp': 0,
'fastpath': 0,
'l2_port_track_proxy_arp': 0,
'l2_replies': 0,
'l2_requests': 0,
'local_proxy_arp': 0,
'proxy_arp': 0,
'replies': 55,
'requests': 5,
'snooped': 0,
'total': 67,
'tunneled': 0},
'sent': {'dropped': 0,
'drops_details': {'adjacency_couldnt_be_added': 0,
'client_enqueue_failed': 0,
'context_not_created': 0,
'dest_not_reachable_for_proxy_arp': 0,
'dest_unreachable_for_enhanced_proxy': 0,
'destnination_is_our_own_ip': 0,
'destnination_on_l2_port_tracked': 0,
'invalid_context': 0,
'invalid_dest_ip': 0,
'invalid_ifindex': 0,
'invalid_local_proxy_arp': 0,
'invalid_proxy_arp': 0,
'invalid_src_ip': 0,
'mbuf_operation_failed': 0,
'null_source_ip': 0,
'null_source_mac': 0,
'unattached_ip': 0,
'vip_is_not_active': 0},
'gratuitous': 2,
'l2_replies': 0,
'l2_requests': 0,
'replies': 5,
'requests': 57,
'total': 64,
'tunneled': 0}}}
golden_output_1 = {'execute.return_value': '''
nx-osv9000-1# show ip arp statistics
ARP packet statistics for context default
Sent:
Total 64, Requests 57, Replies 5, Requests on L2 0, Replies on L2 0,
Gratuitous 2, Tunneled 0, Dropped 0
Send packet drops details:
MBUF operation failed : 0
Context not yet created : 0
Invalid context : 0
Invalid ifindex : 0
Invalid SRC IP : 0
Invalid DEST IP : 0
Destination is our own IP : 0
Unattached IP : 0
Adjacency Couldn't be added : 0
Null Source IP : 0
Null Source MAC : 0
Client Enqueue Failed : 0
Dest. not reachable for proxy arp : 0
Dest. unreachable for enhanced proxy: 0
Dest. on L2 port being tracked : 0
Invalid Local proxy arp : 0
Invalid proxy arp : 0
VIP is not active : 0
Received:
Total 67, Requests 5, Replies 55, Requests on L2 0, Replies on L2 0
Proxy arp 0, Local-Proxy arp 0, Enhanced Proxy arp 0, Anycast proxy Proxy arp 0, L2 Port-track Proxy arp 0, Tunneled 0, Fastpath 0, Snooped 0, Dropped 7, on Server Port 0
Received packet drops details:
Appeared on a wrong interface : 0
Incorrect length : 0
Invalid protocol packet : 0
Invalid Hardware type : 0
Invalid context : 0
Context not yet created : 0
Invalid layer 2 address length : 0
Invalid layer 3 address length : 0
Invalid source IP address : 0
Source IP address is our own : 0
No mem to create per intf structure : 0
Source address mismatch with subnet : 0
Directed broadcast source : 0
Invalid destination IP address : 0
Non-local destination IP address : 7
Non-active FHRP dest IP address. Learn and drop
: 0
Invalid source MAC address : 0
Source MAC address is our own : 0
Received before arp initialization : 0
L2 packet on proxy-arp-enabled interface
: 0
L2 packet on untrusted L2 port : 0
Packet with VIP on standby FHRP : 0
Grat arp received on proxy-arp-enabled interface
: 0
Requests came for exising entries : 0
Requests came on a l2 interface : 0
L2FM query failed for a L2 Address : 0
Dropping due to tunneling failures : 0
ARP adjacency statistics
Adds 5, Deletes 0, Timeouts 0
nx-osv9000-1#
'''
}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowIpArpstatisticsVrfAll(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.maxDiff = None
self.device = Mock(**self.golden_output)
obj = ShowIpArpstatisticsVrfAll(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
def test_golden_1(self):
self.maxDiff = None
self.device = Mock(**self.golden_output_1)
obj = ShowIpArpstatisticsVrfAll(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_1)
if __name__ == '__main__':
    unittest.main()
 | 35.627869 | 181 | 0.526849 |
9ec0ac976de49670c7b742bba14e3d657730bdc3 | 5,863 | py | Python | faster_rcnn/rpn_proposal.py | tendence/faster_rcnn | 55810bc759482acc1a047f949cc0bda942329df1 | ["MIT"] | 1 | 2017-12-08T02:44:40.000Z | 2017-12-08T02:44:40.000Z | faster_rcnn/rpn_proposal.py | tendence/faster_rcnn | 55810bc759482acc1a047f949cc0bda942329df1 | ["MIT"] | null | null | null | faster_rcnn/rpn_proposal.py | tendence/faster_rcnn | 55810bc759482acc1a047f949cc0bda942329df1 | ["MIT"] | null | null | null |
#!/usr/bin/python3
# Copyright 2017, Mengxiao Lin <linmx0130@gmail.com>
import mxnet as mx
import numpy as np
from .anchor_generator import generate_anchors, map_anchors
from .utils import bbox_inverse_transform, bbox_overlaps, bbox_transform, bbox_clip
from .nms import nms
from .config import cfg
def proposal_train(rpn_cls, rpn_reg, gt, feature_shape, image_shape, ctx):
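    # Decode RPN outputs into proposals, apply NMS, append the ground-truth boxes,
    # then sample foreground/background ROIs and build per-class regression targets.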
# Stop gradient to stop gradient recording
rpn_cls = mx.nd.stop_gradient(rpn_cls)
rpn_reg = mx.nd.stop_gradient(rpn_reg)
# Get basic information of the feature and the image
_n, _c, f_height, f_width = feature_shape
_in, _ic, img_height, img_width = image_shape
rpn_cls = rpn_cls.reshape((1, -1, 2, f_height, f_width))
anchors_count = rpn_cls.shape[1]
# Recover RPN prediction with anchors
ref_anchors = generate_anchors(base_size=16, ratios=cfg.anchor_ratios, scales=cfg.anchor_scales)
anchors = map_anchors(ref_anchors, rpn_reg.shape, img_height, img_width, ctx)
anchors = anchors.reshape((1, -1, 4, f_height, f_width))
anchors = mx.nd.transpose(anchors, (0, 3, 4, 1, 2))
rpn_anchor_scores = mx.nd.softmax(mx.nd.transpose(rpn_cls, (0, 3, 4, 1, 2)), axis=4)[:,:,:,:,1]
rpn_reg = mx.nd.transpose(rpn_reg.reshape((1, -1, 4, f_height, f_width)), (0, 3, 4, 1, 2))
with mx.autograd.pause():
rpn_bbox_pred = bbox_inverse_transform(anchors.reshape((-1, 4)), rpn_reg.reshape((-1, 4)))
rpn_bbox_pred = bbox_clip(rpn_bbox_pred, img_height, img_width)
rpn_bbox_pred = rpn_bbox_pred.reshape((1, f_height, f_width, anchors_count, 4))
# Use NMS to filter out too many boxes
rpn_bbox_pred = rpn_bbox_pred.asnumpy().reshape((-1, 4))
rpn_anchor_scores = rpn_anchor_scores.asnumpy().reshape((-1, ))
rpn_anchor_scores, rpn_bbox_pred = nms(rpn_anchor_scores, rpn_bbox_pred, cfg.rpn_nms_thresh, use_top_n=cfg.bbox_count_before_nms)
rpn_bbox_pred = mx.nd.array(rpn_bbox_pred, ctx)
del rpn_anchor_scores
# append ground truth
rpn_bbox_pred = mx.nd.concatenate([rpn_bbox_pred, gt[0][:,:4]])
# assign label for rpn_bbox_pred
overlaps = bbox_overlaps(rpn_bbox_pred, gt[0][:, :4].reshape((-1, 4)))
gt_assignment = mx.nd.argmax(overlaps, axis=1).asnumpy().astype(np.int32)
max_overlaps = mx.nd.max(overlaps, axis=1).asnumpy()
gt_labels = gt[0][:, 4].reshape((-1,)).asnumpy()
gt_bboxes = gt[0][:, :4].reshape((-1, 4)).asnumpy()
cls_labels = gt_labels[gt_assignment]
rpn_bbox_pred_np = rpn_bbox_pred.asnumpy()
reg_target = gt_bboxes[gt_assignment, :]
cls_labels = cls_labels * (max_overlaps >= cfg.rcnn_fg_thresh)
# sample positive and negative ROIs
fg_inds = np.where(max_overlaps >= cfg.rcnn_fg_thresh)[0]
bg_inds = np.where((max_overlaps >= cfg.rcnn_bg_lo_thresh) * (max_overlaps < cfg.rcnn_fg_thresh))[0]
fg_nums = int(cfg.rcnn_train_sample_size * cfg.rcnn_train_fg_fraction)
bg_nums = cfg.rcnn_train_sample_size - fg_nums
if (len(fg_inds) > fg_nums):
fg_inds = np.random.choice(fg_inds, size=fg_nums, replace=False)
if (len(bg_inds) > bg_nums):
bg_inds = np.random.choice(bg_inds, size=bg_nums, replace=False)
cls_labels = np.concatenate([cls_labels[fg_inds], cls_labels[bg_inds]])
reg_target = np.concatenate([reg_target[fg_inds], reg_target[bg_inds]])
rpn_bbox_pred_np = np.concatenate([rpn_bbox_pred_np[fg_inds], rpn_bbox_pred_np[bg_inds]])
cls_labels = mx.nd.array(cls_labels, ctx)
reg_target = mx.nd.array(reg_target, ctx)
rpn_bbox_pred = mx.nd.array(rpn_bbox_pred_np, ctx)
reg_target = bbox_transform(rpn_bbox_pred, reg_target)
# Shape reg_target into 4 * num_classes
reg_large_target = mx.nd.zeros((reg_target.shape[0], 4 * cfg.num_classes), ctx)
for i in range(cls_labels.shape[0]):
cur_label = int(cls_labels[i].asscalar())
if (cur_label != 0):
reg_large_target[i, cur_label*4: (cur_label+1)*4] = reg_target[i, :]
return rpn_bbox_pred, reg_large_target, cls_labels
def proposal_test(rpn_cls, rpn_reg, feature_shape, image_shape, ctx):
# Stop gradient to stop gradient recording
rpn_cls = mx.nd.stop_gradient(rpn_cls)
rpn_reg = mx.nd.stop_gradient(rpn_reg)
# Get basic information of the feature and the image
_n, _c, f_height, f_width = feature_shape
_in, _ic, img_height, img_width = image_shape
rpn_cls = rpn_cls.reshape((1, -1, 2, f_height, f_width))
anchors_count = rpn_cls.shape[1]
# Recover RPN prediction with anchors
ref_anchors = generate_anchors(base_size=16, ratios=cfg.anchor_ratios, scales=cfg.anchor_scales)
anchors = map_anchors(ref_anchors, rpn_reg.shape, img_height, img_width, ctx)
anchors = anchors.reshape((1, -1, 4, f_height, f_width))
anchors = mx.nd.transpose(anchors, (0, 3, 4, 1, 2))
rpn_anchor_scores = mx.nd.softmax(mx.nd.transpose(rpn_cls, (0, 3, 4, 1, 2)), axis=4)[:,:,:,:,1]
rpn_reg = mx.nd.transpose(rpn_reg.reshape((1, -1, 4, f_height, f_width)), (0, 3, 4, 1, 2))
rpn_bbox_pred = bbox_inverse_transform(anchors.reshape((-1, 4)), rpn_reg.reshape((-1, 4)))
rpn_bbox_pred = bbox_clip(rpn_bbox_pred, img_height, img_width)
rpn_bbox_pred = rpn_bbox_pred.reshape((1, f_height, f_width, anchors_count, 4))
# Use NMS to filter out too many boxes
rpn_bbox_pred = rpn_bbox_pred.asnumpy().reshape((-1, 4))
rpn_anchor_scores = rpn_anchor_scores.asnumpy().reshape((-1, ))
rpn_anchor_scores, rpn_bbox_pred = nms(rpn_anchor_scores, rpn_bbox_pred, cfg.rpn_nms_thresh, use_top_n=cfg.bbox_count_before_nms)
rpn_bbox_pred = mx.nd.array(rpn_bbox_pred, ctx)
del rpn_anchor_scores
# Keep first cfg.rcnn_test_sample_size boxes
if rpn_bbox_pred.shape[0] > cfg.rcnn_test_sample_size:
rpn_bbox_pred = rpn_bbox_pred[:cfg.rcnn_test_sample_size, :]
return rpn_bbox_pred
| 49.686441 | 133 | 0.711752 |
6b05a475bdc626d99997616a9c027a2f62bc8320 | 753 | py | Python | website.py | MrL3X/mr_l3x.github.io | b4c0c5360f5d86ed73ca250d38c251030908d9f7 | ["MIT"] | 1 | 2018-04-06T00:37:39.000Z | 2018-04-06T00:37:39.000Z | website.py | MrL3X/Flask_Website | b4c0c5360f5d86ed73ca250d38c251030908d9f7 | ["MIT"] | null | null | null | website.py | MrL3X/Flask_Website | b4c0c5360f5d86ed73ca250d38c251030908d9f7 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
from flask import Flask, flash, redirect, render_template, request, session, abort
from random import randint
app = Flask(__name__)
app.config['SECRET_KEY'] = 'My website'
@app.route("/")
def index():
return render_template('index.html', title='Home')
@app.route('/about')
def about():
    return render_template('index.html', title='About Me')
@app.route('/projects')
def programmation():
return render_template('projects.html', title='Projects')
@app.route('/events')
def roadtrip():
return render_template('events.html', title='Events')
@app.route('/experiences')
def passions():
return render_template('experiences.html', title='Experiences')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80)
| 25.1 | 82 | 0.702523 |
219fffa25aa59a3e7dada81d4fb072b867a27ba4 | 1,882 | py | Python | config/configure-secrets.py | adeptvin1/AdvantEDGE | e619e7bf7a442b8d652df6b6e8b45dca005059fc | [
"Apache-2.0"
] | 43 | 2019-09-17T23:08:38.000Z | 2022-03-11T21:51:48.000Z | config/configure-secrets.py | adeptvin1/AdvantEDGE | e619e7bf7a442b8d652df6b6e8b45dca005059fc | [
"Apache-2.0"
] | 51 | 2019-09-18T14:38:15.000Z | 2022-03-27T16:34:07.000Z | config/configure-secrets.py | adeptvin1/AdvantEDGE | e619e7bf7a442b8d652df6b6e8b45dca005059fc | [
"Apache-2.0"
] | 32 | 2019-09-17T19:40:00.000Z | 2022-03-24T22:23:35.000Z | #!/usr/bin/python
import sys
import yaml
import subprocess
def usage():
print('''
NAME
configure-secrets - Create or delete platform deployment secrets from FILE
SYNOPSIS
configure-secrets <ACTION> FILE
ACTION
-s, set Set secrets
-d, del Delete secrets
FILE
File containing secrets to be provisioned/removed
''')
# Parse secrets file
def parse(fname):
print ('\n>>> Parsing secrets file')
with open(fname, 'r') as stream:
secrets = {}
try:
secrets = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
print('ERROR: failed to parse yaml file')
exit(1)
return secrets
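# Illustrative note (added for clarity, not part of the original script): the
# YAML file is expected to map secret names to field/value pairs, e.g.
#
#   my-secret:            # hypothetical secret name
#     username: admin
#     password: changeme
#
# add() below turns each field into a --from-literal argument of
# 'kubectl create secret generic my-secret ...'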
# Add provided secrets
def add(secrets):
print('\n>>> Setting secrets')
if not bool(secrets):
print('no secrets to add')
return
for secret, fields in secrets.items():
if not bool(fields):
print('skipping secret with no fields: ' + secret)
continue
entries = ''
for field, value in fields.items():
entries += ' --from-literal=' + field + '=' + value
subprocess.call('kubectl create secret generic ' + secret + entries, shell=True)
# Remove provided secrets
def remove(secrets):
print('\n>>> Removing secrets')
if not bool(secrets):
print('no secrets to remove')
return
for secret, fields in secrets.items():
subprocess.call('kubectl delete secret ' + secret, shell=True)
# Parse arguments
argCount = len(sys.argv)
if argCount != 3:
print('ERROR: invalid number of args')
usage()
sys.exit(1)
action = sys.argv[1]
fname = sys.argv[2]
# Run command
if (action == '-s' or action == 'set'):
secrets = parse(fname)
remove(secrets)
add(secrets)
elif (action == '-d' or action == 'del'):
secrets = parse(fname)
remove(secrets)
else:
print('ERROR: invalid action')
usage()
sys.exit(1)
print('')
| 21.146067 | 84 | 0.637088 |
164bf2b5278b5fcaaed44c98c74542d9c15bab88 | 11,803 | py | Python | examples/zonediff.py | Ashiq5/dnspython | 5449af5318d88bada34f661247f3bcb16f58f057 | [
"ISC"
] | 1,666 | 2015-01-02T17:46:14.000Z | 2022-03-30T07:27:32.000Z | examples/zonediff.py | felixonmars/dnspython | 2691834df42aab74914883fdf26109aeb62ec647 | [
"ISC"
] | 591 | 2015-01-16T12:19:49.000Z | 2022-03-30T21:32:11.000Z | examples/zonediff.py | felixonmars/dnspython | 2691834df42aab74914883fdf26109aeb62ec647 | [
"ISC"
] | 481 | 2015-01-14T04:14:43.000Z | 2022-03-30T19:28:52.000Z | #!/usr/bin/env python3
#
# Small library and commandline tool to do logical diffs of zonefiles
# ./zonediff -h gives you help output
#
# Requires dnspython to do all the heavy lifting
#
# (c)2009 Dennis Kaarsemaker <dennis@kaarsemaker.net>
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""See diff_zones.__doc__ for more information"""
from typing import cast, Union, Any # pylint: disable=unused-import
__all__ = ['diff_zones', 'format_changes_plain', 'format_changes_html']
try:
import dns.zone
import dns.node
except ImportError:
raise SystemExit("Please install dnspython")
def diff_zones(zone1, # type: dns.zone.Zone
zone2, # type: dns.zone.Zone
ignore_ttl=False,
ignore_soa=False
): # type: (...) -> list
"""diff_zones(zone1, zone2, ignore_ttl=False, ignore_soa=False) -> changes
Compares two dns.zone.Zone objects and returns a list of all changes
in the format (name, oldnode, newnode).
If ignore_ttl is true, a node will not be added to this list if the
only change is its TTL.
If ignore_soa is true, a node will not be added to this list if the
only changes is a change in a SOA Rdata set.
The returned nodes do include all Rdata sets, including unchanged ones.
"""
changes = []
for name in zone1:
namestr = str(name)
n1 = cast(dns.node.Node, zone1.get_node(namestr))
n2 = cast(dns.node.Node, zone2.get_node(namestr))
if not n2:
changes.append((str(name), n1, n2))
elif _nodes_differ(n1, n2, ignore_ttl, ignore_soa):
changes.append((str(name), n1, n2))
for name in zone2:
n3 = cast(dns.node.Node, zone1.get_node(name))
if not n3:
n4 = cast(dns.node.Node, zone2.get_node(name))
changes.append((str(name), n3, n4))
return changes
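# Illustrative sketch (added for clarity; the zone data below is made up and
# this helper is not called by main()):
def _diff_zones_demo():
    old_text = "@ 3600 IN SOA ns. root. 1 7200 900 1209600 86400\nwww 3600 IN A 192.0.2.1\n"
    new_text = "@ 3600 IN SOA ns. root. 2 7200 900 1209600 86400\nwww 3600 IN A 192.0.2.2\n"
    zone1 = dns.zone.from_text(old_text, origin='example.', check_origin=False)
    zone2 = dns.zone.from_text(new_text, origin='example.', check_origin=False)
    # returns (name, old_node, new_node) tuples for the SOA bump and the
    # changed A record
    return diff_zones(zone1, zone2)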
def _nodes_differ(n1, # type: dns.node.Node
n2, # type: dns.node.Node
ignore_ttl, # type: bool
ignore_soa # type: bool
): # type: (...) -> bool
if ignore_soa or not ignore_ttl:
# Compare datasets directly
for r in n1.rdatasets:
if ignore_soa and r.rdtype == dns.rdatatype.SOA:
continue
if r not in n2.rdatasets:
return True
if not ignore_ttl:
return r.ttl != n2.find_rdataset(r.rdclass, r.rdtype).ttl
for r in n2.rdatasets:
if ignore_soa and r.rdtype == dns.rdatatype.SOA:
continue
if r not in n1.rdatasets:
return True
assert False
else:
return n1 != n2
def format_changes_plain(oldf, # type: str
newf, # type: str
changes, # type: list
ignore_ttl=False
): # type: (...) -> str
"""format_changes(oldfile, newfile, changes, ignore_ttl=False) -> str
Given 2 filenames and a list of changes from diff_zones, produce diff-like
output. If ignore_ttl is True, TTL-only changes are not displayed"""
ret = "--- {}\n+++ {}\n".format(oldf, newf)
for name, old, new in changes:
ret += "@ %s\n" % name
if not old:
for r in new.rdatasets:
ret += "+ %s\n" % str(r).replace('\n', '\n+ ')
elif not new:
for r in old.rdatasets:
ret += "- %s\n" % str(r).replace('\n', '\n+ ')
else:
for r in old.rdatasets:
if r not in new.rdatasets or (
r.ttl != new.find_rdataset(r.rdclass, r.rdtype).ttl and
not ignore_ttl
):
ret += "- %s\n" % str(r).replace('\n', '\n+ ')
for r in new.rdatasets:
if r not in old.rdatasets or (
r.ttl != old.find_rdataset(r.rdclass, r.rdtype).ttl and
not ignore_ttl
):
ret += "+ %s\n" % str(r).replace('\n', '\n+ ')
return ret
def format_changes_html(oldf, # type: str
newf, # type: str
changes, # type: list
ignore_ttl=False
): # type: (...) -> str
"""format_changes(oldfile, newfile, changes, ignore_ttl=False) -> str
Given 2 filenames and a list of changes from diff_zones, produce nice html
output. If ignore_ttl is True, TTL-only changes are not displayed"""
ret = '''<table class="zonediff">
<thead>
<tr>
<th> </th>
<th class="old">%s</th>
<th class="new">%s</th>
</tr>
</thead>
<tbody>\n''' % (oldf, newf)
for name, old, new in changes:
ret += ' <tr class="rdata">\n <td class="rdname">%s</td>\n' % name
if not old:
for r in new.rdatasets:
ret += (
' <td class="old"> </td>\n'
' <td class="new">%s</td>\n'
) % str(r).replace('\n', '<br />')
elif not new:
for r in old.rdatasets:
ret += (
' <td class="old">%s</td>\n'
' <td class="new"> </td>\n'
) % str(r).replace('\n', '<br />')
else:
ret += ' <td class="old">'
for r in old.rdatasets:
if r not in new.rdatasets or (
r.ttl != new.find_rdataset(r.rdclass, r.rdtype).ttl and
not ignore_ttl
):
ret += str(r).replace('\n', '<br />')
ret += '</td>\n'
ret += ' <td class="new">'
for r in new.rdatasets:
if r not in old.rdatasets or (
r.ttl != old.find_rdataset(r.rdclass, r.rdtype).ttl and
not ignore_ttl
):
ret += str(r).replace('\n', '<br />')
ret += '</td>\n'
ret += ' </tr>\n'
return ret + ' </tbody>\n</table>'
# Make this module usable as a script too.
def main(): # type: () -> None
import argparse
import subprocess
import sys
import traceback
usage = """%prog zonefile1 zonefile2 - Show differences between zones in a diff-like format
%prog [--git|--bzr|--rcs] zonefile rev1 [rev2] - Show differences between two revisions of a zonefile
The differences shown will be logical differences, not textual differences.
"""
p = argparse.ArgumentParser(usage=usage)
p.add_argument('-s', '--ignore-soa', action="store_true", default=False, dest="ignore_soa",
help="Ignore SOA-only changes to records")
p.add_argument('-t', '--ignore-ttl', action="store_true", default=False, dest="ignore_ttl",
help="Ignore TTL-only changes to Rdata")
p.add_argument('-T', '--traceback', action="store_true", default=False, dest="tracebacks",
help="Show python tracebacks when errors occur")
p.add_argument('-H', '--html', action="store_true", default=False, dest="html",
help="Print HTML output")
p.add_argument('-g', '--git', action="store_true", default=False, dest="use_git",
help="Use git revisions instead of real files")
p.add_argument('-b', '--bzr', action="store_true", default=False, dest="use_bzr",
help="Use bzr revisions instead of real files")
p.add_argument('-r', '--rcs', action="store_true", default=False, dest="use_rcs",
help="Use rcs revisions instead of real files")
    p.add_argument('args', nargs='*',
                   help='zonefile(s) and/or revisions to compare')
    opts = p.parse_args()
    args = opts.args
opts.use_vc = opts.use_git or opts.use_bzr or opts.use_rcs
def _open(what, err): # type: (Union[list,str], str) -> Any
if isinstance(what, list):
# Must be a list, open subprocess
try:
proc = subprocess.Popen(what, stdout=subprocess.PIPE)
proc.wait()
if proc.returncode == 0:
return proc.stdout
sys.stderr.write(err + "\n")
except Exception:
sys.stderr.write(err + "\n")
if opts.tracebacks:
traceback.print_exc()
else:
# Open as normal file
try:
return open(what, 'rb')
except IOError:
sys.stderr.write(err + "\n")
if opts.tracebacks:
traceback.print_exc()
if not opts.use_vc and len(args) != 2:
p.print_help()
sys.exit(64)
if opts.use_vc and len(args) not in (2, 3):
p.print_help()
sys.exit(64)
# Open file descriptors
if not opts.use_vc:
oldn, newn = args
else:
if len(args) == 3:
filename, oldr, newr = args
oldn = "{}:{}".format(oldr, filename)
newn = "{}:{}".format(newr, filename)
else:
filename, oldr = args
newr = None
oldn = "{}:{}".format(oldr, filename)
newn = filename
old, new = None, None
oldz, newz = None, None
if opts.use_bzr:
old = _open(["bzr", "cat", "-r" + oldr, filename],
"Unable to retrieve revision {} of {}".format(oldr, filename))
if newr is not None:
new = _open(["bzr", "cat", "-r" + newr, filename],
"Unable to retrieve revision {} of {}".format(newr, filename))
elif opts.use_git:
old = _open(["git", "show", oldn],
"Unable to retrieve revision {} of {}".format(oldr, filename))
if newr is not None:
new = _open(["git", "show", newn],
"Unable to retrieve revision {} of {}".format(newr, filename))
elif opts.use_rcs:
old = _open(["co", "-q", "-p", "-r" + oldr, filename],
"Unable to retrieve revision {} of {}".format(oldr, filename))
if newr is not None:
new = _open(["co", "-q", "-p", "-r" + newr, filename],
"Unable to retrieve revision {} of {}".format(newr, filename))
if not opts.use_vc:
old = _open(oldn, "Unable to open %s" % oldn)
if not opts.use_vc or newr is None:
new = _open(newn, "Unable to open %s" % newn)
if not old or not new:
sys.exit(65)
# Parse the zones
try:
oldz = dns.zone.from_file(old, origin='.', check_origin=False)
except dns.exception.DNSException:
sys.stderr.write("Incorrect zonefile: %s\n" % old)
if opts.tracebacks:
traceback.print_exc()
try:
newz = dns.zone.from_file(new, origin='.', check_origin=False)
except dns.exception.DNSException:
sys.stderr.write("Incorrect zonefile: %s\n" % new)
if opts.tracebacks:
traceback.print_exc()
if not oldz or not newz:
sys.exit(65)
changes = diff_zones(oldz, newz, opts.ignore_ttl, opts.ignore_soa)
changes.sort()
if not changes:
sys.exit(0)
if opts.html:
print(format_changes_html(oldn, newn, changes, opts.ignore_ttl))
else:
print(format_changes_plain(oldn, newn, changes, opts.ignore_ttl))
sys.exit(1)
if __name__ == '__main__':
main()
| 37.951768 | 101 | 0.555791 |
810a4e596faf1458f9cf82eef33c3e5240c64dd7 | 705 | py | Python | WebMirror/management/rss_parser_funcs/feed_parse_extractWwwOtakubuCom.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractWwwOtakubuCom.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractWwwOtakubuCom.py | rrosajp/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z | def extractWwwOtakubuCom(item):
'''
Parser for 'www.otakubu.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Hyaku Ma No Omo', 'Hyaku Ma No Omo', 'translated'),
('Hyaku Ma No Aruji', 'Hyaku Ma No Aruji', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | 32.045455 | 104 | 0.584397 |
094ecc7db53347bede899d0e32ccffb01abac1d2 | 268 | py | Python | DayOf/regression.py | samuelfu/MIT_Trading | 608464adf08cc9ede838e3947c5ae5ffb31a43f1 | [
"MIT"
] | 30 | 2018-12-10T21:17:35.000Z | 2022-03-01T04:19:56.000Z | DayOf/regression.py | samuelfu/MIT_Trading | 608464adf08cc9ede838e3947c5ae5ffb31a43f1 | [
"MIT"
] | 8 | 2020-11-12T02:40:57.000Z | 2021-02-25T05:50:03.000Z | DayOf/regression.py | samuelfu/MIT_Trading | 608464adf08cc9ede838e3947c5ae5ffb31a43f1 | [
"MIT"
] | 11 | 2018-12-23T23:40:10.000Z | 2022-03-14T16:49:03.000Z | import pandas as pd
from pandas import DataFrame
from sklearn import linear_model
import statsmodels.api as sm
data = pd.read_csv('train_data/simple.csv')
df = DataFrame(data, columns=['p', 'prev_price', 't', 'x1', 'x2'])
X = df[['x1', 'x2']]
Y = df['p']  # the original left the target column blank; 'p' is an assumed choice
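# Added sketch (assumption: the script was meant to fit the imported models on
# the selected feature columns; nothing below comes from the original file):
regr = linear_model.LinearRegression().fit(X, Y)
ols = sm.OLS(Y, sm.add_constant(X)).fit()
print(regr.coef_, regr.intercept_)
print(ols.summary())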
print(df) | 19.142857 | 66 | 0.679104 |
5782ba1779dd4d4ebd0d263ee746161642c5a60f | 1,674 | py | Python | patreon/schemas/user.py | monokrome/patreon-python | 09aad6800c6b9f33e026712a43e5f7e7c7a2a391 | [
"Apache-2.0"
] | null | null | null | patreon/schemas/user.py | monokrome/patreon-python | 09aad6800c6b9f33e026712a43e5f7e7c7a2a391 | [
"Apache-2.0"
] | null | null | null | patreon/schemas/user.py | monokrome/patreon-python | 09aad6800c6b9f33e026712a43e5f7e7c7a2a391 | [
"Apache-2.0"
] | null | null | null | class Attributes(object):
email = 'email'
first_name = 'first_name'
last_name = 'last_name'
full_name = 'full_name'
gender = 'gender'
status = 'status'
vanity = 'vanity'
about = 'about'
facebook_id = 'facebook_id'
image_url = 'image_url'
thumb_url = 'thumb_url'
thumbnails = 'thumbnails'
youtube = 'youtube'
twitter = 'twitter'
facebook = 'facebook'
twitch = 'twitch'
is_suspended = 'is_suspended'
is_deleted = 'is_deleted'
is_nuked = 'is_nuked'
created = 'created'
url = 'url'
like_count = 'like_count'
comment_count = 'comment_count'
is_creator = 'is_creator'
hide_pledges = 'hide_pledges'
two_factor_enabled = 'two_factor_enabled'
class Relationships(object):
pledges = 'pledges'
cards = 'cards'
follows = 'follows'
campaign = 'campaign'
presence = 'presence'
session = 'session'
locations = 'locations'
current_user_follow = 'current_user_follow'
pledge_to_current_user = 'pledge_to_current_user'
default_attributes = [
Attributes.email,
Attributes.first_name,
Attributes.last_name,
Attributes.full_name,
Attributes.gender,
Attributes.status,
Attributes.vanity,
Attributes.about,
Attributes.facebook_id,
Attributes.image_url,
Attributes.thumb_url,
Attributes.thumbnails,
Attributes.youtube,
Attributes.twitter,
Attributes.facebook,
Attributes.twitch,
Attributes.is_suspended,
Attributes.is_deleted,
Attributes.is_nuked,
Attributes.created,
Attributes.url,
]
default_relationships = [
Relationships.campaign,
Relationships.pledges,
]
| 23.914286 | 53 | 0.677419 |
0c90af87b94573f87a34b9198ec4f20ce4b963c1 | 935 | py | Python | labeler_api/app/db/__init__.py | UMass-Rescue/RescueLabeler | 6a6e57ed2d6f8c05b398b7068a464998abd4bf9b | [
"MIT"
] | null | null | null | labeler_api/app/db/__init__.py | UMass-Rescue/RescueLabeler | 6a6e57ed2d6f8c05b398b7068a464998abd4bf9b | [
"MIT"
] | null | null | null | labeler_api/app/db/__init__.py | UMass-Rescue/RescueLabeler | 6a6e57ed2d6f8c05b398b7068a464998abd4bf9b | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy_utils import database_exists, create_database
import logging
from app.db import base # noqa
from app.db.base_class import Base
import app.core.config as config
logger = logging.getLogger("rescue-labeler")
engine = create_engine(config.SQLALCHEMY_DATABASE_URI, pool_pre_ping=True)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
def init_db() -> None:
"""Initialize tables with base metadata"""
if not database_exists(engine.url):
create_database(engine.url)
logger.info("Created db")
Base.metadata.create_all(bind=engine)
logger.info("initialized_tables")
class DBContextManager:
def __init__(self):
self.db = SessionLocal()
def __enter__(self):
return self.db
def __exit__(self, exc_type, exc_value, traceback):
self.db.close()
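# Illustrative usage (added comment; ``SomeModel`` is a hypothetical mapped class):
#     with DBContextManager() as db:
#         rows = db.query(SomeModel).all()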
| 28.333333 | 75 | 0.739037 |
c8244f33e9417242192afdc8438ef6c61bcfc4f1 | 1,684 | py | Python | Visualization/visualizing_feature_collection.py | monocilindro/qgis-earthengine-examples | 82aea8926d34ed3f4ad4a4a345ddbd225819d28f | [
"MIT"
] | 646 | 2019-12-03T06:09:03.000Z | 2022-03-28T03:37:08.000Z | Visualization/visualizing_feature_collection.py | csaybar/qgis-earthengine-examples | ba8942683834d2847ff3246bdd1859b36e50fe44 | [
"MIT"
] | 10 | 2019-12-30T03:42:44.000Z | 2021-05-22T07:34:07.000Z | Visualization/visualizing_feature_collection.py | csaybar/qgis-earthengine-examples | ba8942683834d2847ff3246bdd1859b36e50fe44 | [
"MIT"
] | 219 | 2019-12-06T02:20:53.000Z | 2022-03-30T15:14:27.000Z | import ee
from ee_plugin import Map
# Load a FeatureCollection from a table dataset: 'RESOLVE' ecoregions.
ecoregions = ee.FeatureCollection('RESOLVE/ECOREGIONS/2017')
# Display as default and with a custom color.
Map.addLayer(ecoregions, {}, 'default display')
Map.addLayer(ecoregions, {'color': 'FF0000'}, 'colored')
Map.addLayer(ecoregions.draw(**{'color': '006600', 'strokeWidth': 5}), {}, 'drawn')
# Create an empty image into which to paint the features, cast to byte.
empty = ee.Image().byte()
# Paint all the polygon edges with the same number and 'width', display.
outline = empty.paint(**{
'featureCollection': ecoregions,
'color': 1,
'width': 3
})
Map.addLayer(outline, {'palette': 'FF0000'}, 'edges')
# Paint the edges with different colors, display.
outlines = empty.paint(**{
'featureCollection': ecoregions,
'color': 'BIOME_NUM',
'width': 4
})
palette = ['FF0000', '00FF00', '0000FF']
Map.addLayer(outlines, {'palette': palette, 'max': 14}, 'different color edges')
# Paint the edges with different colors and 'width's.
outlines = empty.paint(**{
'featureCollection': ecoregions,
'color': 'BIOME_NUM',
'width': 'NNH'
})
Map.addLayer(outlines, {'palette': palette, 'max': 14}, 'different color, width edges')
# Paint the interior of the polygons with different colors.
fills = empty.paint(**{
'featureCollection': ecoregions,
'color': 'BIOME_NUM',
})
Map.addLayer(fills, {'palette': palette, 'max': 14}, 'colored fills')
# Paint both the fill and the edges.
filledOutlines = empty.paint(ecoregions, 'BIOME_NUM').paint(ecoregions, 0, 2)
Map.addLayer(filledOutlines, {'palette': ['000000'] + palette, 'max': 14}, 'edges and fills')
| 29.034483 | 93 | 0.696556 |
00cd9668d735b60ff529522a27aa5ad1735b0404 | 2,849 | py | Python | optimade-python-tools/tests/server/query_params/conftest.py | attlevafritt/tfya92-groupa-optimade-python-tools | 43e462ce70cf50a20f21a0aefb15d39db265773b | [
"MIT"
] | null | null | null | optimade-python-tools/tests/server/query_params/conftest.py | attlevafritt/tfya92-groupa-optimade-python-tools | 43e462ce70cf50a20f21a0aefb15d39db265773b | [
"MIT"
] | null | null | null | optimade-python-tools/tests/server/query_params/conftest.py | attlevafritt/tfya92-groupa-optimade-python-tools | 43e462ce70cf50a20f21a0aefb15d39db265773b | [
"MIT"
] | null | null | null | import pytest
@pytest.fixture
def check_include_response(get_good_response):
"""Fixture to check "good" `include` response"""
from typing import Union, List, Set
def inner(
request: str,
expected_included_types: Union[List, Set],
expected_included_resources: Union[List, Set],
expected_relationship_types: Union[List, Set] = None,
server: str = "regular",
):
response = get_good_response(request, server)
response_data = (
response["data"]
if isinstance(response["data"], list)
else [response["data"]]
)
included_resource_types = list({_["type"] for _ in response["included"]})
assert sorted(expected_included_types) == sorted(included_resource_types), (
f"Expected relationship types: {expected_included_types}. "
f"Does not match relationship types in response's included field: {included_resource_types}",
)
if expected_relationship_types is None:
expected_relationship_types = expected_included_types
relationship_types = set()
for entry in response_data:
relationship_types.update(set(entry.get("relationships", {}).keys()))
assert sorted(expected_relationship_types) == sorted(relationship_types), (
f"Expected relationship types: {expected_relationship_types}. "
f"Does not match relationship types found in response data: {relationship_types}",
)
included_resources = [_["id"] for _ in response["included"]]
assert len(included_resources) == len(expected_included_resources), response[
"included"
]
assert sorted(set(included_resources)) == sorted(expected_included_resources)
return inner
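# Illustrative usage (hypothetical test added for clarity; the endpoint and
# resource ids are placeholders, not taken from the real suite):
#     def test_structures_include_references(check_include_response):
#         check_include_response(
#             "/structures?include=references",
#             expected_included_types={"references"},
#             expected_included_resources={"some_reference_id"},
#         )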
@pytest.fixture
def check_required_fields_response(get_good_response):
"""Fixture to check "good" `required_fields` response"""
from optimade.server import mappers
get_mapper = {
"links": mappers.LinksMapper,
"references": mappers.ReferenceMapper,
"structures": mappers.StructureMapper,
}
def inner(
endpoint: str,
known_unused_fields: set,
expected_fields: set,
server: str = "regular",
):
expected_fields |= (
get_mapper[endpoint].get_required_fields() - known_unused_fields
)
expected_fields.add("attributes")
request = f"/{endpoint}?response_fields={','.join(expected_fields)}"
response = get_good_response(request, server)
response_fields = set()
for entry in response["data"]:
response_fields.update(set(entry.keys()))
response_fields.update(set(entry["attributes"].keys()))
assert sorted(expected_fields) == sorted(response_fields)
return inner
| 35.17284 | 105 | 0.651457 |
2101ec56934901cc5cb4d068f7570f0778674bc5 | 3,553 | py | Python | tests.py | richardpanda/kraken | d37c0a27f41e53846c9a11f562dea328bdb21248 | [
"MIT"
] | null | null | null | tests.py | richardpanda/kraken | d37c0a27f41e53846c9a11f562dea328bdb21248 | [
"MIT"
] | 1 | 2021-06-01T22:09:12.000Z | 2021-06-01T22:09:12.000Z | tests.py | richardpanda/kraken | d37c0a27f41e53846c9a11f562dea328bdb21248 | [
"MIT"
] | null | null | null | import json
import unittest
from app import create_app, db
from app.models import User
from config import TestingConfig
from unittest.mock import patch
class APITests(unittest.TestCase):
def setUp(self):
self.app = create_app(TestingConfig)
self.client = self.app.test_client()
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_confirm_code(self):
user = User(phone_number='+12345678999',
start_hour=8,
end_hour=22)
db.session.add(user)
db.session.commit()
request_body = {'code': user.code}
response = self.client.post(
f'/api/phone-number/{user.phone_number}/confirm', data=request_body)
self.assertEqual(response.status_code, 200)
self.assertFalse(user.is_pending)
def test_confirm_code_with_mismatch_codes(self):
user = User(phone_number='+12345678999',
start_hour=8,
end_hour=22)
db.session.add(user)
db.session.commit()
request_body = {'code': 'invalid code'}
response = self.client.post(
f'/api/phone-number/{user.phone_number}/confirm', data=request_body)
response_body = json.loads(response.data)
self.assertEqual(response.status_code, 400)
self.assertEqual(response_body['message'], 'Mismatch codes.')
self.assertTrue(user.is_pending)
def test_confirm_code_with_unregistered_phone_number(self):
request_body = {'code': '123456'}
unregistered_phone_number = '+11111111111'
response = self.client.post(
f'/api/phone-number/{unregistered_phone_number}/confirm', data=request_body)
response_body = json.loads(response.data)
self.assertEqual(response.status_code, 400)
self.assertEqual(response_body['message'],
'Phone number has not been registered.')
@patch('app.api.create_twilio_client')
def test_register(self, twilio_client):
request_body = {
'phone_number': '+12345678999',
'start_hour': 8,
'end_hour': 22,
}
response = self.client.post('/api/register', data=request_body)
self.assertEqual(response.status_code, 200)
user = db.session.query(User).first()
self.assertIsNotNone(user.code)
self.assertEqual(len(user.code), 6)
self.assertTrue(user.is_pending)
twilio_client().api.account.messages.create.assert_called_with(
to='+12345678999',
from_=TestingConfig.TWILIO_PHONE_NUMBER,
body=f'Your code is {user.code}')
def test_register_with_existing_phone_number(self):
user = User(phone_number='+12345678999',
start_hour=8,
end_hour=22)
db.session.add(user)
db.session.commit()
request_body = {
'phone_number': user.phone_number,
'start_hour': user.start_hour,
'end_hour': user.end_hour,
}
response = self.client.post('/api/register', data=request_body)
response_body = json.loads(response.data)
self.assertEqual(response.status_code, 400)
self.assertEqual(response_body['message'],
'Phone number has been registered already.')
if __name__ == '__main__':
unittest.main()
| 33.205607 | 88 | 0.622291 |
fe39ab84d614565922decd300ded46aea2f32f2f | 10,130 | py | Python | multinn/models/multinn/multinn_jamming.py | ilya16/MultINN | 2173d6c4e67d4619e89ad93b2b8d9cc46aafc1c8 | [
"MIT"
] | 2 | 2019-07-22T14:00:40.000Z | 2021-01-11T09:04:46.000Z | multinn/models/multinn/multinn_jamming.py | ilya16/MultINN | 2173d6c4e67d4619e89ad93b2b8d9cc46aafc1c8 | [
"MIT"
] | 1 | 2020-05-04T12:53:26.000Z | 2020-05-04T12:53:26.000Z | multinn/models/multinn/multinn_jamming.py | ilya16/MultINN | 2173d6c4e67d4619e89ad93b2b8d9cc46aafc1c8 | [
"MIT"
] | 1 | 2019-12-30T13:00:13.000Z | 2019-12-30T13:00:13.000Z | """Implementation of a Jamming MultINN model with per-track modules and no communication."""
import tensorflow as tf
from models.multinn.core.multi_encoder_nn import MultIEncoderNN
from utils.training import compute_gradients
class MultINNJamming(MultIEncoderNN):
"""Jamming MultINN model.
Multiple per-track Encoders + Multiple per-track Generators
with no inter-communication.
Works with multi-track sequences and learns the track specific features.
The models with Feedback inherit from the Jamming model.
"""
def __init__(self, config, params, name='MultINN-jamming'):
"""Initializes Jamming MultiNN model.
Args:
config: A dict of the experiment configurations.
params: A dict of the model parameters.
name: Optional model name to use as a prefix when adding operations.
"""
super().__init__(config, params, name=name)
self._mode = 'jamming'
def _init_generators(self, generator_class):
"""Initializes Generators of the Jamming MultINN model.
Args:
generator_class: A Python class of the Generators.
Returns:
generators: A list of the Jamming MultINN Generators.
"""
generators = [
generator_class(
num_dims=self._num_dims_generator,
num_hidden=self._params['generator']['num_hidden'],
num_hidden_rnn=self._params['generator']['num_hidden_rnn'],
keep_prob=self.keep_prob,
track_name=self.tracks[i])
for i in range(self.num_tracks)
]
return generators
def _build_generators(self, mode='eval'):
"""Building the Jamming MultINN Generators.
Inputs to the Generators are outputs from the Encoders.
Args:
            mode: Build mode for optimizing the graph size.
"""
for i in range(self.num_tracks):
with tf.variable_scope(f'generator_inputs/{self.tracks[i]}'):
generator_inputs = self._x_encoded[i][:, 0:-1, :]
with tf.variable_scope(f'generator_targets/{self.tracks[i]}'):
generator_targets = self._x_encoded[i][:, 1:, :]
self.generators[i].build(x=generator_inputs, y=generator_targets,
lengths=self._lengths, is_train=self._is_train, mode=mode)
def _build_generator_outputs(self):
"""Returns the outputs from the Jamming MultINN Generators.
Returns:
generator_outputs: A list of flattened per-track outputs from the Generators.
"""
x_hidden = []
for i in range(self.num_tracks):
with tf.variable_scope(f'{self.tracks[i]}'):
x_hidden.append(self.generators[i].forward())
return x_hidden
def _decode_generator_outputs(self):
"""Decodes the Generators' outputs through the Encoders.
Returns:
cond_probs: A list of flattened per-track conditional probabilities
for the decoded outputs.
decoded_outputs: A list of flattened per-track decoded generator outputs.
"""
outputs_probs, outputs = [], []
for i in range(self.num_tracks):
with tf.variable_scope(f'{self.tracks[i]}'):
cond_probs, decodings = self.encoders[i].decode(self._x_hidden[i])
outputs_probs.append(cond_probs)
outputs.append(decodings)
return outputs_probs, outputs
def generate(self, num_steps):
"""Generating new sequences with the Jamming MultINN.
The generative process starts by encoding the batch of input sequences
and going through them to obtain the final inputs' state.
Then, the Jamming per-track Generators generate `num_steps` new steps
by sampling from the modelled conditional distributions.
Finally, samples are decoded through the per-track Encoders to get the
        samples in the original multi-track input format.
Args:
num_steps: A number of time steps to generate.
Returns:
samples: A batch of generated sequences,
sized `[batch_size, num_steps, num_dims, num_tracks]`
"""
music = []
for i in range(self.num_tracks):
# Generator forward pass
with tf.variable_scope(f'{self.tracks[i]}'):
samples_h = self.generators[i].generate(self._x_encoded[i], num_steps)
# Decoding inputs into the original format
with tf.variable_scope(f'samples/{self.tracks[i]}/'):
_, samples = self.encoders[i].decode(samples_h)
music.append(samples)
# Making the samples multi-track
with tf.variable_scope('samples/'):
return tf.stack(music, axis=3, name='music')
def pretrain_generators(self, optimizer, lr, separate_losses=False):
"""Constructs training ops for pre-training per-track MultINN Generators.
Args:
optimizer: tf.Optimizer object that computes gradients.
lr: A learning rate for weight updates.
separate_losses: `bool` indicating whether to optimize
the separate losses for each track or optimize
the average loss for all tracks.
Returns:
init_ops: A list of weight initialization ops.
update_ops: A list of weights update ops.
metrics: A dictionary of metric names and metric ops.
metrics_upd: A list of metric update ops.
summaries: A dict of tf.Summary objects for model's
metrics, weights and training gradients.
"""
# Pre-train each Generator separately but in parallel
return self._train_generators(optimizer, lr, pretrain=True, separate_losses=separate_losses)
def train_generators(self, optimizer, lr, separate_losses=False):
"""Constructs training ops for training per-track MultINN Generators.
Args:
optimizer: tf.Optimizer object that computes gradients.
lr: A learning rate for weight updates.
separate_losses: `bool` indicating whether to optimize
the separate losses for each track or optimize
the average loss for all tracks.
Returns:
init_ops: A list of weight initialization ops.
update_ops: A list of weights update ops.
metrics: A dictionary of metric names and metric ops.
metrics_upd: A list of metric update ops.
summaries: A dict of tf.Summary objects for model's
metrics, weights and training gradients.
"""
# Training generators by minimizing mean track loss
init_ops, update_ops, metrics, metrics_upd, summaries = self._train_generators(
optimizer, lr, pretrain=False, separate_losses=separate_losses
)
# Combining with summaries on Encoder level
summaries['metrics'] = tf.summary.merge([self.summaries['metrics'], summaries['metrics']])
metrics_upd = self.metrics_upd + metrics_upd
metrics['global'] = self.metrics
return init_ops, update_ops, metrics, metrics_upd, summaries
def _train_generators(self, optimizer, lr, pretrain=False, separate_losses=False):
"""Auxiliary function for constructing training ops for MultINN Generators.
Combines the functionality needed to build pre-training and training ops.
Args:
optimizer: tf.Optimizer object that computes gradients.
lr: A learning rate for weight updates.
pretrain: `bool` indicating whether to pre-train or train
the Generators.
separate_losses: `bool` indicating whether to optimize
the separate losses for each track or optimize
the average loss for all tracks.
Returns:
init_ops: A list of weight initialization ops.
update_ops: A list of weights update ops.
metrics: A dictionary of metric names and metric ops.
metrics_upd: A list of metric update ops.
summaries: A dict of tf.Summary objects for model's
metrics, weights and training gradients.
"""
        # (Pre-)training Generators by minimizing mean track loss
init_ops, update_ops = [], []
track_metrics, track_metrics_upd, track_summaries = [], [], []
# Collecting per-track metrics
for i in range(self.num_tracks):
if pretrain:
train_func = self.generators[i].pretrain
else:
train_func = self.generators[i].train
init_ops_i, update_ops_i, metrics_i, metrics_upd_i, summaries_i = train_func(
optimizer, lr, run_optimizer=separate_losses
)
init_ops += init_ops_i
update_ops += update_ops_i
track_metrics.append(metrics_i)
track_metrics_upd.append(metrics_upd_i)
track_summaries.append(summaries_i)
# Combining metrics and summaries
metrics, metrics_upd, summaries = self._combine_track_metrics(
track_metrics, track_metrics_upd, track_summaries,
global_scope=f"metrics/{self.generators[0].name}/global/"
)
if not separate_losses:
# Optimizing the mean track loss
trainable_variables = self.trainable_generator_variables + self.trainable_feedback_variables
update_ops, summaries['gradients'] = compute_gradients(
optimizer, metrics['batch/loss'], trainable_variables
)
summaries['weights'] = tf.summary.merge(
[tf.summary.histogram(var.name, var) for var in trainable_variables]
)
return init_ops, update_ops, metrics, metrics_upd, summaries
@property
def trainable_feedback_variables(self):
"""An empty list of the Jamming MultINN trainable feedback module variables."""
return []
| 40.358566 | 104 | 0.635044 |
43932733f006cf948b923cdc4db44bb69cd252d1 | 673 | py | Python | nextbox_ui_plugin/filters.py | MaxRink/nextbox-ui-plugin | c0d7dca18754e2f436b3178fcdd6e8cf0f09c884 | [
"MIT"
] | 2 | 2021-11-11T11:49:17.000Z | 2022-01-04T02:10:45.000Z | nextbox_ui_plugin/filters.py | MaxRink/nextbox-ui-plugin | c0d7dca18754e2f436b3178fcdd6e8cf0f09c884 | [
"MIT"
] | null | null | null | nextbox_ui_plugin/filters.py | MaxRink/nextbox-ui-plugin | c0d7dca18754e2f436b3178fcdd6e8cf0f09c884 | [
"MIT"
] | 1 | 2022-01-15T13:14:20.000Z | 2022-01-15T13:14:20.000Z | import django_filters
from dcim.models import Device, Site, Region
class TopologyFilterSet(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
queryset=Device.objects.all(),
to_field_name='id',
field_name='id',
label='Device (ID)',
)
site_id = django_filters.ModelMultipleChoiceFilter(
queryset=Site.objects.all(),
label='Site (ID)',
)
region_id = django_filters.ModelMultipleChoiceFilter(
queryset=Region.objects.all(),
field_name='site__region',
label='Region (ID)',
)
class Meta:
model = Device
fields = ['id', 'name', ]
| 25.884615 | 57 | 0.640416 |
ed39557a3888a553539c7a0f9188defdef8513bf | 141 | py | Python | solutions/rosalind/ini4/ini4.py | deniscostadsc/playground | 11fa8e2b708571940451f005e1f55af0b6e5764a | [
"MIT"
] | 18 | 2015-01-22T04:08:51.000Z | 2022-01-08T22:36:47.000Z | solutions/rosalind/ini4/ini4.py | deniscostadsc/playground | 11fa8e2b708571940451f005e1f55af0b6e5764a | [
"MIT"
] | 4 | 2016-04-25T12:32:46.000Z | 2021-06-15T18:01:30.000Z | solutions/rosalind/ini4/ini4.py | deniscostadsc/playground | 11fa8e2b708571940451f005e1f55af0b6e5764a | [
"MIT"
] | 25 | 2015-03-02T06:21:51.000Z | 2021-09-12T20:49:21.000Z | import sys
a, b = sys.stdin.read().replace('\n', '').split()
a = int(a)
b = int(b)
print(sum([i for i in range(a, b + 1) if i % 2 != 0]))
| 15.666667 | 54 | 0.524823 |
70c846d63852d4b1907c9305bfec7adc03e349b5 | 642 | py | Python | main.py | WiraDKP/cow_detection_opencv | 0e64a148747be879138cbd7cd452cf99c077a879 | [
"MIT"
] | 1 | 2021-07-04T07:30:58.000Z | 2021-07-04T07:30:58.000Z | main.py | WiraDKP/cow_detection_opencv | 0e64a148747be879138cbd7cd452cf99c077a879 | [
"MIT"
] | null | null | null | main.py | WiraDKP/cow_detection_opencv | 0e64a148747be879138cbd7cd452cf99c077a879 | [
"MIT"
] | null | null | null | import cv2
from detect import CowDetection
from jcopvision.io import MediaReader, key_pressed, create_sized_window
import config as cfg
if __name__ == "__main__":
media = MediaReader(cfg.FILE_PATH)
model = CowDetection(cfg.MODEL_PATH, cfg.CONFIG_PATH, cfg.LABEL_PATH)
create_sized_window(500, media.aspect_ratio, cfg.WINDOW_NAME)
for frame in media.read():
bbox, labels, scores = model.predict(frame, min_confidence=cfg.MIN_CONF, max_iou=cfg.MAX_IOU)
frame = model.draw(frame, bbox, labels, scores)
cv2.imshow(cfg.WINDOW_NAME, frame)
if key_pressed("q"):
break
media.close() | 33.789474 | 101 | 0.714953 |
fb1ef10600d279876845080702f6cad7155d070e | 1,205 | py | Python | qiskit/transpiler/passes/decompose.py | gaurav-iiser/qiskit-terra | 3554a7e9ab5c77432ed5ccaa106fb8dc15553756 | [
"Apache-2.0"
] | 2 | 2021-09-06T19:25:36.000Z | 2021-11-17T10:46:12.000Z | qiskit/transpiler/passes/decompose.py | gaurav-iiser/qiskit-terra | 3554a7e9ab5c77432ed5ccaa106fb8dc15553756 | [
"Apache-2.0"
] | null | null | null | qiskit/transpiler/passes/decompose.py | gaurav-iiser/qiskit-terra | 3554a7e9ab5c77432ed5ccaa106fb8dc15553756 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Pass for decompose a gate in a circuit."""
from qiskit.transpiler.basepasses import TransformationPass
class Decompose(TransformationPass):
"""
Expand a gate in a circuit using its decomposition rules.
"""
def __init__(self, gate=None):
"""
Args:
gate (qiskit.circuit.gate.Gate): Gate to decompose.
"""
super().__init__()
self.gate = gate
def run(self, dag):
"""Expand a given gate into its decomposition.
Args:
dag(DAGCircuit): input dag
Returns:
DAGCircuit: output dag where gate was expanded.
"""
# Walk through the DAG and expand each non-basis node
for node in dag.op_nodes(self.gate):
decomposition_rules = node.op.decompositions()
# TODO: allow choosing other possible decompositions
decomposition_dag = decomposition_rules[0]
dag.substitute_node_with_dag(node, decomposition_dag)
return dag
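# Illustrative usage sketch (added; the converter imports are an assumption
# about the qiskit API of this era, not something this file provides):
#     from qiskit.converters import circuit_to_dag, dag_to_circuit
#     expanded = dag_to_circuit(Decompose().run(circuit_to_dag(circuit)))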
| 28.023256 | 77 | 0.631535 |
24939e6d7e1adf40ecc8025ee07daccca11e314b | 10,474 | py | Python | mrjob/logs/mixin.py | lydian/mrjob | 13028274296f5618d63ffc00301537fd385eef82 | [
"Apache-2.0"
] | null | null | null | mrjob/logs/mixin.py | lydian/mrjob | 13028274296f5618d63ffc00301537fd385eef82 | [
"Apache-2.0"
] | null | null | null | mrjob/logs/mixin.py | lydian/mrjob | 13028274296f5618d63ffc00301537fd385eef82 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp and Contributors
# Copyright 2017 Yelp
# Copyright 2018 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runner mixin for counters and probable cause of failure.
This relies on passing around a *log_interpretation* dictionary, which
is described in detail in :py:mod:`mrjob.logs`.
This mixin doesn't yet handle step logs because the EMR and Hadoop runners
handle them so differently. It's up to you to fill in the 'step' field
of your log interpretation; the mixin can't do much without it because
it needs it for the job/application ID.
Your runner should generally have one log interpretation per step,
though the mixin doesn't care how or where you store them.
"""
from logging import getLogger
from mrjob.compat import uses_yarn
from mrjob.logs.counters import _format_counters
from mrjob.logs.counters import _pick_counters
from mrjob.logs.errors import _pick_error
from mrjob.logs.errors import _pick_error_attempt_ids
from mrjob.logs.history import _interpret_history_log
from mrjob.logs.history import _ls_history_logs
from mrjob.logs.task import _interpret_task_logs
from mrjob.logs.task import _interpret_spark_task_logs
from mrjob.logs.task import _ls_task_logs
from mrjob.logs.task import _ls_spark_task_logs
from mrjob.step import _is_spark_step_type
log = getLogger(__name__)
# a callback for _interpret_task_logs(). Breaking it out to make
# testing easier
def _log_parsing_task_log(log_path):
log.info(' Parsing task log: %s' % log_path)
class LogInterpretationMixin(object):
"""Mix this in to your runner class to simplify log interpretation."""
# this mixin is meant to be tightly bound to MRJobRunner, but
# currently it only relies on self.fs and self.get_hadoop_version()
### stuff to redefine ###
def _stream_history_log_dirs(self, output_dir=None):
"""Yield lists of directories (usually, URIs) to search for history
logs in.
Usually, you'll want to add logging messages (e.g.
        'Searching for history logs in ...')
:param output_dir: Output directory for step (optional), to look
for logs (e.g. on Cloudera).
"""
return ()
def _stream_task_log_dirs(self, application_id=None, output_dir=None):
"""Yield lists of directories (usually, URIs) to search for task
logs in.
Usually, you'll want to add logging messages (e.g.
'Searching for task syslogs in...')
:param application_id: YARN application ID (optional), so we can ls
the relevant subdirectory of `userlogs/` rather than the whole
thing
:param output_dir: Output directory for step (optional), to look
for logs (e.g. on Cloudera).
"""
# sometimes pre-YARN logs are organized by job ID, but not always,
# so we don't bother with job_id; just ls() the entire userlogs
# dir and depend on regexes to find the right subdir.
return ()
def _get_step_log_interpretation(self, log_interpretation, step_type):
"""Return interpretation of the step log. Either implement
this, or fill ``'step'`` yourself (e.g. from Hadoop binary's
output."""
return None
### stuff to call ###
def _pick_counters(self, log_interpretation, step_type):
"""Pick counters from our log interpretation, interpreting
history logs if need be."""
if _is_spark_step_type(step_type):
return {}
counters = _pick_counters(log_interpretation)
if self._read_logs():
if not counters:
log.info('Attempting to fetch counters from logs...')
self._interpret_step_logs(log_interpretation, step_type)
counters = _pick_counters(log_interpretation)
if not counters:
self._interpret_history_log(log_interpretation)
counters = _pick_counters(log_interpretation)
return counters
def _pick_error(self, log_interpretation, step_type):
"""Pick probable cause of failure (only call this if job fails)."""
if self._read_logs() and not all(
log_type in log_interpretation for
log_type in ('step', 'history', 'task')):
log.info('Scanning logs for probable cause of failure...')
self._interpret_step_logs(log_interpretation, step_type)
self._interpret_history_log(log_interpretation)
error_attempt_ids = _pick_error_attempt_ids(log_interpretation)
self._interpret_task_logs(
log_interpretation, step_type, error_attempt_ids)
return _pick_error(log_interpretation)
### stuff that should just work ###
def _interpret_history_log(self, log_interpretation):
"""Fetch history log and add 'history' to log_interpretation."""
if 'history' in log_interpretation:
return # already interpreted
if not self._read_logs():
return # nothing to do
step_interpretation = log_interpretation.get('step') or {}
job_id = step_interpretation.get('job_id')
if not job_id:
if not log_interpretation.get('no_job'):
log.warning("Can't fetch history log; missing job ID")
return
output_dir = step_interpretation.get('output_dir')
log_interpretation['history'] = _interpret_history_log(
self.fs, self._ls_history_logs(
job_id=job_id, output_dir=output_dir))
def _ls_history_logs(self, job_id=None, output_dir=None):
"""Yield history log matches, logging a message for each one."""
if not self._read_logs():
return
for match in _ls_history_logs(
self.fs,
self._stream_history_log_dirs(output_dir=output_dir),
job_id=job_id):
log.info(' Parsing history log: %s' % match['path'])
yield match
def _interpret_step_logs(self, log_interpretation, step_type):
"""Add *step* to the log interpretation, if it's not already there."""
if 'step' in log_interpretation:
return
if not self._read_logs():
return
step_interpretation = self._get_step_log_interpretation(
log_interpretation, step_type)
if step_interpretation:
log_interpretation['step'] = step_interpretation
def _interpret_task_logs(
self, log_interpretation, step_type, error_attempt_ids=(),
partial=True):
"""Fetch task syslogs and stderr, and add 'task' to interpretation."""
if 'task' in log_interpretation and (
partial or not log_interpretation['task'].get('partial')):
return # already interpreted
if not self._read_logs():
return
step_interpretation = log_interpretation.get('step') or {}
application_id = step_interpretation.get('application_id')
job_id = step_interpretation.get('job_id')
output_dir = step_interpretation.get('output_dir')
yarn = uses_yarn(self.get_hadoop_version())
attempt_to_container_id = log_interpretation.get('history', {}).get(
'attempt_to_container_id', {})
if yarn:
if not application_id:
if not log_interpretation.get('no_job'):
log.warning(
"Can't fetch task logs; missing application ID")
return
else:
if not job_id:
if not log_interpretation.get('no_job'):
log.warning("Can't fetch task logs; missing job ID")
return
if _is_spark_step_type(step_type):
interpret_func = _interpret_spark_task_logs
else:
interpret_func = _interpret_task_logs
log_interpretation['task'] = interpret_func(
self.fs,
self._ls_task_logs(
step_type,
application_id=application_id,
job_id=job_id,
output_dir=output_dir,
error_attempt_ids=error_attempt_ids,
attempt_to_container_id=attempt_to_container_id,
),
partial=partial,
log_callback=_log_parsing_task_log)
def _ls_task_logs(self, step_type,
application_id=None, job_id=None, output_dir=None,
error_attempt_ids=None, attempt_to_container_id=None):
"""Yield task log matches."""
if not self._read_logs():
return
if _is_spark_step_type(step_type):
ls_func = _ls_spark_task_logs
else:
ls_func = _ls_task_logs
# logging messages are handled by a callback in _interpret_task_logs()
matches = ls_func(
self.fs,
self._stream_task_log_dirs(
application_id=application_id, output_dir=output_dir),
application_id=application_id,
job_id=job_id,
error_attempt_ids=error_attempt_ids,
attempt_to_container_id=attempt_to_container_id,
)
for match in matches:
yield match
def _log_counters(self, log_interpretation, step_num):
"""Utility for logging counters (if any) for a step."""
step_type = self._get_step(step_num)['type']
if not _is_spark_step_type(step_type):
counters = self._pick_counters(
log_interpretation, step_type)
if counters:
log.info(_format_counters(counters))
elif self._read_logs():
# should only log this if we actually looked for counters
log.warning('No counters found')
def _read_logs(self):
"""If this is false, we shouldn't attempt to list or cat logs."""
return self._opts.get('read_logs', True)
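# Illustrative sketch (added for clarity, not part of mrjob): the hooks a
# concrete runner typically overrides to feed this mixin; the class name and
# log paths below are hypothetical.
#
#     class MyRunner(MRJobRunner, LogInterpretationMixin):
#         def _stream_history_log_dirs(self, output_dir=None):
#             yield ['hdfs:///var/log/hadoop/history']
#
#         def _stream_task_log_dirs(self, application_id=None, output_dir=None):
#             yield ['hdfs:///var/log/hadoop/userlogs']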
| 37.676259 | 78 | 0.650754 |
8e5c48a837e0ac5fc48117e0d0ea5c8c20838f36 | 1,776 | py | Python | tests/idp_slo_redirect_conf.py | skanct/pysaml2 | 0c1e26a6dd8759962857a30ebd67f63fe9e881ee | [
"Apache-2.0"
] | 249 | 2018-03-01T09:47:04.000Z | 2022-03-26T04:51:26.000Z | tests/idp_slo_redirect_conf.py | skanct/pysaml2 | 0c1e26a6dd8759962857a30ebd67f63fe9e881ee | [
"Apache-2.0"
] | 416 | 2018-02-21T15:18:35.000Z | 2022-03-04T16:59:36.000Z | tests/idp_slo_redirect_conf.py | skanct/pysaml2 | 0c1e26a6dd8759962857a30ebd67f63fe9e881ee | [
"Apache-2.0"
] | 203 | 2018-02-21T13:53:12.000Z | 2022-03-08T22:22:17.000Z | from saml2 import BINDING_HTTP_REDIRECT
from saml2.saml import NAMEID_FORMAT_PERSISTENT
from saml2.saml import NAME_FORMAT_URI
from pathutils import full_path
CONFIG = {
"entityid" : "urn:mace:example.com:saml:roland:idp",
"name" : "Rolands IdP",
"service": {
"idp": {
"endpoints" : {
"single_sign_on_service" : [
("http://localhost:8088/sso", BINDING_HTTP_REDIRECT)],
"single_logout_service": [
("http://localhost:8088/slo", BINDING_HTTP_REDIRECT)]
},
"policy": {
"default": {
"lifetime": {"minutes":15},
"attribute_restrictions": None, # means all I have
"name_form": NAME_FORMAT_URI
},
"urn:mace:example.com:saml:roland:sp": {
"lifetime": {"minutes": 5},
"nameid_format": NAMEID_FORMAT_PERSISTENT,
}
},
"subject_data": full_path("subject_data.db"),
}
},
"debug" : 1,
"key_file" : full_path("test.key"),
"cert_file" : full_path("test.pem"),
"xmlsec_binary" : None,
"metadata": [{
"class": "saml2.mdstore.MetaDataFile",
"metadata": [(full_path("sp_slo_redirect.xml"), )],
}],
"attribute_map_dir" : full_path("attributemaps"),
"organization": {
"name": "Exempel AB",
"display_name": [("Exempel AB","se"),("Example Co.","en")],
"url":"http://www.example.com/roland",
},
"contact_person": [{
"given_name":"John",
"sur_name": "Smith",
"email_address": ["john.smith@example.com"],
"contact_type": "technical",
},
],
}
| 32.888889 | 78 | 0.515203 |
aebba599d6225d1984135139179c7456d6c1b7e8 | 1,586 | py | Python | src/battery_hour_data.py | albinbjorn/battery_simulator | 6636f86cdd5b0ed9a16541ff8ca8f0933360153e | [
"MIT"
] | null | null | null | src/battery_hour_data.py | albinbjorn/battery_simulator | 6636f86cdd5b0ed9a16541ff8ca8f0933360153e | [
"MIT"
] | null | null | null | src/battery_hour_data.py | albinbjorn/battery_simulator | 6636f86cdd5b0ed9a16541ff8ca8f0933360153e | [
"MIT"
] | null | null | null | import numpy as np
def determine_P_instruct(load, P_activate, P_battery_max):
if load <= P_activate and (P_activate - load >= P_battery_max):
P_instruct = P_battery_max
elif load <= P_activate and (P_activate - load < P_battery_max):
P_instruct = P_activate - load
elif load >= P_activate and (load - P_activate > P_battery_max):
P_instruct = -P_battery_max
elif load >= P_activate and (load - P_activate <= P_battery_max):
P_instruct = -(load - P_activate)
return P_instruct
def determine_P_actual(P_instruct, SoC_current, SoC_max):
if P_instruct >= 0 and SoC_current == SoC_max:
P_actual = 0
elif P_instruct <= 0 and SoC_current == 0:
P_actual = 0
elif P_instruct > 0 and SoC_current == 0:
P_actual = P_instruct
elif P_instruct >= 0 and SoC_current <= (SoC_max - P_instruct):
P_actual = P_instruct
elif P_instruct <= 0 and SoC_current >= abs(P_instruct):
P_actual = P_instruct
elif P_instruct >= 0 and P_instruct > (SoC_max - SoC_current):
P_actual = SoC_max - SoC_current
elif P_instruct <= 0 and abs(P_instruct) > SoC_current:
P_actual = -SoC_current
return P_actual
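# Added illustration (not in the original module): one hour of dispatch,
# assuming a 5 kW battery that activates below a 3 kW load and currently
# holds 2 kWh of a 10 kWh pack (all numbers are made up).
def _example_hour():  # hypothetical helper, not used by batt_algo()
    p_cmd = determine_P_instruct(load=1.0, P_activate=3.0, P_battery_max=5.0)  # -> 2.0 (charge)
    return determine_P_actual(p_cmd, SoC_current=2.0, SoC_max=10.0)            # -> 2.0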
def batt_algo(loadvec, P_activate, P_battery_max, SoC_max, SoC_initial):
loadvec = np.array(loadvec).reshape(-1,1)
SoC = np.zeros((loadvec.shape[0]+1,1))
SoC[0] = SoC_initial
P_battery = np.zeros(loadvec.shape)
for i in range(loadvec.shape[0]):
P_instruct = determine_P_instruct(loadvec[i], P_activate, P_battery_max)
P_battery[i] = determine_P_actual(P_instruct, SoC[i], SoC_max)
SoC[i+1] = SoC[i] + P_battery[i]
SoC = SoC[1:]
return P_battery, SoC | 31.72 | 74 | 0.725725 |
83f10c836f609b70d7232d04e53525e54bfca609 | 5,335 | py | Python | conceptnet5/edges.py | CollectiWise/conceptnet | 2998df5a9d287ca72032abb1d9b082747ba97c08 | [
"Apache-2.0"
] | 2 | 2020-07-19T13:27:29.000Z | 2020-07-19T13:37:28.000Z | conceptnet5/edges.py | terU3760/conceptnet5 | 11ea97fdee0a87170abf948625303bf86c0de835 | [
"Apache-2.0"
] | null | null | null | conceptnet5/edges.py | terU3760/conceptnet5 | 11ea97fdee0a87170abf948625303bf86c0de835 | [
"Apache-2.0"
] | null | null | null | """
Utilities for representing edges (assertions). Most notably, this module
includes the `make_edge` function, which builds the dictionary representing
an edge.
"""
from conceptnet5.uri import (
assertion_uri, uri_prefix, conjunction_uri, is_concept, split_uri
)
from conceptnet5.nodes import ld_node
import re
def make_edge(rel, start, end, dataset, license, sources,
surfaceText=None, surfaceStart=None, surfaceEnd=None, weight=1.0):
"""
Take in the information representing an edge (a justified assertion),
and output that edge in dictionary form.
>>> from pprint import pprint
>>> from conceptnet5.uri import Licenses
>>> e = make_edge(rel='/r/HasProperty',
... start='/c/en/fire',
... end='/c/en/hot',
... dataset='/d/conceptnet/4/en',
... license=Licenses.cc_attribution,
... sources=[{'contributor': '/s/contributor/omcs/dev'}],
... surfaceText='[[Fire]] is [[hot]]',
... weight=1.0)
>>> pprint(e)
{'dataset': '/d/conceptnet/4/en',
'end': '/c/en/hot',
'features': ['/c/en/fire /r/HasProperty -',
'/c/en/fire - /c/en/hot',
'- /r/HasProperty /c/en/hot'],
'license': 'cc:by/4.0',
'rel': '/r/HasProperty',
'sources': [{'contributor': '/s/contributor/omcs/dev'}],
'start': '/c/en/fire',
'surfaceEnd': 'hot',
'surfaceStart': 'Fire',
'surfaceText': '[[Fire]] is [[hot]]',
'uri': '/a/[/r/HasProperty/,/c/en/fire/,/c/en/hot/]',
'weight': 1.0}
"""
pstart = uri_prefix(start)
pend = uri_prefix(end)
if is_concept(pstart) and is_concept(pend):
features = [
"%s %s -" % (pstart, rel),
"%s - %s" % (pstart, pend),
"- %s %s" % (rel, pend)
]
else:
features = []
uri = assertion_uri(rel, start, end)
assert isinstance(sources, list), sources
assert all([isinstance(source, dict) for source in sources]), sources
if surfaceStart is None or surfaceEnd is None:
surfaceStart, surfaceEnd = extract_surface_terms(surfaceText)
obj = {
'uri': uri,
'rel': rel,
'start': start,
'end': end,
'dataset': dataset,
'sources': sources,
'features': features,
'license': license,
'weight': weight,
'surfaceText': surfaceText,
'surfaceStart': surfaceStart,
'surfaceEnd': surfaceEnd
}
return obj
SURFACE_FORM_PATTERN = re.compile(r'\[\[(.*?)\]\]')
def extract_surface_terms(surface):
"""
Some formats don't have separate slots for the surface text of the
'start' and 'end' terms; we only record them as part of the overall
surface text, in double brackets.
A typical surface text will look like this:
[[A dog]] has [[a tail]].
Occasionally, there will be sentence frames that put the 'end' term
before the 'start' term. These are marked with an asterisk.
*[[A tail]] can belong to [[a dog]].
This function returns the terms in their proper order -- 'surfaceStart'
followed by 'surfaceEnd' -- so they can be indexed in the more flexible
jsons and msgpack formats.
"""
if not surface:
return (None, None)
surface_terms = SURFACE_FORM_PATTERN.findall(surface)
if len(surface_terms) != 2:
return (None, None)
if surface.startswith('*'):
surface_terms = surface_terms[::-1]
return surface_terms
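# Added illustration (doctest-style comments, not part of the original module):
#     extract_surface_terms('[[A dog]] has [[a tail]].')
#         -> ['A dog', 'a tail']
#     extract_surface_terms('*[[A tail]] can belong to [[a dog]].')
#         -> ['a dog', 'A tail']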
def transform_for_linked_data(edge):
"""
Modify an edge (assertion) in place to contain values that are appropriate
for a Linked Data API.
Although this code isn't actually responsible for what an API returns
(see the conceptnet-web repository for that), it helps to deal with what
edge dictionaries should contain here.
The relevant changes are:
- Remove the 'features' list
- Rename 'uri' to '@id'
- Make 'start', 'end', and 'rel' into dictionaries with an '@id' and
'label', removing the separate 'surfaceStart' and 'surfaceEnd'
attributes
- All dictionaries should have an '@id'. For the edge itself, it's the
URI. Without this, we get RDF blank nodes, which are awful.
"""
if 'features' in edge:
del edge['features']
for source in edge['sources']:
conj = conjunction_uri(*sorted(source.values()))
source['@id'] = conj
edge['@id'] = edge['uri']
del edge['uri']
start_uri = edge['start']
end_uri = edge['end']
rel_uri = edge['rel']
start_label = edge.get('surfaceStart')
end_label = edge.get('surfaceEnd')
del edge['surfaceStart']
del edge['surfaceEnd']
edge['start'] = ld_node(start_uri, start_label)
edge['end'] = ld_node(end_uri, end_label)
edge['rel'] = ld_node(rel_uri, None)
if 'other' in edge:
# TODO: Find out when we use this, or remove it if we don't use it
if edge['other'] == start_uri:
edge['other'] = edge['start']
elif edge['other'] == end_uri:
edge['other'] = edge['end']
else:
edge['rel'] = ld_node(rel_uri, None)
return edge
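# Illustrative sketch (not part of the module API): given an edge dictionary as
# produced by make_edge() above, transform_for_linked_data() prepares it for a
# JSON-LD style response.  Roughly:
#
#     edge = make_edge(rel='/r/HasProperty', start='/c/en/fire', end='/c/en/hot',
#                      dataset='/d/conceptnet/4/en', license=Licenses.cc_attribution,
#                      sources=[{'contributor': '/s/contributor/omcs/dev'}],
#                      surfaceText='[[Fire]] is [[hot]]')
#     edge = transform_for_linked_data(edge)
#
# Afterwards 'uri' has become '@id', the 'features' list is gone, each source
# carries its own '@id', and 'start', 'end' and 'rel' are dictionaries built by
# ld_node() (with an '@id' and, where available, a label) instead of bare URIs.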
| 33.553459 | 80 | 0.585567 |
b46362e0b685da0ea78c583bcbcbb0826f12201c | 1,100 | py | Python | web/helpers/globals.py | RPANBot/RPANBot | dc0345e40667cb510490ffd32197f3998c5efe2f | [
"Apache-2.0"
] | 5 | 2020-08-25T23:12:33.000Z | 2021-09-06T20:38:44.000Z | web/helpers/globals.py | b1uejay27/RPANBot | 782ad9bc9f968fd52036ef4d541afd9df8c89025 | [
"Apache-2.0"
] | 2 | 2020-10-30T18:23:44.000Z | 2021-09-19T22:07:12.000Z | web/helpers/globals.py | b1uejay27/RPANBot | 782ad9bc9f968fd52036ef4d541afd9df8c89025 | [
"Apache-2.0"
] | 3 | 2022-03-08T19:50:01.000Z | 2022-03-23T23:13:46.000Z | """
Copyright 2020 RPANBot
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from discord import CategoryChannel, TextChannel
from web.helpers.classes import Guild
def get_guild_icon(guild: Guild, size: int = 128, format: str = "jpg") -> str:
if not guild.icon:
return "https://discordapp.com/assets/322c936a8c8be1b803cd94861bdfa868.png"
return f"https://cdn.discordapp.com/icons/{guild.id}/{guild.icon}.{format}?size={size}"
def is_text_channel(channel) -> bool:
return isinstance(channel, TextChannel)
def is_category_channel(channel) -> bool:
return isinstance(channel, CategoryChannel)
| 32.352941 | 91 | 0.762727 |
e219ef988320772d388d89810540c03f61040726 | 2,246 | py | Python | main/tests/test_past_bugs.py | Ecotrust/formhub | 05033bb5aa152cc2cbcd7382c2c999d82b2c3276 | [
"BSD-2-Clause"
] | 123 | 2015-01-08T09:21:05.000Z | 2021-11-14T19:45:23.000Z | main/tests/test_past_bugs.py | Ecotrust/formhub | 05033bb5aa152cc2cbcd7382c2c999d82b2c3276 | [
"BSD-2-Clause"
] | 16 | 2015-02-13T16:56:42.000Z | 2021-02-20T23:58:43.000Z | main/tests/test_past_bugs.py | Ecotrust/formhub | 05033bb5aa152cc2cbcd7382c2c999d82b2c3276 | [
"BSD-2-Clause"
] | 110 | 2015-01-19T14:34:06.000Z | 2021-02-01T14:55:11.000Z | from test_process import TestSite
from test_base import MainTestCase
from odk_logger.models import XForm, Instance
import os
class TestCSVExport(TestSite):
"""
    We had a problem where the CSV export would break when two users
    published the same form.
"""
def test_process(self):
TestSite.test_process(self)
TestSite.test_process(self, "doug", "doug")
class TestInputs(MainTestCase):
"""
This is where I'll input all files that proved problematic for
users when uploading.
"""
def test_uniqueness_of_group_names_enforced(self):
pre_count = XForm.objects.count()
self._create_user_and_login()
response = self._publish_xls_file(
'fixtures/group_names_must_be_unique.xls')
self.assertTrue(
"There are two sections with the name group_names_must_be_unique."
in response.content)
self.assertEqual(XForm.objects.count(), pre_count)
def test_mch(self):
self._publish_xls_file('fixtures/bug_fixes/MCH_v1.xls')
def test_erics_files(self):
for name in ['battery_life.xls',
'enumerator_weekly.xls',
'Enumerator_Training_Practice_Survey.xls']:
self._publish_xls_file(os.path.join('fixtures', 'bug_fixes', name))
class TestSubmissionBugs(MainTestCase):
def test_submission_with_mixed_case_username(self):
self._publish_transportation_form()
s = self.surveys[0]
count = Instance.objects.count()
self._make_submission(
os.path.join(
self.this_directory, 'fixtures',
'transportation', 'instances', s, s + '.xml'), 'BoB')
self.assertEqual(Instance.objects.count(), count + 1)
class TestCascading(MainTestCase):
def test_correct_id_string_picked(self):
XForm.objects.all().delete()
name = 'new_cascading_select.xls'
id_string = u'cascading_select_test'
self._publish_xls_file(os.path.join(
self.this_directory, 'fixtures', 'bug_fixes', name))
self.assertEqual(XForm.objects.count(), 1)
xform_id_string = XForm.objects.all()[0].id_string
self.assertEqual(xform_id_string, id_string)
| 32.550725 | 79 | 0.665183 |
0bc4e55271632b9a502c6f9b94b631d0eacc8082 | 8 | py | Python | kharon/__init__.py | RedRussianBear/kharon | d5ec8ad059d71a69ddc90616eafea38d96de4ed6 | [
"BSD-3-Clause"
] | 1 | 2019-04-15T12:22:46.000Z | 2019-04-15T12:22:46.000Z | kharon/project_templates/__init__.py | RedRussianBear/kharon | d5ec8ad059d71a69ddc90616eafea38d96de4ed6 | [
"BSD-3-Clause"
] | null | null | null | kharon/project_templates/__init__.py | RedRussianBear/kharon | d5ec8ad059d71a69ddc90616eafea38d96de4ed6 | [
"BSD-3-Clause"
] | null | null | null | # Init!
| 4 | 7 | 0.5 |
8cd76474bea97355ed4bfc2d14866804a46659d1 | 8,296 | py | Python | train.py | kushagra414/Tiny-YOLO-v3-for-custom-object | eacd3ad37635beae021ec9c295435772fefe699b | [
"MIT"
] | null | null | null | train.py | kushagra414/Tiny-YOLO-v3-for-custom-object | eacd3ad37635beae021ec9c295435772fefe699b | [
"MIT"
] | null | null | null | train.py | kushagra414/Tiny-YOLO-v3-for-custom-object | eacd3ad37635beae021ec9c295435772fefe699b | [
"MIT"
] | null | null | null | """
Retrain the YOLO model for your own dataset.
"""
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def _main():
annotation_path = 'Hand_Class.txt'
log_dir = 'logs/000/'
classes_path = 'Hand_Class_classes.txt'
anchors_path = 'model_data/tiny_yolo_anchors.txt'
class_names = get_classes(classes_path)
num_classes = len(class_names)
anchors = get_anchors(anchors_path)
input_shape = (416,416) # multiple of 32, hw
is_tiny_version = len(anchors)==6 # default setting
if is_tiny_version:
model = create_tiny_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
else:
model = create_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze
logging = TensorBoard(log_dir=log_dir)
checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
val_split = 0.1
with open(annotation_path) as f:
lines = f.readlines()
np.random.seed(10101)
np.random.shuffle(lines)
np.random.seed(None)
num_val = int(len(lines)*val_split)
num_train = len(lines) - num_val
# Train with frozen layers first, to get a stable loss.
# Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
if True:
model.compile(optimizer=Adam(lr=1e-3), loss={
# use custom yolo_loss Lambda layer.
'yolo_loss': lambda y_true, y_pred: y_pred})
batch_size = 16
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
epochs=50,
initial_epoch=0,
callbacks=[logging, checkpoint])
model.save_weights(log_dir + 'trained_weights_stage_1.h5')
# Unfreeze and continue training, to fine-tune.
# Train longer if the result is not good.
if True:
for i in range(len(model.layers)):
model.layers[i].trainable = True
model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
print('Unfreeze all of the layers.')
batch_size = 8 # note that more GPU memory is required after unfreezing the body
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
epochs=100,
initial_epoch=50,
callbacks=[logging, checkpoint, reduce_lr, early_stopping])
model.save_weights(log_dir + 'trained_weights_final.h5')
# Further training if needed.
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/yolo_weights.h5'):
'''create the training model'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
num_anchors//3, num_classes+5)) for l in range(3)]
model_body = yolo_body(image_input, num_anchors//3, num_classes)
print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze darknet53 body or freeze all but 3 output layers.
num = (185, len(model_body.layers)-3)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/tiny_yolo_weights.h5'):
'''create the training model, for Tiny YOLOv3'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
num_anchors//2, num_classes+5)) for l in range(2)]
model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze the darknet body or freeze all but 2 output layers.
num = (20, len(model_body.layers)-2)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
'''data generator for fit_generator'''
n = len(annotation_lines)
i = 0
while True:
image_data = []
box_data = []
for b in range(batch_size):
if i==0:
np.random.shuffle(annotation_lines)
image, box = get_random_data(annotation_lines[i], input_shape, random=True)
image_data.append(image)
box_data.append(box)
i = (i+1) % n
image_data = np.array(image_data)
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
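        # The network's own yolo_loss Lambda layer is the model output and the
        # compile step uses a dummy loss that simply returns y_pred, so Keras
        # only needs a placeholder target here; the np.zeros array yielded
        # below fills that role.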
yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
n = len(annotation_lines)
if n==0 or batch_size<=0: return None
return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
| 44.363636 | 129 | 0.676109 |
9560839b9aadf877d4a23b1ff8d168fc4b73a49c | 3,030 | py | Python | example_ctrl-c.py | Bosma/unicorn-binance-websocket-api | e8bfe08125ff0afb8f780c3970a6ba6ec6ba6c54 | [
"MIT"
] | null | null | null | example_ctrl-c.py | Bosma/unicorn-binance-websocket-api | e8bfe08125ff0afb8f780c3970a6ba6ec6ba6c54 | [
"MIT"
] | null | null | null | example_ctrl-c.py | Bosma/unicorn-binance-websocket-api | e8bfe08125ff0afb8f780c3970a6ba6ec6ba6c54 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_ctrl-c.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2020, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Thanks to mfiro https://github.com/mfiro for sharing this example!
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import time
import threading
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if oldest_stream_data_from_stream_buffer is False:
time.sleep(0.01)
else:
try:
print(oldest_stream_data_from_stream_buffer)
except Exception:
# not able to process the data? write it back to the stream_buffer
binance_websocket_api_manager.add_to_stream_buffer(oldest_stream_data_from_stream_buffer)
if __name__ == '__main__':
binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com")
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
kline_stream_id = binance_websocket_api_manager.create_stream(['kline', 'kline_1m'], ['btcusdt'])
try:
while True:
time.sleep(60)
except KeyboardInterrupt:
print("\nStopping ... just wait a few seconds!")
binance_websocket_api_manager.stop_manager_with_all_streams()
| 43.285714 | 120 | 0.754125 |
c02e0c74da90e51883da385b0bf3fb352a098007 | 2,268 | py | Python | emotescraper/androidscrape.py | BTDev/berrymotes | f6665b34446e486f4f8a7a56e50dd5cb5bc605c5 | [
"WTFPL"
] | null | null | null | emotescraper/androidscrape.py | BTDev/berrymotes | f6665b34446e486f4f8a7a56e50dd5cb5bc605c5 | [
"WTFPL"
] | 6 | 2020-01-30T10:55:02.000Z | 2021-08-25T05:30:23.000Z | emotescraper/androidscrape.py | BTDev/berrymotes | f6665b34446e486f4f8a7a56e50dd5cb5bc605c5 | [
"WTFPL"
] | 4 | 2021-07-04T19:09:11.000Z | 2021-07-24T03:57:02.000Z | #!/usr/bin/env python2
# --------------------------------------------------------------------
#
# Copyright (C) 2013 Daniel Triendl <daniel@pew.cc>
#
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# COPYING for more details.
#
# --------------------------------------------------------------------
import logging
logger = logging.basicConfig(level=logging.WARN)
from bmscraper import BMScraper, AndroidEmotesProcessorFactory
from bmscraper.ratelimiter import TokenBucket
from data import *
from json import dumps
import os
import gzip
factory = AndroidEmotesProcessorFactory(single_emotes_filename=os.path.join( '..', 'single_emotes', '{}', '{}.png'))
scraper = BMScraper(factory)
scraper.user = os.environ['REDDIT_USERNAME']
scraper.password = os.environ['REDDIT_PASSWORD']
scraper.subreddits = subreddits
scraper.legacy_subreddits = legacy_subreddits
scraper.image_blacklist = image_blacklist_android
scraper.nsfw_subreddits = nsfw_subreddits
scraper.emote_info = emote_info
scraper.rate_limit_lock = TokenBucket(15, 30)
scraper.scrape()
f = gzip.open(os.path.join('..', 'single_emotes', 'emotes.json.gz'), 'wb')
f.write(dumps(factory.emotes, separators=(',', ': ')))
f.close()
for subreddit in subreddits:
subreddit_emotes = [x for x in factory.emotes if x['sr'] == subreddit]
subreddit_emotes = sorted(subreddit_emotes, key = lambda x: x['image'])
emotes_file = os.path.join('..', 'single_emotes', subreddit, 'emotes.json.gz')
if not os.path.exists(os.path.dirname(emotes_file)):
os.makedirs(os.path.dirname(emotes_file))
    emotes_data = dumps(subreddit_emotes, separators=(',', ': '))
emotes_data_old = ''
if (os.path.exists(emotes_file)):
f = gzip.open(emotes_file, 'r')
emotes_data_old = f.read()
f.close()
if emotes_data != emotes_data_old:
f = gzip.open(emotes_file, 'wb')
f.write(emotes_data)
f.close()
f = gzip.open(os.path.join('..', 'single_emotes', 'subreddits.json.gz'), 'wb')
f.write(dumps(subreddits, separators=(',', ': ')))
f.close()
| 36 | 116 | 0.671517 |
a432b76e6db9c0cb70181bc352f296aff7e12b6c | 6,078 | py | Python | radon.py | yuta-hi/chainer-radon-transform | 8928687d779c5fbe9691677209c2c9845924761f | [
"MIT"
] | null | null | null | radon.py | yuta-hi/chainer-radon-transform | 8928687d779c5fbe9691677209c2c9845924761f | [
"MIT"
] | null | null | null | radon.py | yuta-hi/chainer-radon-transform | 8928687d779c5fbe9691677209c2c9845924761f | [
"MIT"
] | 1 | 2020-01-14T04:54:43.000Z | 2020-01-14T04:54:43.000Z | import numpy as np
import chainer
from chainer import link
from warnings import warn
from abc import ABCMeta, abstractmethod
class Projector(link.Chain, metaclass=ABCMeta):
""" Abstract class of the projector for 2D-images """
@abstractmethod
def forward(self, x):
pass
def _check_type_forward(self, x):
assert x.ndim == 4, 'input tensor should be 4-D.'
b, c, w, h = x.shape
assert c == 1, 'input image should be grayscale.'
class ParallelProjector(Projector):
def __init__(self, axis=2, keepdims=True):
self._axis = axis
self._keepdims = keepdims
def forward(self, x):
self._check_type_forward(x)
return chainer.functions.sum(x, self._axis, self._keepdims)
class OrthogonalProjector(Projector):
pass
class Radon(link.Chain):
""" Radon transform of 2D-images given specified projection angles. """
def __init__(self, theta=None):
super(Radon, self).__init__()
if theta is None:
theta = np.arange(180)
self._theta = theta
self._projector = ParallelProjector(axis=2, keepdims=True)
def _check_type_forward(self, x):
assert x.ndim == 4, 'input tensor should be 4-D.'
b, c, w, h = x.shape
assert c == 1, 'input image should be grayscale.'
assert w == h, 'input image should be square.'
def _build_rotation(self, theta, batch_size):
T = np.deg2rad(theta)
R = np.array([[np.cos(T), np.sin(T), 0],
[-np.sin(T), np.cos(T), 0],
[0, 0, 1]])
R = R[:-1,:].astype(np.float32)
R = self.xp.asarray(R[np.newaxis])
return chainer.functions.repeat(R, batch_size, axis=0)
def forward(self, x):
"""Applies the radon transform.
Args:
x (~chainer.Variable): Batch of input images.
Returns:
~chainer.Variable: Batch of output sinograms.
"""
self._check_type_forward(x)
b, c, w, h = x.shape
ret = []
for i, th in enumerate(self._theta):
matrix = self._build_rotation(th, b)
grid = chainer.functions.spatial_transformer_grid(matrix, (w,h))
rotated = chainer.functions.spatial_transformer_sampler(x, grid)
raysum = self._projector(rotated)
ret.append(raysum)
ret = chainer.functions.concat(ret, axis=2)
return ret
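# Minimal usage sketch (the shapes follow the checks in Radon.forward(); the
# concrete numbers are assumptions, and the __main__ block below shows a full run):
#
#     radon = Radon(theta=np.linspace(0, 180, 180))
#     x = chainer.Variable(np.zeros((4, 1, 512, 512), dtype=np.float32))
#     sinogram = radon(x)    # shape: (4, 1, len(theta), 512)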
def preprocess(image, circle=True):
if circle:
radius = min(image.shape) // 2
c0, c1 = np.ogrid[0:image.shape[0], 0:image.shape[1]]
reconstruction_circle = ((c0 - image.shape[0] // 2) ** 2
+ (c1 - image.shape[1] // 2) ** 2)
reconstruction_circle = reconstruction_circle <= radius ** 2
if not np.all(reconstruction_circle | (image == 0)):
warn('Radon transform: image must be zero outside the '
'reconstruction circle')
# crop image to make it square
slices = []
for d in (0, 1):
if image.shape[d] > min(image.shape):
excess = image.shape[d] - min(image.shape)
slices.append(slice(int(np.ceil(excess / 2)),
int(np.ceil(excess / 2)
+ min(image.shape))))
else:
slices.append(slice(None))
slices = tuple(slices)
padded_image = image[slices]
else:
diagonal = np.sqrt(2) * max(image.shape)
pad = [int(np.ceil(diagonal - s)) for s in image.shape]
new_center = [(s + p) // 2 for s, p in zip(image.shape, pad)]
old_center = [s // 2 for s in image.shape]
pad_before = [nc - oc for oc, nc in zip(old_center, new_center)]
pad_width = [(pb, p - pb) for pb, p in zip(pad_before, pad)]
padded_image = np.pad(image, pad_width, mode='constant',
constant_values=0)
return padded_image
if __name__ == '__main__':
import argparse
import cv2
    parser = argparse.ArgumentParser(description='Radon Transform')
parser.add_argument('--image', '-i', type=str, default='phantom.png')
parser.add_argument('--gpu', '-g', type=int, default=0)
parser.add_argument('--trial', '-t', type=int, default=20, help='number of trails')
parser.add_argument('--angle', '-a', type=int, default=500, help='number of angles')
parser.add_argument('--slice', '-s', type=int, default=1, help='number of slices, used for debugging')
args = parser.parse_args()
# setup an input slice
image = cv2.imread(args.image)[:,:,0]
image = preprocess(image)
image = cv2.resize(image, (512,512))
# convert to a volume for debugging.
volume = image[:,:,np.newaxis]
volume = np.repeat(volume, args.slice, axis=2)
w, h, z = volume.shape
# convert to a tensor
b = c = 1
x = volume.reshape(b,c,w,h,z).astype(np.float32)
# reshape the tensor: [b(=1),c(=1),w,h,z] -> [b*z(=z),c(=1),w,h]
x = x.transpose(0,4,1,2,3)
x = x.reshape(b*z,c,w,h)
# to gpu
if args.gpu >= 0:
import cupy as xp
x = xp.asarray(x)
x = chainer.Variable(x)
print(x.shape)
# do
radon = Radon(theta=np.linspace(0,180,args.angle))
if args.gpu >= 0:
chainer.backends.cuda.get_device_from_id(args.gpu).use()
radon.to_gpu()
import tqdm
for _ in tqdm.tqdm(range(args.trial)):
ret = radon(x)
print(ret.shape)
# visualize a graph
import chainer.computational_graph as c
g = c.build_computational_graph(ret)
with open('graph.dot', 'w') as o:
o.write(g.dump())
# to cpu
ret = ret.data
if args.gpu >= 0:
ret = ret.get()
# visualize an sinogram respect to the first slice
import matplotlib.pyplot as plt
plt.figure(figsize=(19,6))
plt.subplot(121)
plt.imshow(image, cmap='gray')
plt.colorbar()
plt.subplot(122)
plt.imshow(ret[0,0], cmap='gray')
plt.colorbar()
plt.show()
| 30.238806 | 106 | 0.579796 |
51b8e372919604e6f194eeeaae53bb6a4c00ed97 | 1,269 | py | Python | imutils1.py | ivominic/opencv | c058e2ce864e4e811ab5a43793ec17216aa7c5b7 | [
"Apache-2.0"
] | null | null | null | imutils1.py | ivominic/opencv | c058e2ce864e4e811ab5a43793ec17216aa7c5b7 | [
"Apache-2.0"
] | null | null | null | imutils1.py | ivominic/opencv | c058e2ce864e4e811ab5a43793ec17216aa7c5b7 | [
"Apache-2.0"
] | null | null | null | """Biblioteka koja će se importovati za rad sa slikama"""
import numpy as np
import cv2
def translate(image, x_osa, y_osa):
"""Metoda koja translira sliku po x i y osi.
Pozitivne vrijednosti desno i dolje, negativne lijevo i gore"""
matrica = np.float32([[1, 0, x_osa], [0, 1, y_osa]])
shifted = cv2.warpAffine(
image, matrica, (image.shape[1], image.shape[0]))
return shifted
def rotate(image, angle, center=None, scale=1.0):
""" Metoda koja rotira sliku oko centra"""
(height, width) = image.shape[:2]
if center is None:
center = (width // 2, height // 2)
matrica = cv2.getRotationMatrix2D(center, angle, scale)
rotated = cv2.warpAffine(image, matrica, (width, height))
return rotated
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
""" Vrši resize fotografije"""
dim = None
(pom_height, pom_width) = image.shape[:2]
if width is None and height is None:
return image
if width is None:
radius = height/float(pom_height)
dim = (int(pom_width * radius), height)
else:
radius = width/float(pom_width)
dim = (width, int(pom_height * radius))
resized = cv2.resize(image, dim, interpolation=inter)
return resized
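# Illustrative usage (the file name is an assumption):
#
#     image = cv2.imread('example.jpg')
#     shifted = translate(image, 25, -50)     # 25 px right, 50 px up
#     rotated = rotate(image, 45)             # 45 degrees around the image center
#     thumbnail = resize(image, width=200)    # height follows from the aspect ratio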
| 28.840909 | 67 | 0.647754 |
f4abd1075050417b773e40e0ff102ee297a79c71 | 1,888 | py | Python | nova/auth/rbac.py | bopopescu/cc | 5c14efcda95c4987532484c84a885a3b07efc984 | [
"Apache-2.0"
] | null | null | null | nova/auth/rbac.py | bopopescu/cc | 5c14efcda95c4987532484c84a885a3b07efc984 | [
"Apache-2.0"
] | 1 | 2020-08-02T15:40:49.000Z | 2020-08-02T15:40:49.000Z | nova/auth/rbac.py | bopopescu/cc | 5c14efcda95c4987532484c84a885a3b07efc984 | [
"Apache-2.0"
] | 1 | 2020-07-25T17:56:39.000Z | 2020-07-25T17:56:39.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 Anso Labs, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova.auth import users
def allow(*roles):
def wrap(f):
def wrapped_f(self, context, *args, **kwargs):
if context.user.is_superuser():
return f(self, context, *args, **kwargs)
for role in roles:
if __matches_role(context, role):
return f(self, context, *args, **kwargs)
raise exception.NotAuthorized()
return wrapped_f
return wrap
def deny(*roles):
def wrap(f):
def wrapped_f(self, context, *args, **kwargs):
if context.user.is_superuser():
return f(self, context, *args, **kwargs)
for role in roles:
if __matches_role(context, role):
raise exception.NotAuthorized()
return f(self, context, *args, **kwargs)
return wrapped_f
return wrap
def __matches_role(context, role):
if role == 'all':
return True
if role == 'none':
return False
return context.project.has_role(context.user.id, role)
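# Illustrative sketch (the class and role names below are assumptions, not part
# of this module): the decorators are meant to wrap methods that receive the
# request context as their first argument after self.
#
#     class CloudController(object):
#         @allow('projectmanager', 'sysadmin')
#         def run_instances(self, context, **kwargs):
#             ...
#
#         @deny('netadmin')
#         def describe_images(self, context, **kwargs):
#             ...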
| 33.714286 | 78 | 0.644597 |
052363e40c3dcc84883eaf4519e7e2a4a2642459 | 3,378 | py | Python | simbad/rotsearch/tests/test_rotation_search.py | hlasimpk/SIMPLE | 89570f1a29e2871cb1e85cfda36cfa22fbad0877 | [
"BSD-3-Clause"
] | 2 | 2017-02-14T15:31:30.000Z | 2019-07-20T12:30:59.000Z | simbad/rotsearch/tests/test_rotation_search.py | hlasimpk/SIMPLE | 89570f1a29e2871cb1e85cfda36cfa22fbad0877 | [
"BSD-3-Clause"
] | 65 | 2017-02-14T14:19:28.000Z | 2021-09-21T09:50:02.000Z | simbad/rotsearch/tests/test_rotation_search.py | hlasimpk/SIMPLE | 89570f1a29e2871cb1e85cfda36cfa22fbad0877 | [
"BSD-3-Clause"
] | 7 | 2017-05-09T15:27:08.000Z | 2021-06-13T13:32:40.000Z | """Test functions for rotsearch.AmoreRotationSearch and rotsearch.PhaserRotationSearch"""
__author__ = "Adam Simpkin"
__date__ = "16 Aug 2017"
import os
import unittest
import simbad.rotsearch.amore_search
import simbad.rotsearch.phaser_search
try:
ROOT_DIR = os.environ['SIMBAD_ROOT']
EXAMPLE_DIR = os.path.join(ROOT_DIR, "test_data")
except KeyError:
from simbad.command_line import CCP4RootDirectory
ROOT_DIR = str(CCP4RootDirectory())
EXAMPLE_DIR = os.path.join(ROOT_DIR, "examples")
class Test(unittest.TestCase):
"""Unit test"""
@classmethod
def setUpClass(cls):
mtz = os.path.join(EXAMPLE_DIR, "toxd", "toxd.mtz")
cls.AS = simbad.rotsearch.amore_search.AmoreRotationSearch(mtz, "molrep", "tmp_dir", "work_dir")
cls.PS = simbad.rotsearch.phaser_search.PhaserRotationSearch(mtz, "molrep", "tmp_dir", "work_dir")
def test_sortfun(self):
"""Test case for AmoreRotationSearch.sortfun_stdin_template"""
f = "f"
sigf = "sigf"
data = self.AS.sortfun_stdin_template.format(f=f, sigf=sigf)
reference_data = """TITLE ** spmi packing h k l F for crystal**
SORTFUN RESOL 100. 2.5
LABI FP=f SIGFP=sigf"""
self.assertEqual(data, reference_data)
def test_rotfun(self):
"""Test case for AmoreRotationSearch.rotfun_stdin_template"""
shres = "shres"
intrad = "intrad"
pklim = "pklim"
npic = "npic"
step = "step"
data = self.AS.rotfun_stdin_template.format(shres=shres, intrad=intrad, pklim=pklim, npic=npic, step=step)
reference_data = """TITLE: Generate HKLPCK1 from MODEL FRAGMENT 1
ROTFUN
GENE 1 RESO 100.0 shres CELL_MODEL 80 75 65
CLMN CRYSTAL ORTH 1 RESO 20.0 shres SPHERE intrad
CLMN MODEL 1 RESO 20.0 shres SPHERE intrad
ROTA CROSS MODEL 1 PKLIM pklim NPIC npic STEP step"""
self.assertEqual(data, reference_data)
def test_tabfun(self):
"""Test case for AmoreRotationSearch.tabfun_stdin_template"""
x = 100
y = 200
z = 300
a = 10
b = 20
c = 30
data = self.AS.tabfun_stdin_template.format(x=x, y=y, z=z, a=a, b=b, c=c)
reference_data = """TITLE: Produce table for MODEL FRAGMENT
TABFUN
CRYSTAL 100 200 300 10 20 30 ORTH 1
MODEL 1 BTARGET 23.5
SAMPLE 1 RESO 2.5 SHANN 2.5 SCALE 4.0"""
self.assertEqual(data, reference_data)
def test_rot_job_succeeded_1(self):
"""Test case for AmoreRotationSearch._rot_job_succeeded"""
amore_z_score = 15
data = self.AS._rot_job_succeeded(amore_z_score)
self.assertTrue(data)
def test_rot_job_succeeded_2(self):
"""Test case for AmoreRotationSearch._rot_job_succeeded"""
amore_z_score = 8
data = self.AS._rot_job_succeeded(amore_z_score)
self.assertFalse(data)
def test_rot_job_succeeded_3(self):
"""Test case for PhaserRotationSearch._rot_job_succeeded"""
phaser_z_score = 15
data = self.PS._rot_job_succeeded(phaser_z_score)
self.assertTrue(data)
def test_rot_job_succeeded_4(self):
"""Test case for PhaserRotationSearch._rot_job_succeeded"""
phaser_z_score = 6
data = self.PS._rot_job_succeeded(phaser_z_score)
self.assertFalse(data)
if __name__ == "__main__":
unittest.main()
| 30.160714 | 114 | 0.671107 |
9878a3adefb93ab3921d57a3b9160abf00d8d1d0 | 31,182 | py | Python | nova/tests/api/openstack/test_common.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | null | null | null | nova/tests/api/openstack/test_common.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | null | null | null | nova/tests/api/openstack/test_common.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
import xml.dom.minidom as minidom
from lxml import etree
import mock
from testtools import matchers
import webob
import webob.exc
import webob.multidict
from nova.api.openstack import common
from nova.api.openstack import xmlutil
from nova.compute import task_states
from nova.compute import vm_states
from nova import exception
from nova import test
from nova.tests import utils
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
class LimiterTest(test.TestCase):
"""Unit tests for the `nova.api.openstack.common.limited` method which
takes in a list of items and, depending on the 'offset' and 'limit' GET
params, returns a subset or complete set of the given items.
"""
def setUp(self):
"""Run before each test."""
super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
self.medium = range(1000)
self.large = range(10000)
def test_limiter_offset_zero(self):
# Test offset key works with 0.
req = webob.Request.blank('/?offset=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_offset_medium(self):
# Test offset key works with a medium sized number.
req = webob.Request.blank('/?offset=10')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), self.small[10:])
self.assertEqual(common.limited(self.medium, req), self.medium[10:])
self.assertEqual(common.limited(self.large, req), self.large[10:1010])
def test_limiter_offset_over_max(self):
# Test offset key works with a number over 1000 (max_limit).
req = webob.Request.blank('/?offset=1001')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), [])
self.assertEqual(common.limited(self.medium, req), [])
self.assertEqual(
common.limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
# Test offset key works with a blank offset.
req = webob.Request.blank('/?offset=')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
# Test offset key works with a BAD offset.
req = webob.Request.blank(u'/?offset=\u0020aa')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
# Test request with no offset or limit.
req = webob.Request.blank('/')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_zero(self):
# Test limit of zero.
req = webob.Request.blank('/?limit=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_medium(self):
# Test limit of 10.
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium[:10])
self.assertEqual(common.limited(self.large, req), self.large[:10])
def test_limiter_limit_over_max(self):
# Test limit of 3000.
req = webob.Request.blank('/?limit=3000')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_and_offset(self):
# Test request with both limit and offset.
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(common.limited(items, req), items[1:4])
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(common.limited(items, req), items[3:1003])
req = webob.Request.blank('/?offset=3&limit=1500')
self.assertEqual(common.limited(items, req), items[3:1003])
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual(common.limited(items, req), [])
def test_limiter_custom_max_limit(self):
# Test a max_limit other than 1000.
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[1:4])
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[3:])
req = webob.Request.blank('/?offset=3&limit=2500')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[3:])
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual(common.limited(items, req, max_limit=2000), [])
def test_limiter_negative_limit(self):
# Test a negative limit.
req = webob.Request.blank('/?limit=-3000')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
# Test a negative offset.
req = webob.Request.blank('/?offset=-30')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
class SortParamUtilsTest(test.TestCase):
def test_get_sort_params_defaults(self):
'''Verifies the default sort key and direction.'''
sort_keys, sort_dirs = common.get_sort_params({})
self.assertEqual(['created_at'], sort_keys)
self.assertEqual(['desc'], sort_dirs)
def test_get_sort_params_override_defaults(self):
        '''Verifies that the defaults can be overridden.'''
sort_keys, sort_dirs = common.get_sort_params({}, default_key='key1',
default_dir='dir1')
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
sort_keys, sort_dirs = common.get_sort_params({}, default_key=None,
default_dir=None)
self.assertEqual([], sort_keys)
self.assertEqual([], sort_dirs)
def test_get_sort_params_single_value(self):
'''Verifies a single sort key and direction.'''
params = webob.multidict.MultiDict()
params.add('sort_key', 'key1')
params.add('sort_dir', 'dir1')
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_single_with_default(self):
'''Verifies a single sort value with a default.'''
params = webob.multidict.MultiDict()
params.add('sort_key', 'key1')
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
# sort_key was supplied, sort_dir should be defaulted
self.assertEqual(['desc'], sort_dirs)
params = webob.multidict.MultiDict()
params.add('sort_dir', 'dir1')
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['created_at'], sort_keys)
# sort_dir was supplied, sort_key should be defaulted
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_multiple_values(self):
'''Verifies multiple sort parameter values.'''
params = webob.multidict.MultiDict()
params.add('sort_key', 'key1')
params.add('sort_key', 'key2')
params.add('sort_key', 'key3')
params.add('sort_dir', 'dir1')
params.add('sort_dir', 'dir2')
params.add('sort_dir', 'dir3')
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dirs)
# Also ensure that the input parameters are not modified
sort_key_vals = []
sort_dir_vals = []
while 'sort_key' in params:
sort_key_vals.append(params.pop('sort_key'))
while 'sort_dir' in params:
sort_dir_vals.append(params.pop('sort_dir'))
self.assertEqual(['key1', 'key2', 'key3'], sort_key_vals)
self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dir_vals)
self.assertEqual(0, len(params))
class PaginationParamsTest(test.TestCase):
"""Unit tests for the `nova.api.openstack.common.get_pagination_params`
method which takes in a request object and returns 'marker' and 'limit'
GET params.
"""
def test_no_params(self):
# Test no params.
req = webob.Request.blank('/')
self.assertEqual(common.get_pagination_params(req), {})
def test_valid_marker(self):
# Test valid marker param.
req = webob.Request.blank(
'/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
self.assertEqual(common.get_pagination_params(req),
{'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
def test_valid_limit(self):
# Test valid limit param.
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.get_pagination_params(req), {'limit': 10})
def test_invalid_limit(self):
# Test invalid limit param.
req = webob.Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_valid_limit_and_marker(self):
# Test valid limit and marker parameters.
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = webob.Request.blank('/?limit=20&marker=%s' % marker)
self.assertEqual(common.get_pagination_params(req),
{'marker': marker, 'limit': 20})
def test_valid_page_size(self):
# Test valid page_size param.
req = webob.Request.blank('/?page_size=10')
self.assertEqual(common.get_pagination_params(req),
{'page_size': 10})
def test_invalid_page_size(self):
# Test invalid page_size param.
req = webob.Request.blank('/?page_size=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_valid_limit_and_page_size(self):
# Test valid limit and page_size parameters.
req = webob.Request.blank('/?limit=20&page_size=5')
self.assertEqual(common.get_pagination_params(req),
{'page_size': 5, 'limit': 20})
class MiscFunctionsTest(test.TestCase):
def test_remove_major_version_from_href(self):
fixture = 'http://www.testsite.com/v1/images'
expected = 'http://www.testsite.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href(self):
fixture = 'http://www.testsite.com/v1.1/images'
expected = 'http://www.testsite.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_2(self):
fixture = 'http://www.testsite.com/v1.1/'
expected = 'http://www.testsite.com/'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_3(self):
fixture = 'http://www.testsite.com/v10.10'
expected = 'http://www.testsite.com'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_4(self):
fixture = 'http://www.testsite.com/v1.1/images/v10.5'
expected = 'http://www.testsite.com/images/v10.5'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_bad_request(self):
fixture = 'http://www.testsite.com/1.1/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_2(self):
fixture = 'http://www.testsite.com/v/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_3(self):
fixture = 'http://www.testsite.com/v1.1images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_get_id_from_href_with_int_url(self):
fixture = 'http://www.testsite.com/dir/45'
actual = common.get_id_from_href(fixture)
expected = '45'
self.assertEqual(actual, expected)
def test_get_id_from_href_with_int(self):
fixture = '45'
actual = common.get_id_from_href(fixture)
expected = '45'
self.assertEqual(actual, expected)
def test_get_id_from_href_with_int_url_query(self):
fixture = 'http://www.testsite.com/dir/45?asdf=jkl'
actual = common.get_id_from_href(fixture)
expected = '45'
self.assertEqual(actual, expected)
def test_get_id_from_href_with_uuid_url(self):
fixture = 'http://www.testsite.com/dir/abc123'
actual = common.get_id_from_href(fixture)
expected = "abc123"
self.assertEqual(actual, expected)
def test_get_id_from_href_with_uuid_url_query(self):
fixture = 'http://www.testsite.com/dir/abc123?asdf=jkl'
actual = common.get_id_from_href(fixture)
expected = "abc123"
self.assertEqual(actual, expected)
def test_get_id_from_href_with_uuid(self):
fixture = 'abc123'
actual = common.get_id_from_href(fixture)
expected = 'abc123'
self.assertEqual(actual, expected)
def test_raise_http_conflict_for_instance_invalid_state(self):
exc = exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
try:
common.raise_http_conflict_for_instance_invalid_state(exc,
'meow', 'fake_server_id')
except webob.exc.HTTPConflict as e:
self.assertEqual(unicode(e),
"Cannot 'meow' instance fake_server_id while it is in "
"fake_attr fake_state")
else:
self.fail("webob.exc.HTTPConflict was not raised")
def test_check_img_metadata_properties_quota_valid_metadata(self):
ctxt = utils.get_test_admin_context()
metadata1 = {"key": "value"}
actual = common.check_img_metadata_properties_quota(ctxt, metadata1)
self.assertIsNone(actual)
metadata2 = {"key": "v" * 260}
actual = common.check_img_metadata_properties_quota(ctxt, metadata2)
self.assertIsNone(actual)
metadata3 = {"key": ""}
actual = common.check_img_metadata_properties_quota(ctxt, metadata3)
self.assertIsNone(actual)
def test_check_img_metadata_properties_quota_inv_metadata(self):
ctxt = utils.get_test_admin_context()
metadata1 = {"a" * 260: "value"}
self.assertRaises(webob.exc.HTTPBadRequest,
common.check_img_metadata_properties_quota, ctxt, metadata1)
metadata2 = {"": "value"}
self.assertRaises(webob.exc.HTTPBadRequest,
common.check_img_metadata_properties_quota, ctxt, metadata2)
metadata3 = "invalid metadata"
self.assertRaises(webob.exc.HTTPBadRequest,
common.check_img_metadata_properties_quota, ctxt, metadata3)
metadata4 = None
self.assertIsNone(common.check_img_metadata_properties_quota(ctxt,
metadata4))
metadata5 = {}
self.assertIsNone(common.check_img_metadata_properties_quota(ctxt,
metadata5))
def test_status_from_state(self):
for vm_state in (vm_states.ACTIVE, vm_states.STOPPED):
for task_state in (task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH):
actual = common.status_from_state(vm_state, task_state)
expected = 'RESIZE'
self.assertEqual(expected, actual)
def test_status_rebuild_from_state(self):
for vm_state in (vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR):
for task_state in (task_states.REBUILDING,
task_states.REBUILD_BLOCK_DEVICE_MAPPING,
task_states.REBUILD_SPAWNING):
actual = common.status_from_state(vm_state, task_state)
expected = 'REBUILD'
self.assertEqual(expected, actual)
def test_task_and_vm_state_from_status(self):
fixture1 = ['reboot']
actual = common.task_and_vm_state_from_status(fixture1)
expected = [vm_states.ACTIVE], [task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED,
task_states.REBOOTING]
self.assertEqual(expected, actual)
fixture2 = ['resize']
actual = common.task_and_vm_state_from_status(fixture2)
expected = ([vm_states.ACTIVE, vm_states.STOPPED],
[task_states.RESIZE_FINISH,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_PREP])
self.assertEqual(expected, actual)
fixture3 = ['resize', 'reboot']
actual = common.task_and_vm_state_from_status(fixture3)
expected = ([vm_states.ACTIVE, vm_states.STOPPED],
[task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED,
task_states.REBOOTING,
task_states.RESIZE_FINISH,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_PREP])
self.assertEqual(expected, actual)
class TestCollectionLinks(test.NoDBTestCase):
"""Tests the _get_collection_links method."""
@mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
def test_items_less_than_limit(self, href_link_mock):
items = [
{"uuid": "123"}
]
req = mock.MagicMock()
params = mock.PropertyMock(return_value=dict(limit=10))
type(req).params = params
builder = common.ViewBuilder()
results = builder._get_collection_links(req, items, "ignored", "uuid")
self.assertFalse(href_link_mock.called)
self.assertThat(results, matchers.HasLength(0))
@mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
def test_items_equals_given_limit(self, href_link_mock):
items = [
{"uuid": "123"}
]
req = mock.MagicMock()
params = mock.PropertyMock(return_value=dict(limit=1))
type(req).params = params
builder = common.ViewBuilder()
results = builder._get_collection_links(req, items,
mock.sentinel.coll_key,
"uuid")
href_link_mock.assert_called_once_with(req, "123",
mock.sentinel.coll_key)
self.assertThat(results, matchers.HasLength(1))
@mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
def test_items_equals_default_limit(self, href_link_mock):
items = [
{"uuid": "123"}
]
req = mock.MagicMock()
params = mock.PropertyMock(return_value=dict())
type(req).params = params
self.flags(osapi_max_limit=1)
builder = common.ViewBuilder()
results = builder._get_collection_links(req, items,
mock.sentinel.coll_key,
"uuid")
href_link_mock.assert_called_once_with(req, "123",
mock.sentinel.coll_key)
self.assertThat(results, matchers.HasLength(1))
@mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
def test_items_equals_default_limit_with_given(self, href_link_mock):
items = [
{"uuid": "123"}
]
req = mock.MagicMock()
# Given limit is greater than default max, only return default max
params = mock.PropertyMock(return_value=dict(limit=2))
type(req).params = params
self.flags(osapi_max_limit=1)
builder = common.ViewBuilder()
results = builder._get_collection_links(req, items,
mock.sentinel.coll_key,
"uuid")
href_link_mock.assert_called_once_with(req, "123",
mock.sentinel.coll_key)
self.assertThat(results, matchers.HasLength(1))
class MetadataXMLDeserializationTest(test.TestCase):
deserializer = common.MetadataXMLDeserializer()
def test_create(self):
request_body = """
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
<meta key='123'>asdf</meta>
<meta key='567'>jkl;</meta>
</metadata>"""
output = self.deserializer.deserialize(request_body, 'create')
expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
self.assertEqual(output, expected)
def test_create_empty(self):
request_body = """
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
output = self.deserializer.deserialize(request_body, 'create')
expected = {"body": {"metadata": {}}}
self.assertEqual(output, expected)
def test_update_all(self):
request_body = """
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
<meta key='123'>asdf</meta>
<meta key='567'>jkl;</meta>
</metadata>"""
output = self.deserializer.deserialize(request_body, 'update_all')
expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
self.assertEqual(output, expected)
def test_update(self):
request_body = """
<meta xmlns="http://docs.openstack.org/compute/api/v1.1"
key='123'>asdf</meta>"""
output = self.deserializer.deserialize(request_body, 'update')
expected = {"body": {"meta": {"123": "asdf"}}}
self.assertEqual(output, expected)
class MetadataXMLSerializationTest(test.TestCase):
def test_xml_declaration(self):
serializer = common.MetadataTemplate()
fixture = {
'metadata': {
'one': 'two',
'three': 'four',
},
}
output = serializer.serialize(fixture)
has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
self.assertTrue(has_dec)
def test_index(self):
serializer = common.MetadataTemplate()
fixture = {
'metadata': {
'one': 'two',
'three': 'four',
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 2)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
def test_index_null(self):
serializer = common.MetadataTemplate()
fixture = {
'metadata': {
None: None,
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 1)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
def test_index_unicode(self):
serializer = common.MetadataTemplate()
fixture = {
'metadata': {
u'three': u'Jos\xe9',
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 1)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(metadata_elem.text.strip(), meta_value)
def test_show(self):
serializer = common.MetaItemTemplate()
fixture = {
'meta': {
'one': 'two',
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
meta_dict = fixture['meta']
(meta_key, meta_value) = meta_dict.items()[0]
self.assertEqual(str(root.get('key')), str(meta_key))
self.assertEqual(root.text.strip(), meta_value)
def test_update_all(self):
serializer = common.MetadataTemplate()
fixture = {
'metadata': {
'key6': 'value6',
'key4': 'value4',
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 2)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
def test_update_item(self):
serializer = common.MetaItemTemplate()
fixture = {
'meta': {
'one': 'two',
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
meta_dict = fixture['meta']
(meta_key, meta_value) = meta_dict.items()[0]
self.assertEqual(str(root.get('key')), str(meta_key))
self.assertEqual(root.text.strip(), meta_value)
def test_create(self):
serializer = common.MetadataTemplate()
fixture = {
'metadata': {
'key9': 'value9',
'key2': 'value2',
'key1': 'value1',
},
}
output = serializer.serialize(fixture)
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
metadata_elems = root.findall('{0}meta'.format(NS))
self.assertEqual(len(metadata_elems), 3)
for i, metadata_elem in enumerate(metadata_elems):
(meta_key, meta_value) = metadata_dict.items()[i]
self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
actual = minidom.parseString(output.replace(" ", ""))
expected = minidom.parseString("""
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
<meta key="key2">value2</meta>
<meta key="key9">value9</meta>
<meta key="key1">value1</meta>
</metadata>
""".replace(" ", "").replace("\n", ""))
self.assertEqual(expected.toxml(), actual.toxml())
def test_metadata_deserializer(self):
"""Should throw a 400 error on corrupt xml."""
deserializer = common.MetadataXMLDeserializer()
self.assertRaises(
exception.MalformedRequestBody,
deserializer.deserialize,
utils.killer_xml_body())
class LinkPrefixTest(test.NoDBTestCase):
def test_update_link_prefix(self):
vb = common.ViewBuilder()
result = vb._update_link_prefix("http://192.168.0.243:24/",
"http://127.0.0.1/compute")
self.assertEqual("http://127.0.0.1/compute", result)
result = vb._update_link_prefix("http://foo.x.com/v1",
"http://new.prefix.com")
self.assertEqual("http://new.prefix.com/v1", result)
result = vb._update_link_prefix(
"http://foo.x.com/v1",
"http://new.prefix.com:20455/new_extra_prefix")
self.assertEqual("http://new.prefix.com:20455/new_extra_prefix/v1",
result)
| 40.814136 | 78 | 0.6159 |
8d810fb5f6442f8e84980f2d14449c5431ddfc52 | 1,194 | py | Python | tests/tests/correctness/Components/BucketSystem/m_cor_001/run.py | rpeach-sag/apama-industry-analytics-kit | a3f6039915501d41251b6f7ec41b0cb8111baf7b | [
"Apache-2.0"
] | 3 | 2019-09-02T18:21:22.000Z | 2020-04-17T16:34:57.000Z | tests/tests/correctness/Components/BucketSystem/m_cor_001/run.py | rpeach-sag/apama-industry-analytics-kit | a3f6039915501d41251b6f7ec41b0cb8111baf7b | [
"Apache-2.0"
] | null | null | null | tests/tests/correctness/Components/BucketSystem/m_cor_001/run.py | rpeach-sag/apama-industry-analytics-kit | a3f6039915501d41251b6f7ec41b0cb8111baf7b | [
"Apache-2.0"
] | null | null | null | # $Copyright (c) 2015 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or Terracotta Inc., San Francisco, CA, USA, and/or Software AG (Canada) Inc., Cambridge, Ontario, Canada, and/or, Software AG (UK) Ltd., Derby, United Kingdom, and/or Software A.G. (Israel) Ltd., Or-Yehuda, Israel and/or their licensors.$
# Use, reproduction, transfer, publication or disclosure is prohibited except as specifically provided for in your License Agreement with Software AG
from industry.framework.AnalyticsBaseTest import AnalyticsBaseTest
from pysys.constants import *
class PySysTest(AnalyticsBaseTest):
def execute(self):
# Start the correlator
correlator = self.startTest()
correlator.injectMonitorscript(['BucketSystem.mon'], self.COMPONENTS)
correlator.injectMonitorscript(['BucketSystemCreationFailure.mon'], self.input)
self.waitForSignal('correlator.out', expr='Test finished', condition='==1', timeout=5)
def validate(self):
self.assertLineCount('correlator.out', expr='Successfully threw exception:', condition='==11')
self.assertLineCount('correlator.out', expr='Test Failed', condition='==0')
self.checkSanity()
| 56.857143 | 343 | 0.747906 |
250a60dd6c89e324ec2b897bd07aadd57829f335 | 367 | py | Python | src/pandas_profiling_study/report/presentation/core/frequency_table.py | lucasiscoviciMoon/pandas-profiling-study | 142d3b0f5e3139cdb531819f637a407682fa5684 | [
"MIT"
] | null | null | null | src/pandas_profiling_study/report/presentation/core/frequency_table.py | lucasiscoviciMoon/pandas-profiling-study | 142d3b0f5e3139cdb531819f637a407682fa5684 | [
"MIT"
] | null | null | null | src/pandas_profiling_study/report/presentation/core/frequency_table.py | lucasiscoviciMoon/pandas-profiling-study | 142d3b0f5e3139cdb531819f637a407682fa5684 | [
"MIT"
] | 1 | 2020-04-25T15:20:39.000Z | 2020-04-25T15:20:39.000Z | from typing import Any
from ....report.presentation.abstract.item_renderer import ItemRenderer
class FrequencyTable(ItemRenderer):
def __init__(self, rows, **kwargs):
super().__init__("frequency_table", {"rows": rows}, **kwargs)
def __repr__(self):
return "FrequencyTable"
def render(self) -> Any:
raise NotImplementedError()
| 24.466667 | 71 | 0.689373 |
bf7f79916678ccd3c138e6978dbe98f8fadc0cb0 | 5,137 | py | Python | quant_disp.py | UltronAI/SfmLearner-Pytorch | 4f8f7f6057150b7a449b05e49eb2e2dca9cc0d56 | [
"MIT"
] | null | null | null | quant_disp.py | UltronAI/SfmLearner-Pytorch | 4f8f7f6057150b7a449b05e49eb2e2dca9cc0d56 | [
"MIT"
] | null | null | null | quant_disp.py | UltronAI/SfmLearner-Pytorch | 4f8f7f6057150b7a449b05e49eb2e2dca9cc0d56 | [
"MIT"
] | null | null | null | import torch
from imageio import imread, imsave
from scipy.misc import imresize
import numpy as np
from path import Path
import argparse
from tqdm import tqdm
from models import QuantDispNetS
from utils import tensor2array, getScale, quantize, quantizeWeight
parser = argparse.ArgumentParser(description='Quantize Pretrained DispNet',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--output-disp", action='store_true', help="save disparity img")
parser.add_argument("--output-depth", action='store_true', help="save depth img")
parser.add_argument("--pretrained", required=True, type=str, help="pretrained DispNet path")
parser.add_argument("--img-height", default=128, type=int, help="Image height")
parser.add_argument("--img-width", default=416, type=int, help="Image width")
parser.add_argument("--no-resize", action='store_true', help="no resizing is done")
parser.add_argument("--dataset-list", default=None, type=str, help="Dataset list file")
parser.add_argument("--dataset-dir", default='.', type=str, help="Dataset directory")
parser.add_argument("--output-dir", default='output', type=str, help="Output directory")
parser.add_argument("--img-exts", default=['png', 'jpg', 'bmp'], nargs='*', type=str, help="images extensions to glob")
parser.add_argument("--quantize-weights", action='store_true')
parser.add_argument("--original-input", action='store_true')
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
fix_info = {}
def quantHookWithName(module_name):
def quantHook(module, input, output):
if 'input' not in fix_info[module_name].keys():
fix_info[module_name]['input'] = getScale(input[0].cpu().numpy().copy())
else:
fix_info[module_name]['input'] = min(fix_info[module_name]['input'], getScale(input[0].cpu().numpy().copy()))
if 'output' not in fix_info[module_name].keys():
fix_info[module_name]['output'] = getScale(output[0].cpu().numpy().copy())
else:
fix_info[module_name]['output'] = min(fix_info[module_name]['output'], getScale(output[0].cpu().numpy().copy()))
return quantHook
@torch.no_grad()
def main():
args = parser.parse_args()
if not(args.output_disp or args.output_depth):
        print('You must output at least one value!')
# return
disp_net = QuantDispNetS().to(device)
weights = torch.load(args.pretrained)
disp_net.load_state_dict(weights['state_dict'])
disp_net.eval()
dataset_dir = Path(args.dataset_dir)
output_dir = Path(args.output_dir)
output_dir.makedirs_p()
if args.quantize_weights:
print("Quantizing the pretrained model ...")
quant_weights = { 'state_dict': quantizeWeight(weights['state_dict'], fix_info) }
torch.save(quant_weights, output_dir/'quant_dispnet_model.pth.tar')
else:
for key in weights['state_dict'].keys():
fix_info['.'.join(key.split('.')[:-1])] = {}
if args.dataset_list is not None:
with open(args.dataset_list, 'r') as f:
test_files = [dataset_dir/file for file in f.read().splitlines()]
else:
test_files = sum([dataset_dir.files('*.{}'.format(ext)) for ext in args.img_exts], [])
print('{} files to test'.format(len(test_files)))
for file in tqdm(test_files):
img = imread(file).astype(np.float32)
h,w,_ = img.shape
if (not args.no_resize) and (h != args.img_height or w != args.img_width):
img = imresize(img, (args.img_height, args.img_width)).astype(np.float32)
img = np.transpose(img, (2, 0, 1))
tensor_img = torch.from_numpy(img).unsqueeze(0)
if args.original_input:
tensor_img = (tensor_img).to(device)
else:
tensor_img = ((tensor_img/255 - 0.5)/0.2).to(device)
handles = []
for name, module in disp_net.named_modules():
if name in fix_info.keys():
handle = module.register_forward_hook(quantHookWithName(name))
handles.append(handle)
output = disp_net(tensor_img)[0]
for handle in handles:
handle.remove()
if args.output_disp:
disp = (255*tensor2array(output, max_value=None, colormap='bone')).astype(np.uint8)
imsave(output_dir/'{}_disp{}'.format(file.namebase,file.ext), disp)
if args.output_depth:
depth = 1/output
depth = (255*tensor2array(depth, max_value=10, colormap='rainbow')).astype(np.uint8)
imsave(output_dir/'{}_depth{}'.format(file.namebase,file.ext), depth)
if not args.quantize_weights:
return
with open(output_dir/'dispnet_fix_info.txt', 'w') as f:
count = 0
for key, value in fix_info.items():
f.write('{count} {layer} 8 {input} 8 {output} 8 {weight} 8 {bias} \n'.format(
count=count, layer=key, input=int(value['input']), output=int(value['output']), weight=int(value['weight']), bias=int(value['bias'])))
count += 1
if __name__ == '__main__':
main()
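# Hedged example invocation (paths are placeholders, not from the original
# project): quantize the pretrained DispNet and dump per-layer scales.
#
#   python quant_disp.py \
#       --pretrained /path/to/dispnet_model_best.pth.tar \
#       --dataset-dir /path/to/test_images \
#       --output-dir output \
#       --output-disp --output-depth \
#       --quantize-weights
#
# With --quantize-weights the script saves output/quant_dispnet_model.pth.tar
# and writes output/dispnet_fix_info.txt, one line per layer with the
# input/output/weight/bias fixed-point scales gathered by the forward hooks.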
| 42.454545 | 154 | 0.651937 |
5395d36eba916372f19d3a81ce0e682c8b649e14 | 1,623 | py | Python | photos/migrations/0001_initial.py | Elrophi/django-gallery | ff8d592c368cdcb5796ea2605b188517bb74eb80 | [
"MIT"
] | 1 | 2021-11-06T19:55:53.000Z | 2021-11-06T19:55:53.000Z | photos/migrations/0001_initial.py | Elrophi/django-gallery | ff8d592c368cdcb5796ea2605b188517bb74eb80 | [
"MIT"
] | null | null | null | photos/migrations/0001_initial.py | Elrophi/django-gallery | ff8d592c368cdcb5796ea2605b188517bb74eb80 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-05-15 19:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category_name', models.CharField(max_length=60)),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location_name', models.CharField(max_length=60)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='')),
('image_name', models.CharField(max_length=100)),
('image_description', models.TextField()),
('author', models.CharField(default='admin', max_length=60)),
('date', models.DateTimeField(auto_now_add=True)),
('image_category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='photos.category')),
('image_location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='photos.location')),
],
),
]
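    # Hedged usage sketch (not part of the generated migration): once applied,
    # the models created above can be used from application code roughly like
    # this, assuming photos.models defines Category, Location and Image with
    # matching fields.
    #
    #   from photos.models import Category, Location, Image
    #   travel = Category.objects.create(category_name='Travel')
    #   nairobi = Location.objects.create(location_name='Nairobi')
    #   Image.objects.create(image='pic.jpg', image_name='Sunset',
    #                        image_description='Evening shot', author='admin',
    #                        image_category=travel, image_location=nairobi)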
| 37.744186 | 121 | 0.582255 |
692902b3ed4e8034bf418b9d9452261dc37a189f | 2,501 | py | Python | dateparser/data/date_translation_data/rm.py | Rodp63/dateparser | 938a9573234679b603210bd47cc93eb258b1f1df | [
"BSD-3-Clause"
] | null | null | null | dateparser/data/date_translation_data/rm.py | Rodp63/dateparser | 938a9573234679b603210bd47cc93eb258b1f1df | [
"BSD-3-Clause"
] | null | null | null | dateparser/data/date_translation_data/rm.py | Rodp63/dateparser | 938a9573234679b603210bd47cc93eb258b1f1df | [
"BSD-3-Clause"
] | null | null | null | info = {
"name": "rm",
"date_order": "DMY",
"january": [
"schan",
"schaner"
],
"february": [
"favr",
"favrer"
],
"march": [
"mars"
],
"april": [
"avr",
"avrigl"
],
"may": [
"matg"
],
"june": [
"zercl",
"zercladur"
],
"july": [
"fan",
"fanadur"
],
"august": [
"avust"
],
"september": [
"sett",
"settember"
],
"october": [
"oct",
"october"
],
"november": [
"nov",
"november"
],
"december": [
"dec",
"december"
],
"monday": [
"gli",
"glindesdi"
],
"tuesday": [
"ma",
"mardi"
],
"wednesday": [
"me",
"mesemna"
],
"thursday": [
"gie",
"gievgia"
],
"friday": [
"ve",
"venderdi"
],
"saturday": [
"so",
"sonda"
],
"sunday": [
"du",
"dumengia"
],
"am": [
"am"
],
"pm": [
"pm"
],
"year": [
"onn"
],
"month": [
"mais"
],
"week": [
"emna"
],
"day": [
"tag"
],
"hour": [
"ura"
],
"minute": [
"minuta"
],
"second": [
"secunda"
],
"relative-type": {
"0 day ago": [
"oz"
],
"0 hour ago": [
"this hour"
],
"0 minute ago": [
"this minute"
],
"0 month ago": [
"this month"
],
"0 second ago": [
"now"
],
"0 week ago": [
"this week"
],
"0 year ago": [
"this year"
],
"1 day ago": [
"ier"
],
"1 month ago": [
"last month"
],
"1 week ago": [
"last week"
],
"1 year ago": [
"last year"
],
"in 1 day": [
"damaun"
],
"in 1 month": [
"next month"
],
"in 1 week": [
"next week"
],
"in 1 year": [
"next year"
]
},
"locale_specific": {},
"skip": [
" ",
".",
",",
";",
"-",
"/",
"'",
"|",
"@",
"[",
"]",
","
]
}
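# Hedged usage sketch: this table is consumed internally by dateparser, but its
# effect is visible through the public API (the locale code is assumed to be
# "rm", matching the "name" field above):
#
#   import dateparser
#   dateparser.parse("3 favrer 2020", languages=["rm"])   # -> 3 February 2020
#   # "ier" / "damaun" are registered above as yesterday / tomorrow terms.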
| 14.976048 | 26 | 0.261096 |
c413de1236b37bde8fe619b2fa80dd2fe5f528f5 | 1,363 | py | Python | soundtests/soundtest4.py | MageJohn/CHIP8 | 58fdff9e9860a3ff2b591e58a5e907faa5a90f4c | [
"MIT"
] | 1 | 2016-09-26T06:52:36.000Z | 2016-09-26T06:52:36.000Z | soundtests/soundtest4.py | MageJohn/CHIP8 | 58fdff9e9860a3ff2b591e58a5e907faa5a90f4c | [
"MIT"
] | null | null | null | soundtests/soundtest4.py | MageJohn/CHIP8 | 58fdff9e9860a3ff2b591e58a5e907faa5a90f4c | [
"MIT"
] | null | null | null | # The first sound test I wrote from scratch which functioned
# correctly. I had been fiddling with soundtest2.py a lot, and it was
# getting overly complicated. I decided that it would be best start
# over, and keep it simple. After this was working, I went back and
# applied what I learned to soundtest2.py, and got that functioning.
from sdl2 import *
from ctypes import *
AMPLITUDE = 255
SAMPLES_PER_SECOND = 44100
CHANNELS = 1
TONE_HZ = 262
SQUARE_WAVE_PERIOD = int(SAMPLES_PER_SECOND / TONE_HZ)
HALF_SQUARE_WAVE_PERIOD = int(SQUARE_WAVE_PERIOD / 2)
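# Worked numbers for the defaults above (sketch): at 44100 samples/s and a
# 262 Hz tone, SQUARE_WAVE_PERIOD = int(44100 / 262) = 168 samples per cycle,
# so the output flips between 0 and AMPLITUDE every HALF_SQUARE_WAVE_PERIOD =
# 84 samples, giving a square wave of roughly 44100 / 168 = 262.5 Hz.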
audio_settings = SDL_AudioSpec(freq=SAMPLES_PER_SECOND,
aformat=AUDIO_U8,
channels=CHANNELS,
samples=4096)
SDL_Init(SDL_INIT_AUDIO)
devid = SDL_OpenAudioDevice(None, 0, audio_settings, audio_settings, 0)
running_sample_index = 0
phase = 0
SDL_PauseAudioDevice(devid, 0)
while True:
sound_buffer = (c_ubyte * audio_settings.samples)()
for i in range(audio_settings.samples):
running_sample_index += 1
if running_sample_index % HALF_SQUARE_WAVE_PERIOD == 0:
phase = int(not phase)
sample = AMPLITUDE * phase
sound_buffer[i] = c_ubyte(sample)
SDL_QueueAudio(devid, byref(sound_buffer), audio_settings.samples)
print(SDL_GetQueuedAudioSize(devid))
| 32.452381 | 71 | 0.702861 |
5a3c10926d6e107d60f0940a9e8f76a582e731c9 | 3,541 | py | Python | python3/env/lib/python3.8/site-packages/pip/_vendor/requests/help.py | hedibejaoui/spark-timeseries | 9112dcbbba4e095b5eb46c568e1c72e13e1f251a | [
"Apache-2.0"
] | null | null | null | python3/env/lib/python3.8/site-packages/pip/_vendor/requests/help.py | hedibejaoui/spark-timeseries | 9112dcbbba4e095b5eb46c568e1c72e13e1f251a | [
"Apache-2.0"
] | null | null | null | python3/env/lib/python3.8/site-packages/pip/_vendor/requests/help.py | hedibejaoui/spark-timeseries | 9112dcbbba4e095b5eb46c568e1c72e13e1f251a | [
"Apache-2.0"
] | 1 | 2021-09-05T15:05:53.000Z | 2021-09-05T15:05:53.000Z | """Module containing bug report helper(s)."""
import json
import platform
import sys
import ssl
from pip._vendor import idna
from pip._vendor import urllib3
from pip._vendor import chardet
from . import __version__ as requests_version
try:
from pip._vendor.urllib3.contrib import pyopenssl
except ImportError:
pyopenssl = None
OpenSSL = None
cryptography = None
else:
import OpenSSL
import cryptography
def _implementation():
"""Return a dict with the Python implementation and version.
Provide both the name and the version of the Python implementation
currently running. For example, on CPython 2.7.5 it will return
{'name': 'CPython', 'version': '2.7.5'}.
This function works best on CPython and PyPy: in particular, it probably
doesn't work for Jython or IronPython. Future investigation should be done
to work out the correct shape of the code for those platforms.
"""
implementation = platform.python_implementation()
if implementation == 'CPython':
implementation_version = platform.python_version()
elif implementation == 'PyPy':
implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
implementation_version = ''.join([
implementation_version, sys.pypy_version_info.releaselevel
])
elif implementation == 'Jython':
implementation_version = platform.python_version() # Complete Guess
elif implementation == 'IronPython':
implementation_version = platform.python_version() # Complete Guess
else:
implementation_version = 'Unknown'
return {'name': implementation, 'version': implementation_version}
def info():
"""Generate information for a bug report."""
try:
platform_info = {
'system': platform.system(),
'release': platform.release(),
}
except IOError:
platform_info = {
'system': 'Unknown',
'release': 'Unknown',
}
implementation_info = _implementation()
urllib3_info = {'version': urllib3.__version__}
chardet_info = {'version': chardet.__version__}
pyopenssl_info = {
'version': None,
'openssl_version': '',
}
if OpenSSL:
pyopenssl_info = {
'version': OpenSSL.__version__,
'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
}
cryptography_info = {
'version': getattr(cryptography, '__version__', ''),
}
idna_info = {
'version': getattr(idna, '__version__', ''),
}
system_ssl = ssl.OPENSSL_VERSION_NUMBER
system_ssl_info = {
'version': '%x' % system_ssl if system_ssl is not None else ''
}
return {
'platform': platform_info,
'implementation': implementation_info,
'system_ssl': system_ssl_info,
'using_pyopenssl': pyopenssl is not None,
'pyOpenSSL': pyopenssl_info,
'urllib3': urllib3_info,
'chardet': chardet_info,
'cryptography': cryptography_info,
'idna': idna_info,
'requests': {
'version': requests_version,
},
}
def main():
"""Pretty-print the bug information as JSON."""
print(json.dumps(info(), sort_keys=True, indent=2))
if __name__ == '__main__':
main()
| 29.508333 | 78 | 0.629201 |
13b7ff5a376a0218faf1bcbc85c084fcffbf0578 | 3,964 | py | Python | hdc/core/asset_mapper.py | hashmapinc/hdc | 34bbd4bd889629d80614410332a0bb6e1ee5b329 | [
"Apache-2.0"
] | null | null | null | hdc/core/asset_mapper.py | hashmapinc/hdc | 34bbd4bd889629d80614410332a0bb6e1ee5b329 | [
"Apache-2.0"
] | null | null | null | hdc/core/asset_mapper.py | hashmapinc/hdc | 34bbd4bd889629d80614410332a0bb6e1ee5b329 | [
"Apache-2.0"
] | null | null | null | # Copyright © 2020 Hashmap, Inc
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Map assets from a source catalog to a destination: crawl the source, map the
catalogued assets to DDL statements, and replicate those structures on the
destination.
"""
import logging
from providah.factories.package_factory import PackageFactory as providah_pkg_factory
from hdc.core.catalog.crawler import Crawler
from hdc.core.create.creator import Creator
from hdc.core.exceptions.hdc_error import HdcError
from hdc.core.map.mapper import Mapper
from hdc.utils import file_utils
class AssetMapper:
def __init__(self, **kwargs):
self._logger = self._get_logger()
source = kwargs.get('source')
destination = kwargs.get('destination')
app_config = file_utils.get_app_config(kwargs.get('app_config', None))
if source in app_config['sources'].keys():
self._crawler: Crawler = providah_pkg_factory.create(key=app_config['sources'][source]['type'],
library='hdc',
configuration={'conf': app_config['sources'][source][
'conf']})
else:
raise HdcError(message=f"{source} not registered in 'sources' in {kwargs.get('app_config') or 'hdc.yml'}")
if source in app_config['mappers'].keys() and destination in app_config['mappers'][source].keys():
self._mapper: Mapper = providah_pkg_factory.create(key=app_config['mappers'][source][destination]['type'],
library='hdc',
configuration={'conf': (app_config['mappers']
[source]
[destination]
).get('conf', {"report": False})})
else:
raise HdcError(
message=f"{source}/{destination} not registered in 'mappers' in {kwargs.get('app_config') or 'hdc.yml'}")
if destination in app_config['destinations'].keys():
self._creator: Creator = providah_pkg_factory.create(key=app_config['destinations'][destination]['type'],
library='hdc',
configuration={'conf': app_config['destinations'][
destination]['conf']})
else:
raise HdcError(
message=f"{destination} not registered in 'destinations' in {kwargs.get('app_config') or 'hdc.yml'}")
def map_assets(self) -> bool:
success = False
try:
catalog_dataframe = self._crawler.obtain_catalog()
if catalog_dataframe is not None:
sql_ddl_list = self._mapper.map_assets(catalog_dataframe)
self._logger.debug(sql_ddl_list)
self._creator.replicate_structures(sql_ddl_list)
success = True
except Exception:
import traceback as tb
self._logger.error(f"{tb.format_exc()}")
raise HdcError(message=f"Failed to map the source to destination", traceback=tb.format_exc())
return success
def _get_logger(self):
return logging.getLogger(self.__class__.__name__)
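# Hedged sketch of the app-config shape AssetMapper expects (key names follow
# the lookups in __init__; the concrete type/conf values are placeholders, not
# taken from a real hdc.yml):
#
#   sources:      {<source>: {type: <CrawlerClass>, conf: {...}}}
#   mappers:      {<source>: {<destination>: {type: <MapperClass>, conf: {...}}}}
#   destinations: {<destination>: {type: <CreatorClass>, conf: {...}}}
#
# AssetMapper(source=<source>, destination=<destination>).map_assets() then
# crawls the source catalog, maps it to DDL and replicates the structures.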
| 47.759036 | 121 | 0.557013 |
c2b91b19613e8e58a4a98756bb275c0418965d35 | 657 | py | Python | Easy/shortest_distance_to_character.py | BrynjarGeir/LeetCode | dbd57e645c5398dec538b6466215b61491c8d1d9 | [
"MIT"
] | null | null | null | Easy/shortest_distance_to_character.py | BrynjarGeir/LeetCode | dbd57e645c5398dec538b6466215b61491c8d1d9 | [
"MIT"
] | null | null | null | Easy/shortest_distance_to_character.py | BrynjarGeir/LeetCode | dbd57e645c5398dec538b6466215b61491c8d1d9 | [
"MIT"
] | null | null | null | class Solution:
def shortestToChar(self, s: str, c: str) -> List[int]:
indexes = [i for i,x in enumerate(s) if x == c]
ans = [10**4]*len(s); curr = 0
for i in range(len(s)):
if s[i] == c:
ans[i] = 0
curr += 1
else:
if curr == 0:
ans[i] = indexes[curr] - i
else:
if curr == len(indexes): ans[i] = i - indexes[curr-1]
else:
right = indexes[curr]; left = indexes[curr-1]
ans[i] = min(right - i, i - left)
return ans
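# Hedged sanity check using the classic example for this problem (note that
# running this file outside the LeetCode harness also needs
# `from typing import List` for the annotation above):
#
#   Solution().shortestToChar("loveleetcode", "e")
#   # expected: [3, 2, 1, 0, 1, 0, 0, 1, 2, 2, 1, 0]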
| 36.5 | 73 | 0.380518 |
06fc1cb7b8e16363965e73a4446321b8b1e1838a | 4,706 | py | Python | datasets/data_prefetcher.py | Tarandro/MOTR_4 | bd8e53d7ea0584f06ccf032b056b327c87986ca7 | [
"MIT"
] | 191 | 2021-03-30T12:44:27.000Z | 2022-03-31T02:06:22.000Z | datasets/data_prefetcher.py | Tarandro/MOTR_4 | bd8e53d7ea0584f06ccf032b056b327c87986ca7 | [
"MIT"
] | 35 | 2021-05-11T06:33:26.000Z | 2022-03-27T04:21:17.000Z | datasets/data_prefetcher.py | Tarandro/MOTR_4 | bd8e53d7ea0584f06ccf032b056b327c87986ca7 | [
"MIT"
] | 34 | 2021-05-10T04:30:55.000Z | 2022-03-30T04:58:52.000Z | # ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
import torch
from functools import partial
from models.structures import Instances
def to_cuda(samples, targets, device):
samples = samples.to(device, non_blocking=True)
targets = [{k: v.to(device, non_blocking=True) for k, v in t.items()} for t in targets]
return samples, targets
def tensor_to_cuda(tensor: torch.Tensor, device):
return tensor.to(device)
def is_tensor_or_instances(data):
return isinstance(data, torch.Tensor) or isinstance(data, Instances)
def data_apply(data, check_func, apply_func):
if isinstance(data, dict):
for k in data.keys():
if check_func(data[k]):
data[k] = apply_func(data[k])
elif isinstance(data[k], dict) or isinstance(data[k], list):
data_apply(data[k], check_func, apply_func)
else:
raise ValueError()
elif isinstance(data, list):
for i in range(len(data)):
if check_func(data[i]):
data[i] = apply_func(data[i])
elif isinstance(data[i], dict) or isinstance(data[i], list):
data_apply(data[i], check_func, apply_func)
else:
raise ValueError("invalid type {}".format(type(data[i])))
else:
raise ValueError("invalid type {}".format(type(data)))
return data
def data_dict_to_cuda(data_dict, device):
return data_apply(data_dict, is_tensor_or_instances, partial(tensor_to_cuda, device=device))
class data_prefetcher():
def __init__(self, loader, device, prefetch=True):
self.loader = iter(loader)
self.prefetch = prefetch
self.device = device
if prefetch:
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.next_samples, self.next_targets = next(self.loader)
except StopIteration:
self.next_samples = None
self.next_targets = None
return
# if record_stream() doesn't work, another option is to make sure device inputs are created
# on the main stream.
# self.next_input_gpu = torch.empty_like(self.next_input, device='cuda')
# self.next_target_gpu = torch.empty_like(self.next_target, device='cuda')
# Need to make sure the memory allocated for next_* is not still in use by the main stream
# at the time we start copying to next_*:
# self.stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream):
self.next_samples, self.next_targets = to_cuda(self.next_samples, self.next_targets, self.device)
# more code for the alternative if record_stream() doesn't work:
# copy_ will record the use of the pinned source tensor in this side stream.
# self.next_input_gpu.copy_(self.next_input, non_blocking=True)
# self.next_target_gpu.copy_(self.next_target, non_blocking=True)
# self.next_input = self.next_input_gpu
# self.next_target = self.next_target_gpu
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.next_input = self.next_input.half()
# else:
def next(self):
if self.prefetch:
torch.cuda.current_stream().wait_stream(self.stream)
samples = self.next_samples
targets = self.next_targets
if samples is not None:
samples.record_stream(torch.cuda.current_stream())
if targets is not None:
for t in targets:
for k, v in t.items():
v.record_stream(torch.cuda.current_stream())
self.preload()
else:
try:
samples, targets = next(self.loader)
samples, targets = to_cuda(samples, targets, self.device)
except StopIteration:
print("catch_stop_iter")
samples = None
targets = None
return samples, targets
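# Hedged usage sketch (names such as `train_loader` are placeholders): the
# prefetcher replaces direct iteration over a DataLoader and overlaps the
# host-to-device copy of the next batch with compute on the current one.
#
#   prefetcher = data_prefetcher(train_loader, device, prefetch=True)
#   samples, targets = prefetcher.next()
#   while samples is not None:
#       ...  # forward/backward on (samples, targets)
#       samples, targets = prefetcher.next()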
| 40.921739 | 109 | 0.584573 |
a841c94d8241ae605fff020b467655d9c982b8e5 | 16,236 | py | Python | domainbed/datasets.py | AllenPu/mbdg | 243f53a57dcf4bfb6e717c0c9f64a839cff8d548 | [
"MIT"
] | null | null | null | domainbed/datasets.py | AllenPu/mbdg | 243f53a57dcf4bfb6e717c0c9f64a839cff8d548 | [
"MIT"
] | null | null | null | domainbed/datasets.py | AllenPu/mbdg | 243f53a57dcf4bfb6e717c0c9f64a839cff8d548 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import pickle  # needed by SimpleSyntheticDataset.read_pickle below
import numpy as np  # needed by the EDG* synthetic datasets below
import pandas as pd  # needed by EDGForestCover below
import torch
from PIL import Image, ImageFile
from torchvision import transforms
import torchvision.datasets.folder
from torch.utils.data import TensorDataset, Subset, ConcatDataset
from torchvision.datasets import MNIST, ImageFolder
from torchvision.transforms.functional import rotate
#from wilds.datasets.camelyon17_dataset import Camelyon17Dataset
#from wilds.datasets.fmow_dataset import FMoWDataset
ImageFile.LOAD_TRUNCATED_IMAGES = True
DATASETS = [
# Debug
"Debug28",
"Debug224",
# Small images
"ColoredMNIST",
"RotatedMNIST",
# Big images
"VLCS",
"PACS",
"OfficeHome",
"TerraIncognita",
"DomainNet",
"SVIRO",
# WILDS datasets
"WILDSCamelyon",
"WILDSFMoW",
"EDGEvolCircle",
"EDGRPlate",
"EDGPortrait",
"EDGForestCover"
]
def get_dataset_class(dataset_name):
"""Return the dataset class with the given name."""
if dataset_name not in globals():
raise NotImplementedError("Dataset not found: {}".format(dataset_name))
return globals()[dataset_name]
def num_environments(dataset_name):
return len(get_dataset_class(dataset_name).ENVIRONMENTS)
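# Hedged usage sketch: DomainBed-style scripts typically instantiate a dataset
# by name through these helpers (hparams here is a placeholder; the image
# folder datasets additionally expect a 'data_augmentation' entry):
#
#   cls = get_dataset_class("RotatedMNIST")
#   dataset = cls(root="~/data", test_envs=[0], hparams={})
#   num_environments("RotatedMNIST")   # -> 6 rotation environments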
class MultipleDomainDataset:
N_STEPS = 5001 # Default, subclasses may override
CHECKPOINT_FREQ = 100 # Default, subclasses may override
N_WORKERS = 8 # Default, subclasses may override
ENVIRONMENTS = None # Subclasses should override
INPUT_SHAPE = None # Subclasses should override
def __getitem__(self, index):
return self.datasets[index]
def __len__(self):
return len(self.datasets)
class Debug(MultipleDomainDataset):
def __init__(self, root, test_envs, hparams):
super().__init__()
self.input_shape = self.INPUT_SHAPE
self.num_classes = 2
self.datasets = []
for _ in [0, 1, 2]:
self.datasets.append(
TensorDataset(
torch.randn(16, *self.INPUT_SHAPE),
torch.randint(0, self.num_classes, (16,))
)
)
class Debug28(Debug):
INPUT_SHAPE = (3, 28, 28)
ENVIRONMENTS = ['0', '1', '2']
class Debug224(Debug):
INPUT_SHAPE = (3, 224, 224)
ENVIRONMENTS = ['0', '1', '2']
class MultipleEnvironmentMNIST(MultipleDomainDataset):
def __init__(self, root, environments, dataset_transform, input_shape,
num_classes):
super().__init__()
if root is None:
raise ValueError('Data directory not specified!')
transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor()
])
original_dataset_tr = MNIST(root, train=True, download=True, transform=transform)
original_dataset_te = MNIST(root, train=False, download=True, transform=transform)
data = ConcatDataset([original_dataset_tr, original_dataset_te])
original_images = torch.cat([img for img, _ in data])
original_labels = torch.cat([torch.tensor(label).unsqueeze(0) for _, label in data])
shuffle = torch.randperm(len(original_images))
original_images = original_images[shuffle]
original_labels = original_labels[shuffle]
self.datasets = []
for i in range(len(environments)):
images = original_images[i::len(environments)]
labels = original_labels[i::len(environments)]
self.datasets.append(dataset_transform(images, labels, environments[i]))
self.input_shape = input_shape
self.num_classes = num_classes
class ColoredMNIST(MultipleEnvironmentMNIST):
ENVIRONMENTS = ['+90%', '+80%', '-90%']
def __init__(self, root, test_envs, hparams):
super(ColoredMNIST, self).__init__(root, [0.1, 0.2, 0.9],
self.color_dataset, (2, 32, 32,), 2)
self.input_shape = (2, 32, 32,)
self.num_classes = 2
def color_dataset(self, images, labels, environment):
# Assign a binary label based on the digit
labels = (labels < 5).float()
# Flip label with probability 0.25
labels = self.torch_xor_(labels, self.torch_bernoulli_(0.25, len(labels)))
# Assign a color based on the label; flip the color with probability e
colors = self.torch_xor_(labels, self.torch_bernoulli_(environment, len(labels)))
images = torch.stack([images, images], dim=1)
# Apply the color to the image by zeroing out the other color channel
images[torch.tensor(range(len(images))), (1 - colors).long(), :, :] *= 0
x = images.float() #.div_(255.0)
y = labels.view(-1).long()
return TensorDataset(x, y)
def torch_bernoulli_(self, p, size):
return (torch.rand(size) < p).float()
def torch_xor_(self, a, b):
return (a - b).abs()
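# Worked example of the correlation built above (sketch): digits below 5 get
# label 1, the label is then flipped with probability 0.25, and the colour is
# that noisy label XOR Bernoulli(e). With e = 0.1 or 0.2 in the training
# environments the colour agrees with the noisy label 90% / 80% of the time,
# while in the e = 0.9 environment it agrees only 10% of the time, so a model
# that shortcuts onto colour fails there.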
class RotatedMNIST(MultipleEnvironmentMNIST):
ENVIRONMENTS = ['0', '15', '30', '45', '60', '75']
def __init__(self, root, test_envs, hparams):
super(RotatedMNIST, self).__init__(root, [0, 15, 30, 45, 60, 75],
self.rotate_dataset, (1, 28, 28,), 10)
def rotate_dataset(self, images, labels, angle):
rotation = transforms.Compose([
transforms.ToPILImage(),
transforms.Lambda(lambda x: rotate(x, angle, fill=(0,),
resample=Image.BICUBIC)),
transforms.ToTensor()])
x = torch.zeros(len(images), 1, 32, 32)
for i in range(len(images)):
x[i] = rotation(images[i])
y = labels.view(-1)
return TensorDataset(x, y)
class MultipleEnvironmentImageFolder(MultipleDomainDataset):
def __init__(self, root, test_envs, augment, hparams):
super().__init__()
environments = [f.name for f in os.scandir(root) if f.is_dir()]
environments = sorted(environments)
transform = transforms.Compose([
transforms.Resize((224,224)),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
augment_transform = transforms.Compose([
# transforms.Resize((224,224)),
transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),
transforms.RandomGrayscale(),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
self.datasets = []
for i, environment in enumerate(environments):
if augment and (i not in test_envs):
env_transform = augment_transform
else:
env_transform = transform
path = os.path.join(root, environment)
env_dataset = ImageFolder(path,
transform=env_transform)
self.datasets.append(env_dataset)
self.input_shape = (3, 224, 224,)
self.num_classes = len(self.datasets[-1].classes)
class VLCS(MultipleEnvironmentImageFolder):
CHECKPOINT_FREQ = 300
ENVIRONMENTS = ["C", "L", "S", "V"]
def __init__(self, root, test_envs, hparams):
self.dir = os.path.join(root, "VLCS/")
super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class PACS(MultipleEnvironmentImageFolder):
CHECKPOINT_FREQ = 300
ENVIRONMENTS = ["A", "C", "P", "S"]
def __init__(self, root, test_envs, hparams):
self.dir = os.path.join(root, "PACS/")
super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class DomainNet(MultipleEnvironmentImageFolder):
CHECKPOINT_FREQ = 1000
ENVIRONMENTS = ["clip", "info", "paint", "quick", "real", "sketch"]
def __init__(self, root, test_envs, hparams):
self.dir = os.path.join(root, "domain_net/")
super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class OfficeHome(MultipleEnvironmentImageFolder):
CHECKPOINT_FREQ = 300
ENVIRONMENTS = ["A", "C", "P", "R"]
def __init__(self, root, test_envs, hparams):
self.dir = os.path.join(root, "office_home/")
super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class TerraIncognita(MultipleEnvironmentImageFolder):
CHECKPOINT_FREQ = 300
ENVIRONMENTS = ["L100", "L38", "L43", "L46"]
def __init__(self, root, test_envs, hparams):
self.dir = os.path.join(root, "terra_incognita/")
super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class SVIRO(MultipleEnvironmentImageFolder):
CHECKPOINT_FREQ = 300
ENVIRONMENTS = ["aclass", "escape", "hilux", "i3", "lexus", "tesla", "tiguan", "tucson", "x5", "zoe"]
def __init__(self, root, test_envs, hparams):
self.dir = os.path.join(root, "sviro/")
super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class WILDSEnvironment:
def __init__(
self,
wilds_dataset,
metadata_name,
metadata_value,
transform=None):
self.name = metadata_name + "_" + str(metadata_value)
metadata_index = wilds_dataset.metadata_fields.index(metadata_name)
metadata_array = wilds_dataset.metadata_array
subset_indices = torch.where(
metadata_array[:, metadata_index] == metadata_value)[0]
self.dataset = wilds_dataset
self.indices = subset_indices
self.transform = transform
def __getitem__(self, i):
x = self.dataset.get_input(self.indices[i])
if type(x).__name__ != "Image":
x = Image.fromarray(x)
y = self.dataset.y_array[self.indices[i]]
if self.transform is not None:
x = self.transform(x)
return x, y
def __len__(self):
return len(self.indices)
class WILDSDataset(MultipleDomainDataset):
INPUT_SHAPE = (3, 96, 96)
def __init__(self, dataset, metadata_name, test_envs, augment, hparams):
super().__init__()
transform = transforms.Compose([
transforms.Resize((96, 96)),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
augment_transform = transforms.Compose([
transforms.Resize((96, 96)),
transforms.RandomResizedCrop(96, scale=(0.7, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
self.datasets = []
for i, metadata_value in enumerate(
self.metadata_values(dataset, metadata_name)):
if augment and (i not in test_envs):
env_transform = augment_transform
else:
env_transform = transform
env_dataset = WILDSEnvironment(
dataset, metadata_name, metadata_value, env_transform)
self.datasets.append(env_dataset)
self.input_shape = (3, 224, 224,)
self.num_classes = dataset.n_classes
def metadata_values(self, wilds_dataset, metadata_name):
metadata_index = wilds_dataset.metadata_fields.index(metadata_name)
metadata_vals = wilds_dataset.metadata_array[:, metadata_index]
return sorted(list(set(metadata_vals.view(-1).tolist())))
class WILDSCamelyon(WILDSDataset):
ENVIRONMENTS = [ "hospital_0", "hospital_1", "hospital_2", "hospital_3",
"hospital_4"]
def __init__(self, root, test_envs, hparams):
dataset = Camelyon17Dataset(root_dir=root)
super().__init__(
dataset, "hospital", test_envs, hparams['data_augmentation'], hparams)
class WILDSFMoW(WILDSDataset):
ENVIRONMENTS = [ "region_0", "region_1", "region_2", "region_3",
"region_4", "region_5"]
def __init__(self, root, test_envs, hparams):
dataset = FMoWDataset(root_dir=root)
super().__init__(
dataset, "region", test_envs, hparams['data_augmentation'], hparams)
#
# ADD NEW DATASET
#
class SimpleSyntheticDataset(MultipleDomainDataset):
def __init__(self, data_dir, test_envs, hparams):
self.data_dir = data_dir
# load data
data_pkl = self.load_data(data_dir)
# config
domain_num = len(list(set(data_pkl['domain'])))
self.input_shape = data_pkl['data'][0].shape
self.num_classes = len(list(set(data_pkl['label'])))
self.ENVIRONMENTS = ['Domain '+ str(i) for i in range(domain_num)]
# convert to torch Dataset
self.datasets = []
for d in range(domain_num):
# get x, y from data_pkl
idx = data_pkl['domain'] == d
x = data_pkl['data'][idx].astype(np.float32)
y = data_pkl['label'][idx].astype(np.int64)
y = torch.tensor(y).view(-1).long() # turn 1, 2, 3 to 0, 1, 2
self.datasets.append(TensorDataset(torch.tensor(x).float(), y))
def load_data(self, path=None):
if not path: raise NotImplementedError
return self.read_pickle(path)
def read_pickle(self, name):
with open(name, 'rb') as f:
data = pickle.load(f)
return data
#########
class EDGRPlate(SimpleSyntheticDataset):
def __init__(self, data_dir, test_envs, hparams):
super(EDGRPlate, self).__init__(
'../datasets_for_domainbed/RPlate/data/RPlate.pkl',
test_envs, hparams)
#########
class EDGEvolCircle(SimpleSyntheticDataset):
def __init__(self, data_dir, test_envs, hparams):
super(EDGEvolCircle, self).__init__(
'../datasets_for_domainbed/toy-circle/data/half-circle.pkl',
test_envs, hparams)
##########
class EDGForestCover(MultipleDomainDataset):
def __init__(self, data_dir, test_envs, hparams):
self.data_dir = data_dir
COL = 'Elevation'
MAX = 3451 # df[COL].max()
MIN = 2061 # df[COL].min()
COUNT = hparams['env_number']
# pre
self.datasets = []
# df = self.load_forestcover_data().drop('Id', axis = 1)
df = self.load_forestcover_data()
# MAX = df[COL].max() # 3451 # df[col].max()
# MIN = df[COL].min() # 2061 # df[col].min()
bins = np.arange(MIN, MAX, (MAX - MIN)/COUNT)
se1 = pd.cut(df[COL], bins)
df = df.drop(COL, axis=1)
gb = df.groupby(se1)
gbs = [gb.get_group(x) for x in gb.groups]
# groupby('Cover_Type').size()
for each in gbs:
print(each.groupby('label').size())
gbs = [self.get_xy_from_df(each) for each in gbs]
for x, y in gbs:
y = torch.tensor(y).view(-1).long() # turn 1, 2, 3 to 0, 1, 2
# print(y)
self.datasets.append(TensorDataset(torch.tensor(x).float(), y))
self.input_shape = (54, )
self.num_classes = 2
self.ENVIRONMENTS = [str(i)
for i in range(COUNT-1)]
return
def load_forestcover_data(self, path='ForestCover/train.csv'):
df = pd.read_csv(os.path.join(self.data_dir, path))
df = df.rename(columns={"Cover_Type": "label"})
df = self.group_labels(df)
df = df.drop('Id', axis=1)
df = df.sample(frac=1).reset_index(drop=True)
df = df.sample(frac=1).reset_index(drop=False) # [index, label]
return df
def group_labels(self, df):
groups = [
[0, 1, 6, 3],
[4, 5, 2, 7],
]
# print(df)
def new_label(row):
for new_l in range(len(groups)):
if row['label'] in groups[new_l]:
return new_l
df['label'] = df.apply(new_label, axis=1)
# print(df)
return df
def get_xy_from_df(self, df):
Y = df['label'].to_numpy()
X = df.drop('label', axis='columns').to_numpy()
return (X, Y)
| 34.398305 | 105 | 0.608339 |
e7e622cd6afb351ce2d0e2e0a38270046c40c557 | 2,852 | py | Python | urllib3/__init__.py | little-car-counter/backend-app | c0f2bef4f620d08c7296ebb5e71dbcaf3bec49f0 | [
"MIT"
] | 1 | 2016-11-16T16:15:54.000Z | 2016-11-16T16:15:54.000Z | urllib3/__init__.py | little-car-counter/backend-app | c0f2bef4f620d08c7296ebb5e71dbcaf3bec49f0 | [
"MIT"
] | 1 | 2021-02-08T20:17:40.000Z | 2021-02-08T20:17:40.000Z | urllib3/__init__.py | little-car-counter/backend-app | c0f2bef4f620d08c7296ebb5e71dbcaf3bec49f0 | [
"MIT"
] | null | null | null | """
urllib3 - Thread-safe connection pooling and re-using.
"""
from __future__ import absolute_import
import warnings
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = '1.18'
__all__ = (
'HTTPConnectionPool',
'HTTPSConnectionPool',
'PoolManager',
'ProxyManager',
'HTTPResponse',
'Retry',
'Timeout',
'add_stderr_logger',
'connection_from_url',
'disable_warnings',
'encode_multipart_formdata',
'get_host',
'make_headers',
'proxy_from_url',
)
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added a stderr logging handler to logger: %s', __name__)
return handler
# ... Clean up.
del NullHandler
# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
def disable_warnings(category=exceptions.HTTPWarning):
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter('ignore', category)
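# Hedged usage sketch of the two helpers above (in this vendored copy the
# import path is pip._vendor.urllib3 rather than plain urllib3):
#
#   from pip._vendor import urllib3
#   urllib3.add_stderr_logger()        # verbose logging to stderr
#   urllib3.disable_warnings()         # silence every urllib3 HTTPWarning
#   urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)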
| 29.402062 | 84 | 0.741234 |
bb25e0038e419aac3ee5c02737250a3757960b93 | 221 | py | Python | contrib/wallettools/walletchangepass.py | gondel/mikron | 81baad599592863d4e4df5482ed851bd606915b5 | [
"MIT"
] | null | null | null | contrib/wallettools/walletchangepass.py | gondel/mikron | 81baad599592863d4e4df5482ed851bd606915b5 | [
"MIT"
] | null | null | null | contrib/wallettools/walletchangepass.py | gondel/mikron | 81baad599592863d4e4df5482ed851bd606915b5 | [
"MIT"
] | null | null | null | from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:38752")
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
| 36.833333 | 49 | 0.769231 |
bc0475d9f1e4d1e14cd1313ab9d250a8edb6950f | 4,199 | py | Python | pystratis/api/connectionmanager/tests/test_connectionmanager.py | madrazzl3/pystratis | 8b78552e753ae1d12f2afb39e9a322a270fbb7b3 | [
"MIT"
] | null | null | null | pystratis/api/connectionmanager/tests/test_connectionmanager.py | madrazzl3/pystratis | 8b78552e753ae1d12f2afb39e9a322a270fbb7b3 | [
"MIT"
] | null | null | null | pystratis/api/connectionmanager/tests/test_connectionmanager.py | madrazzl3/pystratis | 8b78552e753ae1d12f2afb39e9a322a270fbb7b3 | [
"MIT"
] | null | null | null | import pytest
from pytest_mock import MockerFixture
from pystratis.api.connectionmanager import ConnectionManager
from pystratis.api.connectionmanager.responsemodels import *
from pystratis.core.networks import StraxMain, CirrusMain
def test_all_strax_endpoints_implemented(strax_swagger_json):
paths = [key.lower() for key in strax_swagger_json['paths']]
for endpoint in paths:
if ConnectionManager.route + '/' in endpoint:
assert endpoint in ConnectionManager.endpoints
def test_all_cirrus_endpoints_implemented(cirrus_swagger_json):
paths = [key.lower() for key in cirrus_swagger_json['paths']]
for endpoint in paths:
if ConnectionManager.route + '/' in endpoint:
assert endpoint in ConnectionManager.endpoints
def test_all_interfluxstrax_endpoints_implemented(interfluxstrax_swagger_json):
paths = [key.lower() for key in interfluxstrax_swagger_json['paths']]
for endpoint in paths:
if ConnectionManager.route + '/' in endpoint:
assert endpoint in ConnectionManager.endpoints
def test_all_interfluxcirrus_endpoints_implemented(interfluxcirrus_swagger_json):
paths = [key.lower() for key in interfluxcirrus_swagger_json['paths']]
for endpoint in paths:
if ConnectionManager.route + '/' in endpoint:
assert endpoint in ConnectionManager.endpoints
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_addnode(mocker: MockerFixture, network):
data = True
mocker.patch.object(ConnectionManager, 'get', return_value=data)
connection_manager = ConnectionManager(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
response = connection_manager.addnode(ipaddr='http://localhost', command='add')
assert response
# noinspection PyUnresolvedReferences
connection_manager.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_getpeerinfo(mocker: MockerFixture, network):
data = [
{
"id": 0,
"addr": "[::ffff:x.x.x.x]:17105",
"addrlocal": "[::ffff:x.x.x.x]:52424",
"services": "9",
"relaytxes": False,
"lastsend": 0,
"lastrecv": 0,
"bytessent": 0,
"bytesrecv": 0,
"conntime": 0,
"timeoffset": 0,
"pingtime": 0,
"minping": 0,
"pingwait": 0,
"version": 70012,
"subver": "StratisFullNode:1.x.x.x (70012)",
"inbound": False,
"addnode": False,
"startingheight": 378614,
"banscore": 0,
"synced_headers": 0,
"synced_blocks": 0,
"whitelisted": False,
"inflight": None,
"bytessent_per_msg": None,
"bytesrecv_per_msg": None
},
{
"id": 1,
"addr": "[::ffff:x.x.x.x]:17105",
"addrlocal": "[::ffff:x.x.x.x]:33104",
"services": "9",
"relaytxes": False,
"lastsend": 0,
"lastrecv": 0,
"bytessent": 0,
"bytesrecv": 0,
"conntime": 0,
"timeoffset": 0,
"pingtime": 0,
"minping": 0,
"pingwait": 0,
"version": 70012,
"subver": "StratisFullNode:1.x.x.x (70012)",
"inbound": False,
"addnode": False,
"startingheight": 379357,
"banscore": 0,
"synced_headers": 0,
"synced_blocks": 0,
"whitelisted": False,
"inflight": None,
"bytessent_per_msg": None,
"bytesrecv_per_msg": None
}
]
mocker.patch.object(ConnectionManager, 'get', return_value=data)
connection_manager = ConnectionManager(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
response = connection_manager.getpeerinfo()
assert response == [PeerInfoModel(**x) for x in data]
# noinspection PyUnresolvedReferences
connection_manager.get.assert_called_once()
| 35.888889 | 115 | 0.608716 |
1404b79b463bd909a4e35a616500a1be2d1499ee | 7,952 | py | Python | test/test_exceptions.py | roberto-mello/actifio-python-package | 57769d84915e4daef2166b49cfb19efdea826562 | [
"MIT"
] | null | null | null | test/test_exceptions.py | roberto-mello/actifio-python-package | 57769d84915e4daef2166b49cfb19efdea826562 | [
"MIT"
] | null | null | null | test/test_exceptions.py | roberto-mello/actifio-python-package | 57769d84915e4daef2166b49cfb19efdea826562 | [
"MIT"
] | null | null | null | import unittest
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
from Actifio import Actifio
from Actifio.actexceptions import *
appliance = os.environ['APPLIANCE']
user = os.environ['ACTUSER']
password = os.environ['ACTPASS']
agm = os.environ['ACTAGM']
wrong_applaince = os.environ['WRONGAPPLIANCE']
wrong_user = os.environ['WRONGUSER']
wrong_password = os.environ['WRONGPASS']
oracle_db = os.environ['ORADB']
ora_source = os.environ['ORASOURCE']
ora_target = os.environ['ORATARGET']
ora_home = os.environ['ORACLE_HOME']
ora_dbname = os.environ['ORACLE_SID']
sql_db = os.environ['SQLDB']
sql_dbinst = os.environ['SQLDBINST']
sql_db_source = os.environ['SQLSOURCE']
sql_db_target = os.environ['SQLTARGET']
sql_inst = os.environ['SQLINST']
sql_inst_inst = os.environ['SQLTARGETINST']
sql_inst_source = os.environ['SQLINSTSOURCE']
sql_inst_target = os.environ['SQLINSTTARGET']
class ExceptionTesting(unittest.TestCase):
def test_incorrect_ip(self):
with self.assertRaises(ActConnectError):
act = Actifio(wrong_applaince, user, password)
act.run_uds_command('info', 'lsversion', {})
def test_wrong_appliance(self):
with self.assertRaises(ActLoginError) as excp:
act = Actifio(agm, user, password)
act.run_uds_command('info', 'lsversion', {})
self.assertEqual(excp.exception.msg, "This does not seem to be a Actifio Sky/CDS appliance")
def test_wrong_user(self):
with self.assertRaises(ActLoginError) as excp:
act = Actifio(appliance, wrong_user, password)
act.run_uds_command('info', 'lsversion', {})
self.assertEqual(excp.exception.msg, "Invalid username or password")
def test_wrong_password(self):
with self.assertRaises(ActLoginError) as excp:
act = Actifio(appliance, user, wrong_password)
act.run_uds_command('info', 'lsversion', {})
self.assertEqual(excp.exception.msg, "Invalid username or password")
def test_incorrect_command(self):
with self.assertRaises(ActAPIError) as excp:
act = Actifio(appliance, user, password)
act.run_uds_command('info', 'lsversion2', {})
with self.assertRaises(ActAPIError) as excp:
act = Actifio(appliance, user, password)
act.run_uds_command('xxx', 'lsversion', {})
def test_get_image_bytime_args(self):
act = Actifio(appliance, user, password)
oracleapp = act.get_applications(appname=oracle_db, hostname=ora_source, appclass="Oracle")
sqlapp = act.get_applications(appname=sql_db, hostname=sql_db_source, appclass="SQLServer")
nonlsapp = act.get_applications(friendlytype="VMBackup")
# incorrect restoretime format in string
with self.assertRaises(ActUserError) as excp:
from datetime import datetime
act.get_image_bytime(
application=oracleapp[0],
restoretime="03-12-2234 00:00:00",
strict_policy=True
)
self.assertEqual(excp.exception.msg, "'restoretime' need to be in the format of [YYYY-MM-DD HH:mm:ss]")
# strict_policy with non LogSmart app
with self.assertRaises(ActUserError) as excp:
from datetime import datetime
act.get_image_bytime(
application=nonlsapp[0],
restoretime=datetime.today(),
strict_policy=True
)
self.assertEqual(excp.exception.msg, "'strict_policy=True' is only valid for LogSmart enables applications. This application is not LogSmart enabled.")
# restoretime can't be empty
with self.assertRaises(ActUserError) as excp:
from datetime import datetime
act.get_image_bytime(
application=oracleapp[0],
restoretime="",
strict_policy=True
)
self.assertEqual(excp.exception.msg, "'restoretime' should be in the type of datetime or string with format of [YYYY-MM-DD HH:mm:ss]")
def test_clone_database_args(self):
act = Actifio(appliance, user, password)
oracleapp = act.get_applications(appname=oracle_db, hostname=ora_source, appclass="Oracle")
sqldb = act.get_applications(appname=sql_db, hostname=sql_db_source , appclass="SQLServer")
sqlinst = act.get_applications(appname=sql_inst, appclass="SQLServerGroup")
nonlsapp = act.get_applications(friendlytype="VMBackup")
target_host_ora = act.get_hosts(hostname=ora_target)
# incorrect restoretime format in string
with self.assertRaises(ActUserError) as excp:
from datetime import datetime
act.clone_database(
source_application=oracleapp[0],
restoretime="03-12-2234 00:00:00",
target_host=target_host_ora,
strict_policy=True,
ora_home=ora_home,
ora_dbname=ora_dbname
)
self.assertEqual(excp.exception.msg, "'restoretime' need to be in the format of [YYYY-MM-DD HH:mm:ss]")
# restoretime can't be empty
with self.assertRaises(ActUserError) as excp:
from datetime import datetime
act.clone_database(
source_application=oracleapp[0],
restoretime=None,
target_host=target_host_ora,
strict_policy=True,
ora_home=ora_home,
ora_dbname=ora_dbname
)
self.assertEqual(excp.exception.msg, "'restoretime' should be in the type of datetime or string with format of [YYYY-MM-DD HH:mm:ss]")
# strict_policy
with self.assertRaises(ActUserError) as excp:
from datetime import datetime
act.clone_database(
source_application=oracleapp[0],
restoretime=datetime.today(),
target_host=target_host_ora,
strict_policy="none",
ora_home=ora_home,
ora_dbname=ora_dbname
)
self.assertEqual(excp.exception.msg, "'strict_policy' should be boolean")
# source_application need to be specified
with self.assertRaises(ActUserError) as excp:
from datetime import datetime
act.clone_database(
restoretime=datetime.today(),
target_host=target_host_ora,
strict_policy=True,
ora_home=ora_home,
ora_dbname=ora_dbname
)
self.assertEqual(excp.exception.msg, "'source_application' or 'source_appname' and 'source_hostname' need to be specified.")
# source_application need to be actApplication
with self.assertRaises(ActUserError) as excp:
from datetime import datetime
act.clone_database(
source_application="None",
restoretime=datetime.today(),
target_host=target_host_ora,
strict_policy=True,
ora_home=ora_home,
ora_dbname=ora_dbname
)
self.assertEqual(excp.exception.msg, "'source_application' need to be ActApplication type.")
# oracle params
with self.assertRaises(ActUserError) as excp:
from datetime import datetime
act.clone_database(
source_application=oracleapp[0],
restoretime=datetime.today(),
target_host=target_host_ora,
strict_policy=True,
oracle_db_name=ora_dbname
)
self.assertEqual(excp.exception.msg, "Required argument is missing: oracle_home")
# oracle params
with self.assertRaises(ActUserError) as excp:
from datetime import datetime
act.clone_database(
source_application=oracleapp[0],
restoretime=datetime.today(),
target_host=target_host_ora,
strict_policy=True,
oracle_home=ora_home
)
self.assertEqual(excp.exception.msg, "Required argument is missing: oracle_db_name")
# sql params
with self.assertRaises(ActUserError) as excp:
from datetime import datetime
act.clone_database(
source_application=sqldb[0],
restoretime=datetime.today(),
target_host=target_host_ora,
strict_policy=True,
sql_instance_name=sql_dbinst
)
self.assertEqual(excp.exception.msg, "Required argument is missing: sql_db_name")
if __name__ == "__main__":
unittest.main()
| 36.645161 | 155 | 0.697435 |
f269c7a43f0409d4fb3d9fb0f9d50266e00df29d | 3,938 | py | Python | rapid7vmconsole/models/resources_role.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 61 | 2018-05-17T05:57:09.000Z | 2022-03-08T13:59:21.000Z | rapid7vmconsole/models/resources_role.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 33 | 2018-06-26T16:21:14.000Z | 2022-03-03T20:55:47.000Z | rapid7vmconsole/models/resources_role.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 43 | 2018-02-24T05:45:53.000Z | 2022-03-31T22:15:16.000Z | # coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ResourcesRole(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'links': 'list[Link]',
'resources': 'list[Role]'
}
attribute_map = {
'links': 'links',
'resources': 'resources'
}
def __init__(self, links=None, resources=None): # noqa: E501
"""ResourcesRole - a model defined in Swagger""" # noqa: E501
self._links = None
self._resources = None
self.discriminator = None
if links is not None:
self.links = links
if resources is not None:
self.resources = resources
@property
def links(self):
"""Gets the links of this ResourcesRole. # noqa: E501
Hypermedia links to corresponding or related resources. # noqa: E501
:return: The links of this ResourcesRole. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ResourcesRole.
Hypermedia links to corresponding or related resources. # noqa: E501
:param links: The links of this ResourcesRole. # noqa: E501
:type: list[Link]
"""
self._links = links
@property
def resources(self):
"""Gets the resources of this ResourcesRole. # noqa: E501
The resources returned. # noqa: E501
:return: The resources of this ResourcesRole. # noqa: E501
:rtype: list[Role]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this ResourcesRole.
The resources returned. # noqa: E501
:param resources: The resources of this ResourcesRole. # noqa: E501
:type: list[Role]
"""
self._resources = resources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResourcesRole, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourcesRole):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
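# --- Illustrative usage sketch (added for documentation; not part of the
# --- generated module). It only exercises the model defined above: the
# --- `links`/`resources` lists would normally hold generated Link/Role
# --- instances returned by the API.
def _resources_role_example():  # pragma: no cover
    page = ResourcesRole(links=[], resources=[])
    # to_dict() walks swagger_types and serialises nested models recursively
    return page.to_dict()  # -> {'links': [], 'resources': []}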
| 27.347222 | 80 | 0.562722 |
b91f9d35578542f9eed1fecc372090a83a08980f | 1,950 | py | Python | pds_pipelines/config.py | kaitlyndlee/PDS-Pipelines | dbcdd72c5c3e8f96593538752855b10279e94983 | [
"Unlicense"
] | null | null | null | pds_pipelines/config.py | kaitlyndlee/PDS-Pipelines | dbcdd72c5c3e8f96593538752855b10279e94983 | [
"Unlicense"
] | null | null | null | pds_pipelines/config.py | kaitlyndlee/PDS-Pipelines | dbcdd72c5c3e8f96593538752855b10279e94983 | [
"Unlicense"
] | null | null | null | import os
from pathlib import Path
# Database credentials
credentials = {'upc_test': {'user': 'postgres',
'pass': '',
'host': 'localhost',
'port': '5432',
'db': 'upc_test'},
'di_test': {'user': 'postgres',
'pass': '',
'host': 'localhost',
'port': '5432',
'db': 'di_test'},
'clusterjob_test': {'user': 'postgres',
'pass': '',
'host': 'localhost',
'port': '5432',
'db': 'clusterjobs_test'}
}
# Redis path(?) info
redis_info = {'host': 'localhost', 'port': '6379', 'db': '0'}
# POW / MAP2 base path
pow_map2_base = '/pds_san/PDS_Services/'
web_base = 'https://pdsimage.wr.usgs.gov/Missions/'
archive_base = '/pds_san/PDS_Archive/'
derived_base = '/pds_san/PDS_Derived/UPC/images/'
link_dest = '/pds_san/PDS_Archive_Links/'
# Recipe base path
# Uses the root of the project to set the base path for all necessary files
root = Path(__file__).parent.parent
recipe_base = os.path.join(root, 'recipe/new/')
pds_info = os.path.join(root, 'pds_pipelines/PDSinfo.json')
pds_log = os.path.join(root, 'logs/')
slurm_log = os.path.join(root, 'output/')
cmd_dir = os.path.join(root, 'pds_pipelines/')
keyword_def = os.path.join(root, 'recipe/Keyword_Definition.json')
scratch = '/scratch/pds_services/'
workarea = os.path.join(scratch, 'workarea/')
default_namespace = 'adampaquette_queue'
pds_db = 'di_test'
upc_db = 'upc_test'
cluster_db = 'clusterjob_test'
lock_obj = 'processes'
upc_error_queue = 'UPC_ErrorQueue'
disk_usage_ratio = 0.4
# Path where the JSON summary views are written to
summaries_path = '/home/kdlee/builds/PDS-Pipelines/json'
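# --- Illustrative sketch (added for documentation; not part of the original
# --- config): how one credential entry above can be assembled into a
# --- SQLAlchemy/libpq-style connection URL. Key names come from the
# --- `credentials` dict defined in this module.
def _example_db_url(name=upc_db):
    c = credentials[name]
    return 'postgresql://{user}:{password}@{host}:{port}/{db}'.format(
        user=c['user'], password=c['pass'], host=c['host'],
        port=c['port'], db=c['db'])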
| 32.5 | 75 | 0.557949 |
42bd0542a9582d0977a7fb2681bca1b6f58ffca7 | 518 | py | Python | examples/factory/subclass.py | Argmaster/PyR3 | 6786bcb6a101fe4bd4cc50fe43767b8178504b15 | [
"MIT"
] | 2 | 2021-12-12T18:51:52.000Z | 2022-02-23T09:49:16.000Z | examples/factory/subclass.py | Argmaster/PyR3 | 6786bcb6a101fe4bd4cc50fe43767b8178504b15 | [
"MIT"
] | 2 | 2021-11-08T12:09:02.000Z | 2021-12-12T23:01:12.000Z | examples/factory/subclass.py | Argmaster/PyR3 | 6786bcb6a101fe4bd4cc50fe43767b8178504b15 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from PyR3.bpy import bpy
from PyR3.factory.fields.Number import Integer
from PyR3.factory.fields.String import String
from PyR3.factory.MeshFactory import MeshFactory
from PyR3.shortcut.mesh import addCube
class MeshFactorySubclass(MeshFactory):
size = Integer(value_range=range(1, 5))
name = String(default="name")
def render(self):
cube: bpy.types.Object = addCube(size=self.size)
cube.name = self.name
# cube was just created so it's already selected
| 28.777778 | 56 | 0.722008 |
8bd2e15b42e92e4191fc58ecfe7caadfbca530d6 | 6,054 | py | Python | xm_transformer.py | jasantosm/powerco_scraper_historical | 4aad8a4fdb50ff052391f2ef14b2bbb0662dea71 | [
"MIT"
] | null | null | null | xm_transformer.py | jasantosm/powerco_scraper_historical | 4aad8a4fdb50ff052391f2ef14b2bbb0662dea71 | [
"MIT"
] | null | null | null | xm_transformer.py | jasantosm/powerco_scraper_historical | 4aad8a4fdb50ff052391f2ef14b2bbb0662dea71 | [
"MIT"
] | null | null | null | import pandas as pd
import time
from bs4 import BeautifulSoup
from mysql_service import insert_day, get_days
from sqlalchemy import create_engine
def transform():
days_raw = get_days()
days_list = []
for n in range(len(days_raw)):
titles = days_raw[n]['titles'].split('|')
t_raw = days_raw[n]['tables'].split('^_^')
date = days_raw[n]['date']
day_dict = {}
day_dict['date'] = date
        # 0. Generation (Generacion)
try:
generacion = pd.read_html(t_raw[0], index_col=0)[0]
day_dict['generacion_total_programada_redespacho'] = generacion.loc['GENERACION'].loc['Programada Redespacho (GWh)']
day_dict['generacion_total_programada_despacho'] = generacion.loc['GENERACION'].loc['Programada Despacho (GWh)']
day_dict['generacion_total_real'] = generacion.loc['GENERACION'].loc['Real (GWh)']
except:
day_dict['generacion_total_programada_redespacho'] = 'ND'
day_dict['generacion_total_programada_despacho'] = 'ND'
day_dict['generacion_total_real'] = 'ND'
print(f'Error en generacion el dia: {date}')
        # 1. International exchanges (imports/exports)
try:
intercambios_internacionales = pd.read_html(t_raw[1], index_col=0)[0]
day_dict['importacion_programada_redespacho'] = intercambios_internacionales.loc['Importaciones'].loc['Programada Redespacho (GWh)']
day_dict['importacion__real'] = intercambios_internacionales.loc['Importaciones'].loc['Real (GWh)']
day_dict['exportacion_programada_redespacho'] = intercambios_internacionales.loc['Exportaciones'].loc['Programada Redespacho (GWh)']
day_dict['exportacion__real'] = intercambios_internacionales.loc['Exportaciones'].loc['Real (GWh)']
except:
day_dict['importacion_programada_redespacho'] = 'ND'
day_dict['importacion__real'] = 'ND'
day_dict['exportacion_programada_redespacho'] = 'ND'
day_dict['exportacion__real'] = 'ND'
print(f'Error en intercambios internacionales el dia: {date}')
        # 2. Availability (disponibilidad)
try:
disponibilidad = pd.read_html(t_raw[2], index_col=0)[0]
day_dict['disponibilidad_real'] = disponibilidad.loc['DISPONIBILIDAD'].loc['Real (MW)']
except:
day_dict['disponibilidad_real'] = 'ND'
print(f'Error en disponibilidad el dia: {date}')
        # 3. Unserved demand (demanda no atendida)
try:
demanda_no_atendida = pd.read_html(t_raw[3], index_col=0)[0]
day_dict['demanda_no_atendida'] = demanda_no_atendida.loc['Total Demanda no atendida -SIN-'].loc['MWh']
except:
day_dict['demanda_no_atendida'] = 'ND'
print(f'Error en demanda no atendida el dia: {date}')
        # 7. Costs (marginal cost of redispatch)
try:
demanda_no_atendida = pd.read_html(t_raw[7], index_col=0)[0]
day_dict['costo_marginal_promedio_redespacho'] = demanda_no_atendida.loc['Costo Marginal Promedio del Redespacho ($/kWh)'].loc['$/kWh']
except:
day_dict['costo_marginal_promedio_redespacho'] = 'ND'
print(f'Error en costo marginal promedio el dia: {date}')
        # 8. Hydrological inflows (aportes)
try:
aportes = pd.read_html(t_raw[9], index_col=0)[0]
indices = aportes.index
indice_aportes = ''
columna_aportes = ''
for indice in indices:
if str(indice) == 'TOTAL -SIN-':
indice_aportes = 'TOTAL -SIN-'
columna_aportes = 'GWh'
elif str(indice) == 'Total SIN':
indice_aportes = 'Total SIN'
columna_aportes = "Caudal GWh"
day_dict['aportes_hidricos'] = aportes.loc[indice_aportes].loc[columna_aportes]
except:
day_dict['aportes_hidricos'] = "ND"
print(f'Error en aportes el dia: {date}')
        # 9. Reservoir reserves (reservas)
try:
reservas = pd.read_html(t_raw[10], index_col=0)[0]
indices = reservas.index
indice_reservas = ''
columna_reservas = ''
for indice in indices:
if str(indice) == 'TOTAL -SIN-':
indice_reservas = 'TOTAL -SIN-'
elif str(indice) == 'Total SIN':
indice_reservas = 'Total SIN'
columnas = reservas.columns
for columna in columnas:
if str(columna) == 'Volumen Util Diario GWh':
columna_reservas_1 = 'Volumen Util Diario GWh'
elif str(columna) == 'Volumen Util Diario GWh(1)':
columna_reservas_1 = 'Volumen Util Diario GWh(1)'
for columna in columnas:
if str(columna) == 'Volumen GWh':
columna_reservas_2 = 'Volumen GWh'
elif str(columna) == 'Volumen GWh(4)':
columna_reservas_2 = 'Volumen GWh(4)'
day_dict['volumen_util_diario'] = reservas.loc[indice_reservas].loc[columna_reservas_1]
day_dict['volumen'] = reservas.loc[indice_reservas].loc[columna_reservas_2]
except:
day_dict['volumen_util_diario'] = 'ND'
day_dict['volumen'] = 'ND'
print(f'Error en volumen: {date}')
days_list.append(day_dict)
print('\n')
print('Dia transformado: ', date)
return days_list
def to_mysql():
df = pd.DataFrame(transform())
    # GCP Cloud SQL instance password: qju7lep86r1L4Nod
engine = create_engine('mysql+mysqldb://root:qju7lep86r1L4Nod@34.86.123.197:3306/xmdata', echo = False)
df.to_sql(name = 'data_prepared', con = engine, if_exists = 'append', index = False)
def main():
to_mysql()
if __name__ == "__main__":
    main()
| 37.602484 | 147 | 0.575487 |
0d0ab2e901458e7663ef84b3bb90efd803d0e7d9 | 389 | py | Python | hptest/wsgi.py | aitomato123/hptest | 842cf5f8c9f419c1a9395f43563d19213a1d531a | [
"Apache-2.0"
] | null | null | null | hptest/wsgi.py | aitomato123/hptest | 842cf5f8c9f419c1a9395f43563d19213a1d531a | [
"Apache-2.0"
] | null | null | null | hptest/wsgi.py | aitomato123/hptest | 842cf5f8c9f419c1a9395f43563d19213a1d531a | [
"Apache-2.0"
] | null | null | null | """
WSGI config for hptest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hptest.settings')
application = get_wsgi_application()
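# Added note (not part of the original file): `application` above is the
# object a WSGI server imports, e.g. gunicorn pointed at
# "hptest.wsgi:application". For a quick local sanity check the standard
# library server can serve it as well:
#     from wsgiref.simple_server import make_server
#     make_server("127.0.0.1", 8000, application).serve_forever()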
| 22.882353 | 78 | 0.784062 |
ac14f6b868288bea41e7d563ee0a59969ab1b911 | 474 | py | Python | docs/source/examples/FB2.0/post_file_systems_policies.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | docs/source/examples/FB2.0/post_file_systems_policies.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | docs/source/examples/FB2.0/post_file_systems_policies.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # attach policy to a file system
# assume we have a policy named "p1", and a file system with id
# "100abf42-0000-4000-8023-000det400090"
res = client.post_file_systems_policies(policy_names=["p1"],
member_ids=["100abf42-0000-4000-8023-000det400090"])
print(res)
if type(res) == pypureclient.responses.ValidResponse:
print(list(res.items))
# Other valid fields: policy_ids, member_names
# See section "Common Fields" for examples
| 43.090909 | 92 | 0.702532 |
e6d482ff1eb2f0158e76c6b4d174a5992780f646 | 737 | py | Python | instagram/images/migrations/0003_auto_20181129_1801.py | Yesdoing/yestagram | a8f69b0fa2cb460d578717ac132900c1a1077d57 | [
"MIT"
] | 5 | 2019-05-08T07:34:42.000Z | 2020-08-16T16:54:53.000Z | instagram/images/migrations/0003_auto_20181129_1801.py | Yesdoing/Instagram | a8f69b0fa2cb460d578717ac132900c1a1077d57 | [
"MIT"
] | 11 | 2019-12-29T10:18:20.000Z | 2022-02-26T14:28:23.000Z | instagram/images/migrations/0003_auto_20181129_1801.py | Yesdoing/Instagram | a8f69b0fa2cb460d578717ac132900c1a1077d57 | [
"MIT"
] | 2 | 2019-01-30T11:45:31.000Z | 2020-08-16T16:54:55.000Z | # Generated by Django 2.0.9 on 2018-11-29 09:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('images', '0002_auto_20181128_1431'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='image',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='comments', to='images.Image'),
),
migrations.AlterField(
model_name='like',
name='image',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='likes', to='images.Image'),
),
]
| 29.48 | 136 | 0.632293 |
235353cb89bdca5e9468b004378f912557b559e5 | 2,267 | py | Python | test/tests.py | ithaaswin/TeachersPetBot | e826083b64ea6ab78a223635fcb1d7881ba8b082 | [
"MIT"
] | null | null | null | test/tests.py | ithaaswin/TeachersPetBot | e826083b64ea6ab78a223635fcb1d7881ba8b082 | [
"MIT"
] | 57 | 2021-10-31T23:15:20.000Z | 2021-11-15T18:10:41.000Z | test/tests.py | ithaaswin/TeachersPetBot | e826083b64ea6ab78a223635fcb1d7881ba8b082 | [
"MIT"
] | 5 | 2021-11-17T23:21:01.000Z | 2021-11-30T03:44:05.000Z | import os
import platform
import asyncio
import discord
from dotenv import load_dotenv
import test_office_hours
import test_event_creation
import test_qna
import test_calendar
import test_profanity
import test_attendance
import test_help
if platform.system() == 'Windows':
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
load_dotenv()
TOKEN = os.getenv('TESTING_BOT_TOKEN')
TEST_GUILD_ID = int(os.getenv('TEST_GUILD_ID'))
testing_bot = discord.Client()
async def run_tests():
exit_status = 0
await begin_tests()
try:
print('testing QNA\n----------')
await test_qna.test(testing_bot, TEST_GUILD_ID)
print('testing office hours\n----------')
await test_office_hours.test(testing_bot, TEST_GUILD_ID)
print('testing event creation\n----------')
await test_event_creation.test(testing_bot, TEST_GUILD_ID)
print('testing calendar\n----------')
await test_calendar.test(testing_bot, TEST_GUILD_ID)
print('testing profanity\n----------')
await test_profanity.test(testing_bot, TEST_GUILD_ID)
print('testing attendance\n----------')
await test_attendance.test(testing_bot, TEST_GUILD_ID)
print('testing help\n----------')
await test_help.test(testing_bot, TEST_GUILD_ID)
except AssertionError as ex:
print('exception: ', type(ex).__name__ + ':', ex)
print('--')
exit_status = 1
finally:
await end_tests()
print('exit_status: ', exit_status)
assert exit_status == 0
await testing_bot.close()
@testing_bot.event
async def on_ready():
print('Testing bot running')
print('------')
await run_tests()
async def begin_tests():
await next(ch for ch in testing_bot.get_guild(TEST_GUILD_ID).text_channels
if ch.name == 'instructor-commands').send('!begin-tests')
async def end_tests():
await next(ch for ch in testing_bot.get_guild(TEST_GUILD_ID).text_channels
if ch.name == 'instructor-commands').send('!end-tests')
if __name__ == '__main__':
testing_bot.run(TOKEN)
###########################
# Function: test_bot
# Description: Run the bot
###########################
def test_bot():
testing_bot.run(TOKEN)
| 29.441558 | 78 | 0.662991 |
1f23179f10e007cb39a57c24308054686567cfe0 | 89 | py | Python | contact/apps.py | 85599/my-first-contact-app | dda8c12cd9232ee6f962d11e18c397d9c5a2f251 | [
"MIT"
] | 446 | 2015-01-04T20:58:26.000Z | 2022-03-30T23:08:26.000Z | project/contact/apps.py | giannisdaras/tedxntua2019 | 0f9ebdb2946cc8da8c44562313be740db8a394ea | [
"MIT"
] | 649 | 2015-01-09T23:42:14.000Z | 2022-03-31T17:27:19.000Z | project/contact/apps.py | giannisdaras/tedxntua2019 | 0f9ebdb2946cc8da8c44562313be740db8a394ea | [
"MIT"
] | 319 | 2015-01-06T20:58:42.000Z | 2022-03-30T06:29:04.000Z | from django.apps import AppConfig
class ContactConfig(AppConfig):
name = 'contact'
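    # Added note (not part of the original file): Django picks this config up
    # when settings.INSTALLED_APPS lists "contact" or the explicit dotted path
    # "contact.apps.ContactConfig".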
| 14.833333 | 33 | 0.752809 |
1013cb6dc97156a9b477aceaf80936f55992ff4a | 13,902 | py | Python | plotly/graph_objs/isosurface/caps/__init__.py | piyush1301/plotly.py | 50cd5c4cd4732042422751c7760acbab8dd8a50d | [
"MIT"
] | 6 | 2019-05-03T02:12:04.000Z | 2020-03-01T06:33:21.000Z | plotly/graph_objs/isosurface/caps/__init__.py | piyush1301/plotly.py | 50cd5c4cd4732042422751c7760acbab8dd8a50d | [
"MIT"
] | null | null | null | plotly/graph_objs/isosurface/caps/__init__.py | piyush1301/plotly.py | 50cd5c4cd4732042422751c7760acbab8dd8a50d | [
"MIT"
] | 5 | 2019-05-18T16:50:11.000Z | 2021-07-06T21:14:36.000Z |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Z(_BaseTraceHierarchyType):
# fill
# ----
@property
def fill(self):
"""
Sets the fill ratio of the `caps`. The default fill value of
the `caps` is 1 meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than one would allow
the creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['fill']
@fill.setter
def fill(self, val):
self['fill'] = val
# show
# ----
@property
def show(self):
"""
Sets the fill ratio of the `slices`. The default fill value of
the z `slices` is 1 meaning that they are entirely shaded. On
the other hand Applying a `fill` ratio less than one would
allow the creation of openings parallel to the edges.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['show']
@show.setter
def show(self, val):
self['show'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'isosurface.caps'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
fill
Sets the fill ratio of the `caps`. The default fill
value of the `caps` is 1 meaning that they are entirely
shaded. On the other hand Applying a `fill` ratio less
than one would allow the creation of openings parallel
to the edges.
show
Sets the fill ratio of the `slices`. The default fill
value of the z `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
"""
def __init__(self, arg=None, fill=None, show=None, **kwargs):
"""
Construct a new Z object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.isosurface.caps.Z
fill
Sets the fill ratio of the `caps`. The default fill
value of the `caps` is 1 meaning that they are entirely
shaded. On the other hand Applying a `fill` ratio less
than one would allow the creation of openings parallel
to the edges.
show
Sets the fill ratio of the `slices`. The default fill
value of the z `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
Returns
-------
Z
"""
super(Z, self).__init__('z')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.isosurface.caps.Z
constructor must be a dict or
an instance of plotly.graph_objs.isosurface.caps.Z"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.isosurface.caps import (z as v_z)
# Initialize validators
# ---------------------
self._validators['fill'] = v_z.FillValidator()
self._validators['show'] = v_z.ShowValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('fill', None)
self['fill'] = fill if fill is not None else _v
_v = arg.pop('show', None)
self['show'] = show if show is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Y(_BaseTraceHierarchyType):
# fill
# ----
@property
def fill(self):
"""
Sets the fill ratio of the `caps`. The default fill value of
the `caps` is 1 meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than one would allow
the creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['fill']
@fill.setter
def fill(self, val):
self['fill'] = val
# show
# ----
@property
def show(self):
"""
Sets the fill ratio of the `slices`. The default fill value of
the y `slices` is 1 meaning that they are entirely shaded. On
the other hand Applying a `fill` ratio less than one would
allow the creation of openings parallel to the edges.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['show']
@show.setter
def show(self, val):
self['show'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'isosurface.caps'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
fill
Sets the fill ratio of the `caps`. The default fill
value of the `caps` is 1 meaning that they are entirely
shaded. On the other hand Applying a `fill` ratio less
than one would allow the creation of openings parallel
to the edges.
show
Sets the fill ratio of the `slices`. The default fill
value of the y `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
"""
def __init__(self, arg=None, fill=None, show=None, **kwargs):
"""
Construct a new Y object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.isosurface.caps.Y
fill
Sets the fill ratio of the `caps`. The default fill
value of the `caps` is 1 meaning that they are entirely
shaded. On the other hand Applying a `fill` ratio less
than one would allow the creation of openings parallel
to the edges.
show
Sets the fill ratio of the `slices`. The default fill
value of the y `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
Returns
-------
Y
"""
super(Y, self).__init__('y')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.isosurface.caps.Y
constructor must be a dict or
an instance of plotly.graph_objs.isosurface.caps.Y"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.isosurface.caps import (y as v_y)
# Initialize validators
# ---------------------
self._validators['fill'] = v_y.FillValidator()
self._validators['show'] = v_y.ShowValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('fill', None)
self['fill'] = fill if fill is not None else _v
_v = arg.pop('show', None)
self['show'] = show if show is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class X(_BaseTraceHierarchyType):
# fill
# ----
@property
def fill(self):
"""
Sets the fill ratio of the `caps`. The default fill value of
the `caps` is 1 meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than one would allow
the creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['fill']
@fill.setter
def fill(self, val):
self['fill'] = val
# show
# ----
@property
def show(self):
"""
Sets the fill ratio of the `slices`. The default fill value of
the x `slices` is 1 meaning that they are entirely shaded. On
the other hand Applying a `fill` ratio less than one would
allow the creation of openings parallel to the edges.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['show']
@show.setter
def show(self, val):
self['show'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'isosurface.caps'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
fill
Sets the fill ratio of the `caps`. The default fill
value of the `caps` is 1 meaning that they are entirely
shaded. On the other hand Applying a `fill` ratio less
than one would allow the creation of openings parallel
to the edges.
show
Sets the fill ratio of the `slices`. The default fill
value of the x `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
"""
def __init__(self, arg=None, fill=None, show=None, **kwargs):
"""
Construct a new X object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.isosurface.caps.X
fill
Sets the fill ratio of the `caps`. The default fill
value of the `caps` is 1 meaning that they are entirely
shaded. On the other hand Applying a `fill` ratio less
than one would allow the creation of openings parallel
to the edges.
show
Sets the fill ratio of the `slices`. The default fill
value of the x `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
Returns
-------
X
"""
super(X, self).__init__('x')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.isosurface.caps.X
constructor must be a dict or
an instance of plotly.graph_objs.isosurface.caps.X"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.isosurface.caps import (x as v_x)
# Initialize validators
# ---------------------
self._validators['fill'] = v_x.FillValidator()
self._validators['show'] = v_x.ShowValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('fill', None)
self['fill'] = fill if fill is not None else _v
_v = arg.pop('show', None)
self['show'] = show if show is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
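# --- Illustrative usage sketch (added for documentation; not part of the
# --- generated module). The three cap objects defined above are what an
# --- isosurface trace's `caps` attribute is built from:
def _caps_example():  # pragma: no cover
    return dict(
        x=X(show=True, fill=1),
        y=Y(show=True, fill=1),
        z=Z(show=False),
    )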
| 30.824834 | 82 | 0.547044 |
d4331c16ea10a0efef56c5ab8bf65bb982fadd30 | 5,041 | py | Python | realtime_recognition.py | SHANK885/realtime_face_recognition | 4cb3555e58990a4b853dc21af01b13d06e899c2c | [
"MIT"
] | 2 | 2020-07-02T19:35:46.000Z | 2020-11-21T19:31:23.000Z | realtime_recognition.py | SHANK885/realtime_face_recognition | 4cb3555e58990a4b853dc21af01b13d06e899c2c | [
"MIT"
] | null | null | null | realtime_recognition.py | SHANK885/realtime_face_recognition | 4cb3555e58990a4b853dc21af01b13d06e899c2c | [
"MIT"
] | null | null | null | from keras.models import Sequential
from keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
from keras.layers.merge import Concatenate
from keras.layers.core import Lambda, Flatten, Dense
from keras.initializers import glorot_uniform
from keras.engine.topology import Layer
from keras import backend as K
K.set_image_data_format('channels_first')
import cv2
import json
import os
import numpy as np
from numpy import genfromtxt
import pandas as pd
import tensorflow as tf
from fr_utils import *
from triplet_loss import triplet_loss
from inception_blocks_v2 import *
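# Added note (not part of the original file): create_encoding() below receives
# a BGR face crop from OpenCV; the [..., ::-1] slice flips it to RGB, the
# transpose moves it to channels-first (3, 96, 96) to match the
# K.set_image_data_format('channels_first') call above, pixel values are
# scaled to [0, 1], a batch dimension is added, and the model returns the
# face-embedding vector used for the distance comparisons in who_is_it().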
def create_encoding(image, model):
img = image[...,::-1]
img = np.around(np.transpose(img, (2,0,1))/255.0, decimals=12)
x_train = np.array([img])
embedding = model.predict_on_batch(x_train)
return embedding
def who_is_it(image_path, database, model):
"""
Arguments:
    image_path -- image array (BGR face crop) to identify
database -- database containing image encodings along with the name of the person on the image
model -- your Inception model instance in Keras
Returns:
min_dist -- the minimum distance between image_path encoding and the encodings from the database
identity -- string, the name prediction for the person on image_path
"""
### START CODE HERE ###
## Step 1: Compute the target "encoding" for the image. Use img_to_encoding() see example above. ## (≈ 1 line)
encoding = create_encoding(image_path, model)
## Step 2: Find the closest encoding ##
# Initialize "min_dist" to a large value, say 100 (≈1 line)
min_dist = 100
# Loop over the database dictionary's names and encodings.
for (name, db_enc) in database.items():
# Compute L2 distance between the target "encoding" and the current "emb" from the database. (≈ 1 line)
dist = np.linalg.norm(encoding-db_enc)
# If this distance is less than the min_dist, then set min_dist to dist, and identity to name. (≈ 3 lines)
if dist < min_dist:
min_dist = dist
identity = name
### END CODE HERE ###
if min_dist > 0.85:
print("Not in the database.")
print("distance", min_dist)
identity = "Unknown"
else:
print ("it's " + str(identity) + ", the distance is " + str(min_dist))
return min_dist, identity
def main():
embedding_path = "./database/embeddings/embeddings.json"
face_detector_path = "./classifiers/haarcascade_frontalface_default.xml"
FRmodel = faceRecoModel(input_shape=(3, 96, 96))
print("Total Params:", FRmodel.count_params())
# load trained model
FRmodel.compile(optimizer='adam', loss=triplet_loss, metrics=['accuracy'])
load_weights_from_FaceNet(FRmodel)
with open(embedding_path, 'r') as infile:
database = json.load(infile)
#who_is_it("images/camera_0.jpg", database, FRmodel)
video_capture = cv2.VideoCapture(0)
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
face_detector = cv2.CascadeClassifier(face_detector_path)
print("above while")
while True:
# capture frame
if video_capture.isOpened():
ret, frame = video_capture.read()
raw_frame = frame.copy()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_detector.detectMultiScale(gray,
scaleFactor=1.5,
minNeighbors=5,
minSize=(30, 30))
if len(faces) > 0:
for (x, y, w, h) in faces:
cropped = raw_frame[y:y+h, x:x+w]
image = cv2.resize(cropped,
(96, 96),
interpolation=cv2.INTER_LINEAR)
min_dist, identity = who_is_it(image, database, FRmodel)
if identity == 'Unknown':
box_color = (0, 0, 255)
text_color = (0, 0, 255)
else:
box_color = (0, 255, 0)
text_color = (255, 0, 0)
cv2.rectangle(frame,
(x, y),
(x+w, y+h),
box_color,
2)
cv2.putText(frame,
identity,
(x, y),
cv2.FONT_HERSHEY_SIMPLEX,
0.75,
text_color,
thickness=2,
lineType=2)
cv2.imshow('Realtime Recognition', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| 32.733766 | 114 | 0.592343 |
49e7e4ad2b1a5d210f92bd540bc9b0b5ba9ccba3 | 3,529 | py | Python | spyder/__init__.py | StefRe/spyder | 210495f5b691cc2986a437c237cce8de4ab06b79 | [
"MIT"
] | 3 | 2019-09-27T21:00:00.000Z | 2021-03-07T23:28:32.000Z | spyder/__init__.py | StefRe/spyder | 210495f5b691cc2986a437c237cce8de4ab06b79 | [
"MIT"
] | 2 | 2021-09-19T06:31:54.000Z | 2022-02-27T20:21:27.000Z | spyder/__init__.py | StefRe/spyder | 210495f5b691cc2986a437c237cce8de4ab06b79 | [
"MIT"
] | 2 | 2021-04-30T01:18:22.000Z | 2021-09-19T06:31:42.000Z | # -*- coding: utf-8 -*-
"""
MIT License
===========
The spyder/images dir and some source files under other terms (see NOTICE.txt).
Copyright (c) 2009- Spyder Project Contributors and others (see AUTHORS.txt)
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
version_info = (5, 0, 0, "dev0")
__version__ = '.'.join(map(str, version_info))
__license__ = __doc__
__project_url__ = 'https://github.com/spyder-ide/spyder'
__forum_url__ = 'https://groups.google.com/group/spyderlib'
__trouble_url__ = __project_url__ + '/wiki/Troubleshooting-Guide-and-FAQ'
__trouble_url_short__ = 'https://tinyurl.com/SpyderHelp'
__website_url__ = 'https://www.spyder-ide.org/'
# Dear (Debian, RPM, ...) package makers, please feel free to customize the
# following path to module's data (images) and translations:
DATAPATH = LOCALEPATH = DOCPATH = MATHJAXPATH = JQUERYPATH = ''
import os
# Directory of the current file
__dir__ = os.path.dirname(os.path.abspath(__file__))
def add_to_distribution(dist):
"""Add package to py2exe/cx_Freeze distribution object
Extension to guidata.disthelpers"""
try:
dist.add_qt_bindings()
except AttributeError:
raise ImportError("This script requires guidata 1.5+")
for _modname in ('spyder', 'spyderplugins'):
dist.add_module_data_files(_modname, ("", ),
('.png', '.svg', '.html', '.png', '.txt',
'.js', '.inv', '.ico', '.css', '.doctree',
'.qm', '.py',),
copy_to_root=False)
def get_versions(reporev=True):
"""Get version information for components used by Spyder"""
import sys
import platform
import qtpy
import qtpy.QtCore
    revision = None
    branch = None
if reporev:
from spyder.utils import vcs
revision, branch = vcs.get_git_revision(os.path.dirname(__dir__))
if not sys.platform == 'darwin': # To avoid a crash with our Mac app
system = platform.system()
else:
system = 'Darwin'
return {
'spyder': __version__,
'python': platform.python_version(), # "2.7.3"
'bitness': 64 if sys.maxsize > 2**32 else 32,
'qt': qtpy.QtCore.__version__,
'qt_api': qtpy.API_NAME, # PyQt5
'qt_api_ver': qtpy.PYQT_VERSION,
'system': system, # Linux, Windows, ...
'release': platform.release(), # XP, 10.6, 2.2.0, etc.
'revision': revision, # '9fdf926eccce',
'branch': branch, # '4.x' or master
}
| 36.381443 | 79 | 0.669878 |
22f4652c471f33946c0e1615b9e2cf10088f2dc3 | 3,158 | py | Python | tests/unit/nn/test_rescale.py | schiotz/nequip | c343ce25ecfeb64f6df92e96022e673a7714e3a6 | [
"MIT"
] | 153 | 2021-06-20T20:12:01.000Z | 2022-03-31T13:57:45.000Z | tests/unit/nn/test_rescale.py | schiotz/nequip | c343ce25ecfeb64f6df92e96022e673a7714e3a6 | [
"MIT"
] | 25 | 2021-06-17T16:00:16.000Z | 2022-03-29T07:04:00.000Z | tests/unit/nn/test_rescale.py | schiotz/nequip | c343ce25ecfeb64f6df92e96022e673a7714e3a6 | [
"MIT"
] | 25 | 2021-06-21T22:25:22.000Z | 2022-03-30T04:39:46.000Z | import pytest
import sys
if sys.version_info[1] >= 7:
import contextlib
else:
# has backport of nullcontext
import contextlib2 as contextlib
import torch
from e3nn.util.test import assert_auto_jitable
from nequip.nn import RescaleOutput
from nequip.data import AtomicDataDict, AtomicData
from nequip.utils.test import assert_AtomicData_equivariant
from nequip.nn.embedding import OneHotAtomEncoding
@pytest.mark.parametrize("scale_by", [0.77, 1.0, None])
@pytest.mark.parametrize("shift_by", [0.0, 0.4443, None])
@pytest.mark.parametrize("scale_trainable", [True, False])
@pytest.mark.parametrize("shift_trainable", [True, False])
def test_rescale(
CH3CHO,
scale_by,
shift_by,
scale_trainable,
shift_trainable,
):
_, data = CH3CHO
oh = OneHotAtomEncoding(
num_types=3,
irreps_in=data.irreps,
)
# some combinations are illegal and should raise
build_with = contextlib.nullcontext()
if scale_by is None and scale_trainable:
build_with = pytest.raises(ValueError)
elif shift_by is None and shift_trainable:
build_with = pytest.raises(ValueError)
rescale = None
with build_with:
rescale = RescaleOutput(
model=oh,
scale_keys=AtomicDataDict.NODE_ATTRS_KEY,
shift_keys=AtomicDataDict.NODE_ATTRS_KEY,
scale_by=scale_by,
shift_by=shift_by,
scale_trainable=scale_trainable,
shift_trainable=shift_trainable,
)
if rescale is None:
return
# == Check basics ==
assert_auto_jitable(rescale)
for training_mode in [True, False]:
rescale.train(training_mode)
rescale(AtomicData.to_AtomicDataDict(data))
assert_AtomicData_equivariant(rescale, data)
# == Check scale/shift ==
for training_mode in [True, False]:
rescale.train(training_mode)
oh_out = oh(AtomicData.to_AtomicDataDict(data))[AtomicDataDict.NODE_ATTRS_KEY]
rescale_out = rescale(AtomicData.to_AtomicDataDict(data))[
AtomicDataDict.NODE_ATTRS_KEY
]
if training_mode:
assert torch.all(oh_out == rescale_out)
continue # don't test anything else
# we are now in eval mode if here, test rescaling
# node attrs are a one hot, so we know orig then are zeros and ones
if scale_by is None and shift_by is None:
assert torch.all(oh_out == rescale_out)
if shift_by is None:
# no shift preserves zeros
assert torch.all((rescale_out == 0.0) == (oh_out == 0.0))
if scale_by is None and shift_by is not None:
# check that difference is right
assert torch.allclose(rescale_out - oh_out, torch.as_tensor(shift_by))
if scale_by is not None and shift_by is None:
# check that ratio is right
ratio = torch.nan_to_num(rescale_out / oh_out)
assert torch.allclose(ratio[oh_out != 0.0], torch.as_tensor(scale_by))
if scale_by is not None and shift_by is not None:
assert torch.allclose(rescale_out, scale_by * oh_out + shift_by)
| 33.956989 | 86 | 0.669411 |
b0adfc852cd1869c411c6b43c54a979fc04ad200 | 1,138 | py | Python | src/unittest/python/grundlegend/test_multiplikation.py | dlangheiter-tgm/test-mirror | 9878da44953c40abc1df0311f275c3eebc2e876b | [
"MIT"
] | null | null | null | src/unittest/python/grundlegend/test_multiplikation.py | dlangheiter-tgm/test-mirror | 9878da44953c40abc1df0311f275c3eebc2e876b | [
"MIT"
] | null | null | null | src/unittest/python/grundlegend/test_multiplikation.py | dlangheiter-tgm/test-mirror | 9878da44953c40abc1df0311f275c3eebc2e876b | [
"MIT"
] | null | null | null | """
Created on 27.12.2013
@author: Walter Rafeiner-Magor <wrafeiner-magor@tgm.ac.at>
"""
import unittest
from bruch.Bruch import *
class TestMultiplikation(unittest.TestCase):
def setUp(self):
self.b = Bruch(3, 2)
self.b2 = Bruch(self.b)
self.b3 = Bruch(4, 2)
pass
def tearDown(self):
del self.b, self.b2, self.b3
pass
def testmal(self):
self.b = self.b * Bruch(4)
assert(float(self.b) == 6)
def testmal2(self):
self.b = self.b * self.b2
assert(float(self.b) == 2.25)
def testmal3(self):
self.b2 = self.b * 2
assert(float(self.b2) == 3)
def testiMulError(self):
self.assertRaises(TypeError, self.b.__imul__, "other")
def testiMul(self):
self.b *= 2
assert(self.b == 3)
def testiMul2(self):
self.b *= Bruch(2)
assert(self.b == 3)
def testrmal(self):
self.b2 = 2 * Bruch(3, 2)
assert(float(self.b2) == 3)
def testmulError(self):
self.assertRaises(TypeError, self.b2.__mul__, 2.0)
if __name__ == "__main__":
    unittest.main()
| 21.471698 | 62 | 0.566784 |
d75f6714f230bff77455af5b7848968dc2f3e5c7 | 61 | py | Python | test/test_person.py | Eawag-SWW/datapool_client | a43c38f0f858a687d6354ef4d857beee59882c8a | [
"MIT"
] | null | null | null | test/test_person.py | Eawag-SWW/datapool_client | a43c38f0f858a687d6354ef4d857beee59882c8a | [
"MIT"
] | null | null | null | test/test_person.py | Eawag-SWW/datapool_client | a43c38f0f858a687d6354ef4d857beee59882c8a | [
"MIT"
] | null | null | null | def test_person_all(setup_postgres, dp):
dp.person.all()
| 20.333333 | 40 | 0.737705 |
9311787b856c3a439ee3087f9675b9123c85a13d | 1,962 | py | Python | hparams.py | kokeshing/tacotron2-1 | a04f57c3bf1b0182f5a15770cac03039f1b414f8 | [
"BSD-3-Clause"
] | null | null | null | hparams.py | kokeshing/tacotron2-1 | a04f57c3bf1b0182f5a15770cac03039f1b414f8 | [
"BSD-3-Clause"
] | null | null | null | hparams.py | kokeshing/tacotron2-1 | a04f57c3bf1b0182f5a15770cac03039f1b414f8 | [
"BSD-3-Clause"
] | null | null | null | from text import symbols
################################
# Experiment Parameters #
################################
epochs = 500
iters_per_checkpoint = 3000
seed = 1234
dynamic_loss_scaling = True
fp16_run = False
distributed_run = False
dist_backend = "nccl"
dist_url = "tcp://localhost:54321"
cudnn_enabled = True
cudnn_benchmark = True
ignore_layers = ['embedding.weight']
################################
# Data Parameters #
################################
load_mel_from_disk = False
training_files = 'filelists/ljs_audio_text_train_filelist.txt'
validation_files = 'filelists/ljs_audio_text_val_filelist.txt'
text_cleaners = ['english_cleaners']
################################
# Audio Parameters #
################################
max_wav_value = 32768.0
sampling_rate = 22050
filter_length = 1024
hop_length = 256
win_length = 1024
n_mel_channels = 80
mel_fmin = 0.0
mel_fmax = None
################################
# Model Parameters #
################################
n_symbols = len(symbols)
symbols_embedding_dim = 512
# Encoder parameters
encoder_kernel_size = 5
encoder_n_convolutions = 3
encoder_embedding_dim = 512
# Decoder parameters
n_frames_per_step = 1 # currently only 1 is supported
decoder_rnn_dim = 1024
prenet_dim = 256
max_decoder_steps = 1000
gate_threshold = 0.5
p_attention_dropout = 0.1
p_decoder_dropout = 0.1
# Attention parameters
attention_rnn_dim = 1024
attention_dim = 128
# Location Layer parameters
attention_location_n_filters = 32
attention_location_kernel_size = 31
# Mel-post processing network parameters
postnet_embedding_dim = 512
postnet_kernel_size = 5
postnet_n_convolutions = 5
################################
# Optimization Hyperparameters #
################################
use_saved_learning_rate = False
learning_rate = 4e-4
weight_decay = 1e-6
grad_clip_thresh = 1.0
batch_size = 24
mask_padding = True # set model's padded outputs to padded value
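# --- Worked example (added for documentation; not part of the original
# --- hparams): with the audio parameters above, one STFT frame advances
# --- hop_length / sampling_rate = 256 / 22050 ≈ 11.6 ms (≈ 86 mel frames
# --- per second) and each analysis window spans 1024 / 22050 ≈ 46.4 ms.
frame_shift_seconds = hop_length / sampling_rate
frames_per_second = sampling_rate / hop_length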
| 24.222222 | 65 | 0.649337 |
fc38e207b2dad68c026009563af5d233a145cee8 | 5,928 | py | Python | doc/source/conf.py | bonzoyang/d2m | 3c491a019c7436245ab6a3ac872c969c3a51a369 | [
"Apache-2.0"
] | null | null | null | doc/source/conf.py | bonzoyang/d2m | 3c491a019c7436245ab6a3ac872c969c3a51a369 | [
"Apache-2.0"
] | null | null | null | doc/source/conf.py | bonzoyang/d2m | 3c491a019c7436245ab6a3ac872c969c3a51a369 | [
"Apache-2.0"
] | 1 | 2021-07-27T14:25:16.000Z | 2021-07-27T14:25:16.000Z | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../src'))
# -- Project information -----------------------------------------------------
project = 'd2m'
copyright = '2021, bonzoyang'
author = 'bonzoyang'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.0.1 alpha'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Added by bonzo
# [ref] phinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_mock_imports
autodoc_mock_imports = ['pandas']
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'd2mdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'd2m.tex', 'd2m Documentation',
'bonzoyang', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'd2m', 'd2m Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'd2m', 'd2m Documentation',
author, 'd2m', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 29.788945 | 90 | 0.643556 |
3e6e12607a5a885c2c3e96888adeaf00b0140c47 | 1,046 | py | Python | twitter/tweets/migrations/0005_comment.py | vBubbaa/django-twitter | 925405dd05cbef3b325b2168663e183b927d8586 | [
"MIT"
] | 1 | 2020-02-08T02:16:05.000Z | 2020-02-08T02:16:05.000Z | twitter/tweets/migrations/0005_comment.py | vBubbaa/django-twitter | 925405dd05cbef3b325b2168663e183b927d8586 | [
"MIT"
] | 5 | 2020-02-13T22:36:04.000Z | 2021-09-22T18:36:50.000Z | twitter/tweets/migrations/0005_comment.py | vBubbaa/django-twitter | 925405dd05cbef3b325b2168663e183b927d8586 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.7 on 2020-02-07 23:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tweets', '0004_auto_20200206_1701'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('text', models.CharField(max_length=200)),
('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='tweets.Tweet')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
],
),
]
| 37.357143 | 143 | 0.656788 |
70206327c272c4b1a0e9a9741d3dbaa61e5d5cdb | 638 | py | Python | exam/myproject/myapp/admin.py | wasit7/cs459_2018 | 78243bbc939fcc2ed7528df8c14ad75e4b78d9a2 | [
"BSD-2-Clause"
] | 2 | 2019-09-04T08:13:47.000Z | 2019-09-19T07:12:18.000Z | exam/myproject/myapp/admin.py | wasit7/cs459_2018 | 78243bbc939fcc2ed7528df8c14ad75e4b78d9a2 | [
"BSD-2-Clause"
] | 10 | 2020-03-24T17:05:24.000Z | 2022-03-11T23:47:49.000Z | exam/myproject/myapp/admin.py | wasit7/cs459_2018 | 78243bbc939fcc2ed7528df8c14ad75e4b78d9a2 | [
"BSD-2-Clause"
] | 9 | 2019-08-30T04:00:58.000Z | 2019-11-19T04:48:07.000Z | from django.contrib import admin
from myapp.models import Customer, Transaction, Item, Record
class CustomerAdmin(admin.ModelAdmin):
list_display=[f.name for f in Customer._meta.fields]
admin.site.register(Customer, CustomerAdmin)
class TransactionAdmin(admin.ModelAdmin):
list_display=[f.name for f in Transaction._meta.fields]
admin.site.register(Transaction, TransactionAdmin)
class ItemAdmin(admin.ModelAdmin):
list_display=[f.name for f in Item._meta.fields]
admin.site.register(Item, ItemAdmin)
class RecordAdmin(admin.ModelAdmin):
list_display=[f.name for f in Record._meta.fields]
admin.site.register(Record, RecordAdmin) | 35.444444 | 61 | 0.811912 |
08bf22d2829a1e840c09cc430406875ef4c058f0 | 3,490 | py | Python | amadeus/amadeus.py | tsolakoua/amadeus-python | 56c0e5cb0510aab5a80646d07593d94c9cba2c69 | [
"MIT"
] | 125 | 2018-04-09T07:27:24.000Z | 2022-02-22T11:45:20.000Z | amadeus/amadeus.py | tsolakoua/amadeus-python | 56c0e5cb0510aab5a80646d07593d94c9cba2c69 | [
"MIT"
] | 58 | 2018-03-29T14:58:01.000Z | 2022-03-17T10:18:07.000Z | amadeus/amadeus.py | tsolakoua/amadeus-python | 56c0e5cb0510aab5a80646d07593d94c9cba2c69 | [
"MIT"
] | 58 | 2018-04-06T10:56:20.000Z | 2022-03-04T01:23:24.000Z | from .mixins.validator import Validator
from .mixins.http import HTTP
from .mixins.pagination import Pagination
from .namespaces import Core as Namespaces
class Client(Namespaces, Pagination, Validator, HTTP, object):
'''
The Amadeus client library for accessing
the travel APIs.
'''
# The available hosts for this API
HOSTS = {
'test': 'test.api.amadeus.com',
'production': 'api.amadeus.com'
}
# The initialization method for the entire module
def __init__(self, **options):
'''
Initialize using your credentials:
.. code-block:: python
from amadeus import Client
amadeus = Client(
client_id='YOUR_CLIENT_ID',
client_secret='YOUR_CLIENT_SECRET'
)
Alternatively, initialize the library using the environment variables
``AMADEUS_CLIENT_ID`` and ``AMADEUS_CLIENT_SECRET``.
.. code-block:: python
amadeus = amadeus.Client()
:param client_id: the API key used to authenticate the API
:paramtype client_id: str
:param client_secret: the API secret used to authenticate the API
:paramtype client_secret: str
:param logger: (optional) a Python compatible logger
(Default: ``logging.Logger``)
:paramtype logger: logging.Logger
:param log_level: (optional) the log level of the client, either
``"debug"``, ``"warn"``, or ``"silent"`` mode
(Default: ``"silent"``)
:paramtype log_level: str
:param hostname: (optional) the name of the server API calls are made
to, ``"production"`` or ``"test"``. (Default: ``"test"``)
:paramtype hostname: str
:param host: (optional) alternatively, you can specify a full host
domain name instead, e.g. ``"api.example.com"``
:paramtype host: str
:param ssl: if this client is should use HTTPS (Default: ``True``)
:paramtype ssl: bool
:param port: the port this client should use (Default: ``80`` for HTTP
and ``443`` for HTTPS)
:paramtype port: int
:param custom_app_id: (optional) a custom App ID to be passed in
the User Agent to the server (Default: ``None``)
:paramtype custom_app_id: str
:param custom_app_version: (optional) a custom App Version number to
be passed in the User Agent to the server (Default: ``None``)
:paramtype custom_app_version: str
:param http: (optional) a :func:`urllib.request.urlopen` compatible
client that accepts a :class:`urllib.request.Request` compatible
object (Default: ``urlopen``)
:paramtype http: urllib.request.urlopen
:raises ValueError: when a required param is missing
'''
self._initialize_client_credentials(options)
self._initialize_logger(options)
self._initialize_host(options)
self._initialize_custom_app(options)
self._initialize_http(options)
recognized_options = ['client_id', 'client_secret', 'logger', 'host',
'hostname', 'custom_app_id',
'custom_app_version', 'http',
'log_level', 'ssl', 'port']
self._warn_on_unrecognized_options(options, self.logger,
recognized_options)
Namespaces.__init__(self)
| 34.9 | 78 | 0.608596 |
c9b7e4f7e84ec8d2310dae139387101d6d66123a | 295 | py | Python | scratch/test_sw.py | ktbarrett/pyuvm | 725e6e4b8088aa085a5ce16861b46db49ce46672 | [
"Apache-2.0"
] | 84 | 2021-02-21T23:12:59.000Z | 2022-03-25T21:22:27.000Z | scratch/test_sw.py | ktbarrett/pyuvm | 725e6e4b8088aa085a5ce16861b46db49ce46672 | [
"Apache-2.0"
] | 37 | 2021-05-20T05:35:13.000Z | 2022-03-13T09:12:16.000Z | scratch/test_sw.py | ktbarrett/pyuvm | 725e6e4b8088aa085a5ce16861b46db49ce46672 | [
"Apache-2.0"
] | 22 | 2021-03-31T02:57:09.000Z | 2022-03-09T17:30:22.000Z | import dut_cocotb
import cocotb
import time
class Stim():
def __init__(self, max, dut, bfm):
self.max = max
self.bfm = bfm
def numb_gen_test(self):
for ii in range(self.max):
self.bfm.send_num(ii)
time.sleep(5)
self.bfm.done.set()
| 17.352941 | 38 | 0.579661 |
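A small sketch of how the Stim helper above might be exercised; the bfm object here is a hypothetical stand-in (anything exposing send_num() and a done event works), not part of pyuvm.
# Hypothetical driver for the Stim class above; FakeBfm is an illustrative stand-in.
import threading
class FakeBfm:
    def __init__(self):
        self.done = threading.Event()
    def send_num(self, num):
        print("sending", num)
bfm = FakeBfm()
stim = Stim(2, dut=None, bfm=bfm)  # dut is accepted but not used by Stim
stim.numb_gen_test()               # note: each send is followed by a 5 s sleep
assert bfm.done.is_set()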
c8e9abf7e763a4767c82c0baf2a1eca3ed24e6e2 | 1,171 | py | Python | ursh/blueprints/api/handlers.py | indico/ursh | 74a570299f70986beb7de9b9749270583f16ba52 | [
"MIT"
] | 5 | 2019-08-08T12:28:47.000Z | 2021-02-01T05:02:11.000Z | ursh/blueprints/api/handlers.py | ThiefMaster/ursh | 74a570299f70986beb7de9b9749270583f16ba52 | [
"MIT"
] | 4 | 2018-07-27T13:37:41.000Z | 2019-05-13T14:42:26.000Z | ursh/blueprints/api/handlers.py | indico/ursh | 74a570299f70986beb7de9b9749270583f16ba52 | [
"MIT"
] | 2 | 2019-08-05T16:26:42.000Z | 2021-01-27T19:53:41.000Z | from flask import current_app, jsonify
def handle_bad_requests(error):
return jsonify({'status': error.code, 'error': error.description}), error.code
def handle_db_errors(error):
return create_error_json(400, 'invalid-input', 'Your input is invalid')
def handle_not_found(error):
return create_error_json(404, 'not-found', error.description.get('message'), args=error.description.get('args'))
def handle_method_not_allowed(error):
return create_error_json(405, 'invalid-method', 'This HTTP method is not allowed')
def handle_conflict(error):
return create_error_json(409, 'conflict', error.description.get('message'), args=error.description.get('args'))
def handle_internal_exceptions(error):
current_app.logger.exception('Unexpected error')
return create_error_json(500, 'internal-error', 'Sorry, something went wrong')
def create_error_json(status_code, error_code, message, **kwargs):
message_dict = {
'status': status_code,
'error': {
'code': error_code,
'description': message
}
}
message_dict['error'].update(kwargs)
return jsonify(message_dict), status_code
| 30.025641 | 116 | 0.716482 |
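The handlers above are plain functions; a sketch of how they might be registered on a Flask application (the app object and the status-code mapping below are illustrative, not taken from the ursh blueprints).
# Hypothetical registration of the error handlers defined above.
from flask import Flask
app = Flask(__name__)
app.register_error_handler(400, handle_bad_requests)
app.register_error_handler(404, handle_not_found)
app.register_error_handler(405, handle_method_not_allowed)
app.register_error_handler(409, handle_conflict)
app.register_error_handler(500, handle_internal_exceptions)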
065f54d57b5c6418b2965567f20d13efeead100d | 5,537 | py | Python | scrapers/modules/dingding.py | skytalemcc/OffshoreNewsHub | 56a12fba8bf740084f988f88134238ab297bb23d | [
"MIT"
] | null | null | null | scrapers/modules/dingding.py | skytalemcc/OffshoreNewsHub | 56a12fba8bf740084f988f88134238ab297bb23d | [
"MIT"
] | null | null | null | scrapers/modules/dingding.py | skytalemcc/OffshoreNewsHub | 56a12fba8bf740084f988f88134238ab297bb23d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import json
import time
import hmac
import hashlib
import base64
if sys.version_info > (3, 0):
from urllib.request import urlopen, Request
from urllib.parse import quote_plus
else:
from urllib2 import urlopen, Request
from urllib import quote_plus
SHOW_AVATAR = "0"  # do not hide the sender's avatar
HIDE_AVATAR = "1"  # hide the sender's avatar
BTN_CROSSWISE = "0"  # horizontal button layout
BTN_LENGTHWAYS = "1"  # vertical button layout
class DingDing(object):
def __init__(self, token):
self.url = self.parse_token(token)
self.headers = {"Content-Type": "application/json"}
self.secret = ""
    def set_secret(self, secret):
        """Set the secret key used for signed (secured) webhook requests.
        """
self.secret = secret
def parse_token(self, token):
"""
:param token:
:return:
"""
ding_url_pre = "https://oapi.dingtalk.com/robot/send?access_token=%s"
token = token.strip()
if len(token) == 64:
return ding_url_pre % token
if len(token) == 114:
return token
raise ValueError("token Error")
def send_text(self, text, at_mobiles=[], at_all=False):
"""
        Example: send_text('The weather is nice', ['13333333333'])
        :param text: the text content of the message
        :param at_mobiles: phone numbers of the users to @-mention, e.g. ['13333333333', ]
        :param at_all: True to @-mention everyone, otherwise False
:return:
"""
data = {
"msgtype": "text",
"text": {"content": text},
"at": {"atMobiles": at_mobiles, "isAtAll": at_all},
}
return self._post(data)
def send_link(self, title, text, message_url="", pic_url=""):
data = {
"msgtype": "link",
"link": {
"text": text,
"title": title,
"picUrl": pic_url,
"messageUrl": message_url,
},
}
return self._post(data)
    def send_markdown(self, title, text, at_mobiles=[], at_all=False):
        """Send a markdown-formatted message.
        :param title: summary text shown in the conversation list (first screen)
        :param text: the message body in markdown format
        :param at_mobiles: phone numbers of the users to @-mention (the @number must also appear in the text)
        :param at_all: True to @-mention everyone, otherwise False
:return:
"""
data = {
"msgtype": "markdown",
"markdown": {"title": title, "text": text},
"at": {"atMobiles": at_mobiles, "isAtAll": at_all},
}
return self._post(data)
def send_single_action_card(
self,
title,
text,
single_title,
single_url,
btn_orientation=BTN_LENGTHWAYS,
hide_avatar=SHOW_AVATAR,
    ):
        """Send an ActionCard message with a single whole-card jump action.
        :param title: summary text shown in the conversation list (first screen)
        :param text: the message body in markdown format
        :param single_title: title of the single button (when this and single_url are set, btns is ignored)
        :param single_url: URL opened when the singleTitle button is clicked
        :param btn_orientation: 0 - buttons stacked vertically, 1 - buttons arranged horizontally
        :param hide_avatar: 0 - show the sender's avatar, 1 - hide the sender's avatar
:return:
"""
data = {
"actionCard": {
"title": title,
"text": text,
"hideAvatar": hide_avatar,
"btnOrientation": btn_orientation,
"singleTitle": single_title,
"singleURL": single_url,
},
"msgtype": "actionCard",
}
return self._post(data)
def send_action_card(
self, title, text, btns, btn_orientation=BTN_LENGTHWAYS, hide_avatar=SHOW_AVATAR
    ):
        """Send an ActionCard message with independently-linked buttons.
        :param title: summary text shown in the conversation list (first screen)
        :param text: the message body in markdown format
        :param btns: button definitions: title - button text, actionURL - URL opened when the button is clicked
        :param btn_orientation: 0 - buttons stacked vertically, 1 - buttons arranged horizontally
        :param hide_avatar: 0 - show the sender's avatar, 1 - hide the sender's avatar
:return:
"""
btns = [{"title": btn[0], "actionURL": btn[1]} for btn in btns]
data = {
"actionCard": {
"title": title,
"text": text,
"hideAvatar": hide_avatar,
"btnOrientation": btn_orientation,
"btns": btns,
},
"msgtype": "actionCard",
}
return self._post(data)
    def send_feed_card(self, rows):
        """Send a FeedCard message.
        Example: send_feed_card([('Learn Vue', 'https://cn.vuejs.org/', 'https://cn.vuejs.org/images/logo.png'),
                                 ('Which framework is best', 'https://cn.vuejs.org/', 'https://cn.vuejs.org/images/logo.png')])
:param rows: [(title, messageURL, picURL), (...)]
:return:
"""
rows = [
{"title": row[0], "messageURL": row[1], "picURL": row[2]} for row in rows
]
data = {"feedCard": {"links": rows}, "msgtype": "feedCard"}
return self._post(data)
def _post(self, data):
if self.secret:
sign, timestamp = self.get_sign_timestamp()
            self.url = self.url + "&sign=" + sign + "&timestamp=" + timestamp
data = json.dumps(data)
req = Request(self.url, data=data.encode("utf-8"), headers=self.headers)
response = urlopen(req)
the_page = response.read()
return json.loads(the_page.decode("utf-8"))
def get_sign_timestamp(self):
timestamp = "%d" % (round(time.time() * 1000))
secret_enc = self.secret.encode("utf-8")
string_to_sign = "{}\n{}".format(timestamp, self.secret)
string_to_sign_enc = string_to_sign.encode("utf-8")
hmac_code = hmac.new(
secret_enc, string_to_sign_enc, digestmod=hashlib.sha256
).digest()
sign = quote_plus(base64.b64encode(hmac_code))
return sign, timestamp | 30.761111 | 100 | 0.543796 |
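Putting the client above together, a short usage sketch; the token, secret and phone number are placeholders.
# Hypothetical end-to-end use of the DingDing client defined above.
ding = DingDing("0" * 64)            # a 64-char access token or a full webhook URL
ding.set_secret("SEC_placeholder")   # optional: enables signed requests
ding.send_text("Build finished", at_mobiles=["13333333333"])
ding.send_markdown("Report", "# Daily report\n- all checks green")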
7c021e37ae8d27952fdf5d16d8854ca86d865d00 | 1,751 | py | Python | startup/SST/HW/gatevalves.py | NSLS-II-SST/profile_collection | e2c9d1fce421e7ed8a60fb744c34a770f7780803 | [
"BSD-3-Clause"
] | null | null | null | startup/SST/HW/gatevalves.py | NSLS-II-SST/profile_collection | e2c9d1fce421e7ed8a60fb744c34a770f7780803 | [
"BSD-3-Clause"
] | 12 | 2019-05-30T15:08:15.000Z | 2021-04-29T03:24:10.000Z | startup/SST/HW/gatevalves.py | NSLS-II-SST/profile_collection | e2c9d1fce421e7ed8a60fb744c34a770f7780803 | [
"BSD-3-Clause"
] | 2 | 2019-05-23T17:13:04.000Z | 2019-10-20T14:52:05.000Z | from ..CommonFunctions.functions import run_report
from ..Base.valves_and_shutters import EPS_Shutter
run_report(__file__)
gv14 = EPS_Shutter(
"XF:07IDA-VA:2{FS:6-GV:1}", name="Pre Mono Gate Valve", kind="hinted"
)
gv14.shutter_type = "GV"
gv14.openval = 0
gv14.closeval = 1
gv14a = EPS_Shutter("XF:07IDA-VA:2{FS:6-GV:2}", name="Mono Gate Valve", kind="hinted")
gv14a.shutter_type = "GV"
gv14a.openval = 0
gv14a.closeval = 1
gv15 = EPS_Shutter(
"XF:07IDB-VA:2{Mono:PGM-GV:1}", name="Pre Shutter Gate Valve", kind="hinted"
)
gv15.shutter_type = "GV"
gv15.openval = 0
gv15.closeval = 1
gv26 = EPS_Shutter(
"XF:07IDB-VA:2{Mir:M3C-GV:1}", name="Post Shutter Gate Valve", kind="hinted"
)
gv26.shutter_type = "GV"
gv26.openval = 1
gv26.closeval = 0
gv27 = EPS_Shutter(
"XF:07IDB-VA:3{Slt:C-GV:1}", name="Upstream Gate Valve", kind="hinted"
)
gv27.shutter_type = "GV"
gv27.openval = 1
gv27.closeval = 0
gv27a = EPS_Shutter(
"XF:07IDB-VA:2{RSoXS:Main-GV:1}", name="Izero-Main Gate Valve", kind="hinted"
)
gv27a.shutter_type = "GV"
gv27a.openval = 1
gv27a.closeval = 0
gv28 = EPS_Shutter(
"XF:07IDB-VA:2{BT:1-GV:1}", name="Downstream Gate Valve", kind="hinted"
)
gv28.shutter_type = "GV"
gv28.openval = 1
gv28.closeval = 0
gvTEM = EPS_Shutter(
"XF:07IDB-VA:2{RSoXS:Main-GV:2}", name="TEM Load Lock Gate Valve", kind="hinted"
)
gvTEM.shutter_type = "GV"
gvTEM.openval = 0
gvTEM.closeval = 1
gvll = EPS_Shutter(
"XF:07IDB-VA:2{RSoXS:LL-GV:1}", name="Load Lock Gate Valve", kind="hinted"
)
gvll.shutter_type = "GV"
gvll.openval = 0
gvll.closeval = 1
gvturbo = EPS_Shutter(
"XF:07IDB-VA:2{RSoXS:TP-GV:1}", name="Turbo Gate Valve", kind="hinted"
)
gvturbo.shutter_type = "GV"
gvturbo.openval = 0
gvturbo.closeval = 1
| 23.662162 | 86 | 0.691605 |
50e1afe0bee7f904b103478709a98baca075ad32 | 7,069 | py | Python | fairnr/modules/implicit.py | wi1k1n/nrf-accelerations | 3075d63177e8ac04ee91784d5b0c56379335740f | [
"MIT"
] | null | null | null | fairnr/modules/implicit.py | wi1k1n/nrf-accelerations | 3075d63177e8ac04ee91784d5b0c56379335740f | [
"MIT"
] | null | null | null | fairnr/modules/implicit.py | wi1k1n/nrf-accelerations | 3075d63177e8ac04ee91784d5b0c56379335740f | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.utils import get_activation_fn
from fairnr.modules.hyper import HyperFC
from fairnr.modules.module_utils import FCLayer
class BackgroundField(nn.Module):
"""
Background (we assume a uniform color)
"""
def __init__(self, out_dim=3, bg_color="1.0,1.0,1.0", min_color=-1, stop_grad=False, background_depth=5.0):
super().__init__()
if out_dim == 3: # directly model RGB
bg_color = [float(b) for b in bg_color.split(',')] if isinstance(bg_color, str) else [bg_color]
if min_color == -1:
bg_color = [b * 2 - 1 for b in bg_color]
if len(bg_color) == 1:
bg_color = bg_color + bg_color + bg_color
bg_color = torch.tensor(bg_color)
else:
bg_color = torch.ones(out_dim).uniform_()
if min_color == -1:
bg_color = bg_color * 2 - 1
self.out_dim = out_dim
self.bg_color = nn.Parameter(bg_color, requires_grad=not stop_grad)
self.depth = background_depth
def forward(self, x, **kwargs):
return self.bg_color.unsqueeze(0).expand(
*x.size()[:-1], self.out_dim)
class ImplicitField(nn.Module):
def __init__(self, in_dim, out_dim, hidden_dim, num_layers,
outmost_linear=False, with_ln=True, skips=None, spec_init=True):
super().__init__()
self.skips = skips
self.net = []
prev_dim = in_dim
for i in range(num_layers):
next_dim = out_dim if i == (num_layers - 1) else hidden_dim
if (i == (num_layers - 1)) and outmost_linear:
self.net.append(nn.Linear(prev_dim, next_dim))
else:
self.net.append(FCLayer(prev_dim, next_dim, with_ln=with_ln))
prev_dim = next_dim
if (self.skips is not None) and (i in self.skips) and (i != (num_layers - 1)):
prev_dim += in_dim
if num_layers > 0:
self.net = nn.ModuleList(self.net)
if spec_init:
self.net.apply(self.init_weights)
def forward(self, x):
y = self.net[0](x)
for i in range(len(self.net) - 1):
if (self.skips is not None) and (i in self.skips):
y = torch.cat((x, y), dim=-1)
y = self.net[i+1](y)
return y
def init_weights(self, m):
if type(m) == nn.Linear:
nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity='relu', mode='fan_in')
# nn.init.uniform_(m.weight)
class HyperImplicitField(nn.Module):
def __init__(self, hyper_in_dim, in_dim, out_dim, hidden_dim, num_layers,
outmost_linear=False):
super().__init__()
self.hyper_in_dim = hyper_in_dim
self.in_dim = in_dim
self.net = HyperFC(
hyper_in_dim,
1, 256,
hidden_dim,
num_layers,
in_dim,
out_dim,
outermost_linear=outmost_linear
)
def forward(self, x, c):
assert (x.size(-1) == self.in_dim) and (c.size(-1) == self.hyper_in_dim)
        # NOTE: `nerfpos` (an optional positional-encoding module) is never set in
        # __init__ above, so guard the lookup to keep the forward pass runnable.
        if getattr(self, 'nerfpos', None) is not None:
            x = torch.cat([x, self.nerfpos(x)], -1)
return self.net(c)(x.unsqueeze(0)).squeeze(0)
class SignedDistanceField(ImplicitField):
"""
Predictor for density or SDF values.
"""
def __init__(self, in_dim, hidden_dim, num_layers=1,
recurrent=False, with_ln=True, spec_init=True):
super().__init__(in_dim, in_dim, in_dim, num_layers-1, with_ln=with_ln, spec_init=spec_init)
self.recurrent = recurrent
if recurrent:
assert num_layers > 1
self.hidden_layer = nn.LSTMCell(input_size=in_dim, hidden_size=hidden_dim)
self.hidden_layer.apply(init_recurrent_weights)
lstm_forget_gate_init(self.hidden_layer)
else:
self.hidden_layer = FCLayer(in_dim, hidden_dim, with_ln) \
if num_layers > 0 else nn.Identity()
prev_dim = hidden_dim if num_layers > 0 else in_dim
self.output_layer = nn.Linear(prev_dim, 1)
def forward(self, x, state=None):
if self.recurrent:
shape = x.size()
state = self.hidden_layer(x.view(-1, shape[-1]), state)
if state[0].requires_grad:
state[0].register_hook(lambda x: x.clamp(min=-5, max=5))
return self.output_layer(state[0].view(*shape[:-1], -1)).squeeze(-1), state
else:
return self.output_layer(self.hidden_layer(x)).squeeze(-1), None
class TextureField(ImplicitField):
"""
Pixel generator based on 1x1 conv networks
"""
def __init__(self, in_dim, hidden_dim, num_layers,
with_alpha=False, with_ln=True, spec_init=True):
out_dim = 3 if not with_alpha else 4
super().__init__(in_dim, out_dim, hidden_dim, num_layers,
outmost_linear=True, with_ln=with_ln, spec_init=spec_init)
class LightTextureField(ImplicitField):
"""
Pixel generator based on 1x1 conv networks
"""
def __init__(self, in_dim, hidden_dim, num_layers,
with_alpha=False, with_ln=True, spec_init=True):
out_dim = 3 if not with_alpha else 4
super().__init__(in_dim, out_dim, hidden_dim, num_layers,
outmost_linear=True, with_ln=with_ln, spec_init=spec_init)
class ExplicitLightTextureField(ImplicitField):
"""
Pixel generator based on 1x1 conv networks
"""
def __init__(self, in_dim, hidden_dim, num_layers,
with_ln=True, spec_init=True, r_dim=4):
out_dim = r_dim
super().__init__(in_dim, out_dim, hidden_dim, num_layers,
outmost_linear=True, with_ln=with_ln, spec_init=spec_init)
def forward(self, x):
y = super().forward(x)
return y
# ------------------ #
# helper functions #
# ------------------ #
def init_recurrent_weights(self):
for m in self.modules():
if type(m) in [nn.GRU, nn.LSTM, nn.RNN]:
for name, param in m.named_parameters():
if 'weight_ih' in name:
nn.init.kaiming_normal_(param.data)
elif 'weight_hh' in name:
nn.init.orthogonal_(param.data)
elif 'bias' in name:
param.data.fill_(0)
def lstm_forget_gate_init(lstm_layer):
for name, parameter in lstm_layer.named_parameters():
if not "bias" in name: continue
n = parameter.size(0)
start, end = n // 4, n // 2
parameter.data[start:end].fill_(1.)
def clip_grad_norm_hook(x, max_norm=10):
total_norm = x.norm()
total_norm = total_norm ** (1 / 2.)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
return x * clip_coef | 35.883249 | 111 | 0.597397 |
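A shape-level sketch of the ImplicitField module defined above, assuming torch and the fairnr FCLayer dependency are importable; the dimensions are arbitrary.
# Hypothetical smoke test for ImplicitField; sizes are illustrative only.
import torch
field = ImplicitField(in_dim=39, out_dim=4, hidden_dim=128,
                      num_layers=3, outmost_linear=True)
x = torch.randn(1024, 39)   # e.g. positionally-encoded sample points
y = field(x)                # forward pass through the stacked FC layers
assert y.shape == (1024, 4)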
4e0a7289f1fe396dc33488ba08b4b3818d61b922 | 1,408 | py | Python | app/tests.py | uvElena/evolab-hello | 7e78a7f3b5db811d5ac123762b48afadf7d3ccf0 | [
"MIT"
] | null | null | null | app/tests.py | uvElena/evolab-hello | 7e78a7f3b5db811d5ac123762b48afadf7d3ccf0 | [
"MIT"
] | null | null | null | app/tests.py | uvElena/evolab-hello | 7e78a7f3b5db811d5ac123762b48afadf7d3ccf0 | [
"MIT"
] | null | null | null | from flask_testing import TestCase
from app import create_app, Configuration
from models import db, User
from flask import url_for
class TestConfiguration(Configuration):
SQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/names.db'
TESTING = True
DEBUG = True
class TestBase(TestCase):
def create_app(self):
app = create_app(TestConfiguration)
return app
def setUp(self):
db.drop_all()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_index(self):
response = self.client.get(url_for('main.index'))
self.assertEqual(response.status_code, 200)
def test_say(self):
response = self.client.post(
url_for('main.say_hi'),
data={'name': 'Олег'}
)
self.assertIn('Привіт, Олег', response.data.decode("utf-8"))
response = self.client.post(
url_for('main.say_hi'),
data={'name': 'Олег'}
)
self.assertIn('Вже бачилися, Олег', response.data.decode("utf-8"))
def test_names(self):
user = User(user_name='Олег')
db.session.add(user)
db.session.commit()
response = self.client.get(
url_for('main.list_names')
)
self.assertIn('Вже бачилися з', response.data.decode("utf-8"))
self.assertIn('Олег', response.data.decode("utf-8"))
| 25.6 | 74 | 0.605114 |
0737d4092eb17450864789128069120ecd7108ef | 981 | py | Python | toughio/relative_permeability/_fatt_klikoff.py | keurfonluu/toughio | 1db0600ee5ad1abb5ca858c81c8ac5226c9dbb4f | [
"BSD-3-Clause-LBNL"
] | 21 | 2020-03-05T20:03:58.000Z | 2022-03-14T23:17:42.000Z | toughio/relative_permeability/_fatt_klikoff.py | keurfonluu/toughio | 1db0600ee5ad1abb5ca858c81c8ac5226c9dbb4f | [
"BSD-3-Clause-LBNL"
] | 60 | 2020-02-14T22:53:01.000Z | 2022-03-26T07:24:19.000Z | toughio/relative_permeability/_fatt_klikoff.py | keurfonluu/toughio | 1db0600ee5ad1abb5ca858c81c8ac5226c9dbb4f | [
"BSD-3-Clause-LBNL"
] | 6 | 2020-02-28T08:15:36.000Z | 2022-03-13T23:26:24.000Z | from ._base import BaseRelativePermeability
__all__ = [
"FattKlikoff",
]
class FattKlikoff(BaseRelativePermeability):
_id = 7
_name = "Fatt-Klikoff"
def __init__(self, slr):
"""
Fatt and Klikoff's function.
After Fatt and Klikoff (1959).
Parameters
----------
slr : scalar
Irreducible liquid saturation (RP(1)).
"""
if slr >= 1.0:
raise ValueError()
self.parameters = [slr]
def _eval(self, sl, *args):
"""Fatt and Klikoff's function."""
(slr,) = args
Seff = (sl - slr) / (1.0 - slr) if sl > slr else 0.0
kl = Seff ** 3
kg = (1.0 - Seff) ** 3
return kl, kg
@property
def parameters(self):
"""Return model parameters."""
return [self._slr]
@parameters.setter
def parameters(self, value):
if len(value) != 1:
raise ValueError()
self._slr = value[0]
| 20.87234 | 60 | 0.517839 |
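A quick numeric check of the model above; _eval is called directly here purely for illustration (the public entry point presumably lives in BaseRelativePermeability).
# Illustrative evaluation of the Fatt-Klikoff curves defined above.
model = FattKlikoff(slr=0.3)
kl, kg = model._eval(0.65, *model.parameters)
# Seff = (0.65 - 0.3) / (1.0 - 0.3) = 0.5, so kl = kg = 0.5 ** 3 = 0.125
assert abs(kl - 0.125) < 1e-12 and abs(kg - 0.125) < 1e-12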
fb7f0a956bb7501920be59607aa153acd6368496 | 15,291 | py | Python | trove/tests/unittests/guestagent/test_mongodb_manager.py | sapcc/trove | c03ec0827687fba202f72f4d264ab70158604857 | [
"Apache-2.0"
] | 1 | 2019-09-20T08:31:54.000Z | 2019-09-20T08:31:54.000Z | trove/tests/unittests/guestagent/test_mongodb_manager.py | sapcc/trove | c03ec0827687fba202f72f4d264ab70158604857 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | trove/tests/unittests/guestagent/test_mongodb_manager.py | sapcc/trove | c03ec0827687fba202f72f4d264ab70158604857 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pymongo
import trove.common.db.mongodb.models as models
import trove.common.utils as utils
import trove.guestagent.backup as backup
from trove.guestagent.common.configuration import ImportOverrideStrategy
import trove.guestagent.datastore.experimental.mongodb.manager as manager
import trove.guestagent.datastore.experimental.mongodb.service as service
import trove.guestagent.volume as volume
from trove.tests.unittests.guestagent.test_datastore_manager import \
DatastoreManagerTest
class GuestAgentMongoDBManagerTest(DatastoreManagerTest):
@mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory')
def setUp(self, _):
super(GuestAgentMongoDBManagerTest, self).setUp('mongodb')
self.manager = manager.Manager()
self.execute_with_timeout_patch = mock.patch.object(
utils, 'execute_with_timeout', return_value=('0', '')
)
self.addCleanup(self.execute_with_timeout_patch.stop)
self.execute_with_timeout_patch.start()
self.pymongo_patch = mock.patch.object(
pymongo, 'MongoClient'
)
self.addCleanup(self.pymongo_patch.stop)
self.pymongo_patch.start()
self.mount_point = '/var/lib/mongodb'
self.host_wildcard = '%' # This is used in the test_*_user tests below
self.serialized_user = {
'_name': 'testdb.testuser', '_password': None,
'_roles': [{'db': 'testdb', 'role': 'testrole'}],
'_username': 'testuser', '_databases': [],
'_host': self.host_wildcard,
'_database': {'_name': 'testdb',
'_character_set': None,
'_collate': None},
'_is_root': False
}
def tearDown(self):
super(GuestAgentMongoDBManagerTest, self).tearDown()
def test_update_status(self):
self.manager.app.status = mock.MagicMock()
self.manager.update_status(self.context)
self.manager.app.status.update.assert_any_call()
def _prepare_method(self, packages=['packages'], databases=None,
memory_mb='2048', users=None, device_path=None,
mount_point=None, backup_info=None,
config_contents=None, root_password=None,
overrides=None, cluster_config=None,):
"""self.manager.app must be correctly mocked before calling."""
self.manager.app.status = mock.Mock()
self.manager.prepare(self.context, packages,
databases, memory_mb, users,
device_path=device_path,
mount_point=mount_point,
backup_info=backup_info,
config_contents=config_contents,
root_password=root_password,
overrides=overrides,
cluster_config=cluster_config)
self.manager.app.status.begin_install.assert_any_call()
self.manager.app.install_if_needed.assert_called_with(packages)
self.manager.app.stop_db.assert_any_call()
self.manager.app.clear_storage.assert_any_call()
(self.manager.app.apply_initial_guestagent_configuration.
assert_called_once_with(cluster_config, self.mount_point))
@mock.patch.object(volume, 'VolumeDevice')
@mock.patch('os.path.exists')
def test_prepare_for_volume(self, exists, mocked_volume):
device_path = '/dev/vdb'
self.manager.app = mock.Mock()
self._prepare_method(device_path=device_path)
mocked_volume().unmount_device.assert_called_with(device_path)
mocked_volume().format.assert_any_call()
mocked_volume().migrate_data.assert_called_with(self.mount_point)
mocked_volume().mount.assert_called_with(self.mount_point)
def test_secure(self):
self.manager.app = mock.Mock()
mock_secure = mock.Mock()
self.manager.app.secure = mock_secure
self._prepare_method()
mock_secure.assert_called_with()
@mock.patch.object(backup, 'restore')
@mock.patch.object(service.MongoDBAdmin, 'is_root_enabled')
def test_prepare_from_backup(self, mocked_root_check, mocked_restore):
self.manager.app = mock.Mock()
backup_info = {'id': 'backup_id_123abc',
'location': 'fake-location',
'type': 'MongoDBDump',
'checksum': 'fake-checksum'}
self._prepare_method(backup_info=backup_info)
mocked_restore.assert_called_with(self.context, backup_info,
'/var/lib/mongodb')
mocked_root_check.assert_any_call()
def test_prepare_with_databases(self):
self.manager.app = mock.Mock()
database = mock.Mock()
mock_create_databases = mock.Mock()
self.manager.create_database = mock_create_databases
self._prepare_method(databases=[database])
mock_create_databases.assert_called_with(self.context, [database])
def test_prepare_with_users(self):
self.manager.app = mock.Mock()
user = mock.Mock()
mock_create_users = mock.Mock()
self.manager.create_user = mock_create_users
self._prepare_method(users=[user])
mock_create_users.assert_called_with(self.context, [user])
@mock.patch.object(service.MongoDBAdmin, 'enable_root')
def test_provide_root_password(self, mocked_enable_root):
self.manager.app = mock.Mock()
self._prepare_method(root_password='test_password')
mocked_enable_root.assert_called_with('test_password')
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
@mock.patch.object(service.MongoDBAdmin, '_get_user_record')
def test_create_user(self, mocked_get_user, mocked_admin_user,
mocked_client):
user = self.serialized_user.copy()
user['_password'] = 'testpassword'
users = [user]
client = mocked_client().__enter__()['testdb']
mocked_get_user.return_value = None
self.manager.create_user(self.context, users)
client.add_user.assert_called_with('testuser', password='testpassword',
roles=[{'db': 'testdb',
'role': 'testrole'}])
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
def test_delete_user(self, mocked_admin_user, mocked_client):
client = mocked_client().__enter__()['testdb']
self.manager.delete_user(self.context, self.serialized_user)
client.remove_user.assert_called_with('testuser')
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
def test_get_user(self, mocked_admin_user, mocked_client):
mocked_find = mock.MagicMock(return_value={
'_id': 'testdb.testuser',
'user': 'testuser', 'db': 'testdb',
'roles': [{'db': 'testdb', 'role': 'testrole'}]
})
client = mocked_client().__enter__().admin
client.system.users.find_one = mocked_find
result = self.manager.get_user(self.context, 'testdb.testuser', None)
mocked_find.assert_called_with({'user': 'testuser', 'db': 'testdb'})
self.assertEqual(self.serialized_user, result)
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
def test_list_users(self, mocked_admin_user, mocked_client):
# roles are NOT returned by list_users
user1 = self.serialized_user.copy()
user2 = self.serialized_user.copy()
user2['_name'] = 'testdb.otheruser'
user2['_username'] = 'otheruser'
user2['_roles'] = [{'db': 'testdb2', 'role': 'readWrite'}]
user2['_databases'] = [{'_name': 'testdb2',
'_character_set': None,
'_collate': None}]
mocked_find = mock.MagicMock(return_value=[
{
'_id': 'admin.os_admin',
'user': 'os_admin', 'db': 'admin',
'roles': [{'db': 'admin', 'role': 'root'}]
},
{
'_id': 'testdb.testuser',
'user': 'testuser', 'db': 'testdb',
'roles': [{'db': 'testdb', 'role': 'testrole'}]
},
{
'_id': 'testdb.otheruser',
'user': 'otheruser', 'db': 'testdb',
'roles': [{'db': 'testdb2', 'role': 'readWrite'}]
}
])
client = mocked_client().__enter__().admin
client.system.users.find = mocked_find
users, next_marker = self.manager.list_users(self.context)
self.assertIsNone(next_marker)
self.assertEqual(sorted([user1, user2], key=lambda x: x['_name']),
users)
@mock.patch.object(service.MongoDBAdmin, 'create_validated_user')
@mock.patch.object(utils, 'generate_random_password',
return_value='password')
def test_enable_root(self, mock_gen_rand_pwd, mock_create_user):
root_user = {'_name': 'admin.root',
'_username': 'root',
'_database': {'_name': 'admin',
'_character_set': None,
'_collate': None},
'_password': 'password',
'_roles': [{'db': 'admin', 'role': 'root'}],
'_databases': [],
'_host': self.host_wildcard,
'_is_root': True}
result = self.manager.enable_root(self.context)
self.assertTrue(mock_create_user.called)
self.assertEqual(root_user, result)
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
@mock.patch.object(service.MongoDBAdmin, '_get_user_record',
return_value=models.MongoDBUser('testdb.testuser'))
def test_grant_access(self, mocked_get_user,
mocked_admin_user, mocked_client):
client = mocked_client().__enter__()['testdb']
self.manager.grant_access(self.context, 'testdb.testuser',
None, ['db1', 'db2', 'db3'])
client.add_user.assert_called_with('testuser', roles=[
{'db': 'db1', 'role': 'readWrite'},
{'db': 'db2', 'role': 'readWrite'},
{'db': 'db3', 'role': 'readWrite'}
])
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
@mock.patch.object(service.MongoDBAdmin, '_get_user_record',
return_value=models.MongoDBUser('testdb.testuser'))
def test_revoke_access(self, mocked_get_user,
mocked_admin_user, mocked_client):
client = mocked_client().__enter__()['testdb']
mocked_get_user.return_value.roles = [
{'db': 'db1', 'role': 'readWrite'},
{'db': 'db2', 'role': 'readWrite'},
{'db': 'db3', 'role': 'readWrite'}
]
self.manager.revoke_access(self.context, 'testdb.testuser',
None, 'db2')
client.add_user.assert_called_with('testuser', roles=[
{'db': 'db1', 'role': 'readWrite'},
{'db': 'db3', 'role': 'readWrite'}
])
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
@mock.patch.object(service.MongoDBAdmin, '_get_user_record',
return_value=models.MongoDBUser('testdb.testuser'))
def test_list_access(self, mocked_get_user,
mocked_admin_user, mocked_client):
mocked_get_user.return_value.roles = [
{'db': 'db1', 'role': 'readWrite'},
{'db': 'db2', 'role': 'readWrite'},
{'db': 'db3', 'role': 'readWrite'}
]
accessible_databases = self.manager.list_access(
self.context, 'testdb.testuser', None
)
self.assertEqual(['db1', 'db2', 'db3'],
[db['_name'] for db in accessible_databases])
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
def test_create_databases(self, mocked_admin_user, mocked_client):
schema = models.MongoDBSchema('testdb').serialize()
db_client = mocked_client().__enter__()['testdb']
self.manager.create_database(self.context, [schema])
# FIXME(songjian):can not create database with null content,
# so create a collection
# db_client['dummy'].insert.assert_called_with({'dummy': True})
# db_client.drop_collection.assert_called_with('dummy')
db_client.create_collection.assert_called_with('dummy')
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
def test_list_databases(self, # mocked_ignored_dbs,
mocked_admin_user, mocked_client):
# This list contains the special 'admin', 'local' and 'config' dbs;
# the special dbs should be skipped in the output.
# Pagination is tested by starting at 'db1', so 'db0' should not
# be in the output. The limit is set to 2, meaning the result
# should be 'db1' and 'db2'. The next_marker should be 'db3'.
mocked_list = mock.MagicMock(
return_value=['admin', 'local', 'config',
'db0', 'db1', 'db2', 'db3'])
mocked_client().__enter__().database_names = mocked_list
dbs, next_marker = self.manager.list_databases(
self.context, limit=2, marker='db1', include_marker=True)
mocked_list.assert_any_call()
self.assertEqual([models.MongoDBSchema('db1').serialize(),
models.MongoDBSchema('db2').serialize()],
dbs)
self.assertEqual('db2', next_marker)
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
def test_delete_database(self, mocked_admin_user, mocked_client):
schema = models.MongoDBSchema('testdb').serialize()
self.manager.delete_database(self.context, schema)
mocked_client().__enter__().drop_database.assert_called_with('testdb')
| 41.104839 | 79 | 0.613171 |
6559c02f0935515675acead288c966c86aaf3a67 | 2,613 | py | Python | language/bert_extraction/steal_bert_classifier/embedding_perturbations/merge_shards.py | wanchenbest/language | 623e016a76eaeb054d09a4173db1026685bc5b08 | [
"Apache-2.0"
] | 1 | 2020-02-12T17:27:05.000Z | 2020-02-12T17:27:05.000Z | language/bert_extraction/steal_bert_classifier/embedding_perturbations/merge_shards.py | wanchenbest/language | 623e016a76eaeb054d09a4173db1026685bc5b08 | [
"Apache-2.0"
] | null | null | null | language/bert_extraction/steal_bert_classifier/embedding_perturbations/merge_shards.py | wanchenbest/language | 623e016a76eaeb054d09a4173db1026685bc5b08 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to combine multiple shards from discrete_invert_embeddings to a single dataset."""
import tensorflow as tf
from tqdm import tqdm
app = tf.compat.v1.app
flags = tf.flags
gfile = tf.gfile
logging = tf.logging
flags.DEFINE_string("shards_pattern", None,
"Glob pattern to identify the set of files to be combined")
flags.DEFINE_string("task_name", "mnli",
"Task name to understand the input data format")
flags.DEFINE_string("output_path", None,
"Output path where the combined dataset is exported")
FLAGS = flags.FLAGS
num_labels = {"sst2": 2, "mnli": 3}
relevant_headers = {
"sst2": ["original_index", "sentence"],
"mnli": ["original_index", "sentence1", "sentence2"]
}
def main(_):
task_name = FLAGS.task_name.lower()
  # skip the original index header for the final file for compatibility with
# run_classifier_distillation.py
output_data = ["index\t" + "\t".join(relevant_headers[task_name][1:])]
shards = gfile.Glob(FLAGS.shards_pattern)
# sort the shard according to their starting point
shards.sort(key=lambda x: int(x[x.rfind(".") + 1:x.rfind("-")]))
for shard in tqdm(shards):
logging.info("Loading file %s", shard)
with gfile.Open(shard, "r") as f:
# read the dataset ignoring the header
dataset = f.read().strip().split("\n")
header = dataset[0].split("\t")
dataset = dataset[1:]
relevant_indices = [header.index(x) for x in relevant_headers[task_name]]
logging.info("Dataset size = %d, Relevant indices = %s", len(dataset),
relevant_indices)
for point in dataset:
point_parts = point.split("\t")
output_data.append("\t".join([point_parts[x] for x in relevant_indices]))
logging.info("Final dataset of size %d from %d files",
len(output_data) - 1, len(shards))
with gfile.Open(FLAGS.output_path, "w") as f:
f.write("\n".join(output_data) + "\n")
if __name__ == "__main__":
app.run(main)
| 34.84 | 92 | 0.684654 |
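The script above is driven entirely by the three flags it defines; a hypothetical invocation (the paths below are placeholders) is sketched in the comments.
# Hypothetical command line for the script above; paths are placeholders.
#   python merge_shards.py \
#       --shards_pattern="/data/shards/pool_data.*-of-*.tsv" \
#       --task_name=mnli \
#       --output_path=/data/merged_pool_data.tsv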
1daeb2e3b47b4743c3b95537f96f065be8b0d51b | 6,976 | py | Python | cherrypy/test/test_logging.py | alan412/cherrypy | 52d16c40032158ab5965251bf750659b3c0f4de3 | [
"BSD-3-Clause"
] | null | null | null | cherrypy/test/test_logging.py | alan412/cherrypy | 52d16c40032158ab5965251bf750659b3c0f4de3 | [
"BSD-3-Clause"
] | null | null | null | cherrypy/test/test_logging.py | alan412/cherrypy | 52d16c40032158ab5965251bf750659b3c0f4de3 | [
"BSD-3-Clause"
] | null | null | null | """Basic tests for the CherryPy core: request handling."""
import logging
import os
from unittest import mock
import pytest
import requests  # FIXME: Temporarily using it directly; better to switch
import cherrypy
from cherrypy._cpcompat import ntou
from cherrypy.test import helper, logtest
localDir = os.path.dirname(__file__)
access_log = os.path.join(localDir, 'access.log')
error_log = os.path.join(localDir, 'error.log')
# Some unicode strings.
tartaros = ntou('\u03a4\u1f71\u03c1\u03c4\u03b1\u03c1\u03bf\u03c2', 'escape')
erebos = ntou('\u0388\u03c1\u03b5\u03b2\u03bf\u03c2.com', 'escape')
@pytest.fixture
def server():
setup_server()
cherrypy.engine.start()
yield
shutdown_server()
def shutdown_server():
cherrypy.engine.exit()
servers_copy = list(getattr(cherrypy, 'servers', {}).items())
for name, server in servers_copy:
server.unsubscribe()
del cherrypy.servers[name]
def setup_server():
class Root:
@cherrypy.expose
def index(self):
return 'hello'
@cherrypy.expose
def uni_code(self):
cherrypy.request.login = tartaros
cherrypy.request.remote.name = erebos
@cherrypy.expose
def slashes(self):
cherrypy.request.request_line = r'GET /slashed\path HTTP/1.1'
@cherrypy.expose
def whitespace(self):
# User-Agent = "User-Agent" ":" 1*( product | comment )
# comment = "(" *( ctext | quoted-pair | comment ) ")"
# ctext = <any TEXT excluding "(" and ")">
# TEXT = <any OCTET except CTLs, but including LWS>
# LWS = [CRLF] 1*( SP | HT )
cherrypy.request.headers['User-Agent'] = 'Browzuh (1.0\r\n\t\t.3)'
@cherrypy.expose
def as_string(self):
return 'content'
@cherrypy.expose
def as_yield(self):
yield 'content'
@cherrypy.expose
@cherrypy.config(**{'tools.log_tracebacks.on': True})
def error(self):
raise ValueError()
root = Root()
cherrypy.config.update({
'log.error_file': error_log,
'log.access_file': access_log,
})
cherrypy.tree.mount(root)
class AccessLogTests(helper.CPWebCase, logtest.LogCase):
setup_server = staticmethod(setup_server)
logfile = access_log
def testNormalReturn(self):
self.markLog()
self.getPage('/as_string',
headers=[('Referer', 'http://www.cherrypy.org/'),
('User-Agent', 'Mozilla/5.0')])
self.assertBody('content')
self.assertStatus(200)
intro = '%s - - [' % self.interface()
self.assertLog(-1, intro)
if [k for k, v in self.headers if k.lower() == 'content-length']:
self.assertLog(-1, '] "GET %s/as_string HTTP/1.1" 200 7 '
'"http://www.cherrypy.org/" "Mozilla/5.0"'
% self.prefix())
else:
self.assertLog(-1, '] "GET %s/as_string HTTP/1.1" 200 - '
'"http://www.cherrypy.org/" "Mozilla/5.0"'
% self.prefix())
def testNormalYield(self):
self.markLog()
self.getPage('/as_yield')
self.assertBody('content')
self.assertStatus(200)
intro = '%s - - [' % self.interface()
self.assertLog(-1, intro)
if [k for k, v in self.headers if k.lower() == 'content-length']:
self.assertLog(-1, '] "GET %s/as_yield HTTP/1.1" 200 7 "" ""' %
self.prefix())
else:
self.assertLog(-1, '] "GET %s/as_yield HTTP/1.1" 200 - "" ""'
% self.prefix())
@mock.patch(
'cherrypy._cplogging.LogManager.access_log_format',
'{h} {l} {u} {t} "{r}" {s} {b} "{f}" "{a}" {o}',
)
def testCustomLogFormat(self):
"""Test a customized access_log_format string, which is a
feature of _cplogging.LogManager.access()."""
self.markLog()
self.getPage('/as_string', headers=[('Referer', 'REFERER'),
('User-Agent', 'USERAGENT'),
('Host', 'HOST')])
self.assertLog(-1, '%s - - [' % self.interface())
self.assertLog(-1, '] "GET /as_string HTTP/1.1" '
'200 7 "REFERER" "USERAGENT" HOST')
@mock.patch(
'cherrypy._cplogging.LogManager.access_log_format',
'{h} {l} {u} {z} "{r}" {s} {b} "{f}" "{a}" {o}',
)
def testTimezLogFormat(self):
"""Test a customized access_log_format string, which is a
feature of _cplogging.LogManager.access()."""
self.markLog()
expected_time = str(cherrypy._cplogging.LazyRfc3339UtcTime())
with mock.patch(
'cherrypy._cplogging.LazyRfc3339UtcTime',
lambda: expected_time):
self.getPage('/as_string', headers=[('Referer', 'REFERER'),
('User-Agent', 'USERAGENT'),
('Host', 'HOST')])
self.assertLog(-1, '%s - - ' % self.interface())
self.assertLog(-1, expected_time)
self.assertLog(-1, ' "GET /as_string HTTP/1.1" '
'200 7 "REFERER" "USERAGENT" HOST')
@mock.patch(
'cherrypy._cplogging.LogManager.access_log_format',
'{i}',
)
def testUUIDv4ParameterLogFormat(self):
"""Test rendering of UUID4 within access log."""
self.markLog()
self.getPage('/as_string')
self.assertValidUUIDv4()
def testEscapedOutput(self):
# Test unicode in access log pieces.
self.markLog()
self.getPage('/uni_code')
self.assertStatus(200)
# The repr of a bytestring includes a b'' prefix
self.assertLog(-1, repr(tartaros.encode('utf8'))[2:-1])
# Test the erebos value. Included inline for your enlightenment.
# Note the 'r' prefix--those backslashes are literals.
self.assertLog(-1, r'\xce\x88\xcf\x81\xce\xb5\xce\xb2\xce\xbf\xcf\x82')
# Test backslashes in output.
self.markLog()
self.getPage('/slashes')
self.assertStatus(200)
self.assertLog(-1, b'"GET /slashed\\path HTTP/1.1"')
# Test whitespace in output.
self.markLog()
self.getPage('/whitespace')
self.assertStatus(200)
# Again, note the 'r' prefix.
self.assertLog(-1, r'"Browzuh (1.0\r\n\t\t.3)"')
def test_tracebacks(server, caplog):
with caplog.at_level(logging.ERROR, logger='cherrypy.error'):
resp = requests.get('http://127.0.0.1:8080/error')
rec = caplog.records[0]
exc_cls, exc_msg = rec.exc_info[0], rec.message
assert 'raise ValueError()' in resp.text
assert 'HTTP' in exc_msg
assert exc_cls is ValueError
| 32.751174 | 79 | 0.557769 |
3f0a867f17dc7d5048a7c558490745488924312a | 601 | py | Python | load_image.py | KirillYabl/Space-instagram | 3b7d6ada3e7d0d3e500578ea450d4aa0ad8cffc2 | [
"MIT"
] | null | null | null | load_image.py | KirillYabl/Space-instagram | 3b7d6ada3e7d0d3e500578ea450d4aa0ad8cffc2 | [
"MIT"
] | null | null | null | load_image.py | KirillYabl/Space-instagram | 3b7d6ada3e7d0d3e500578ea450d4aa0ad8cffc2 | [
"MIT"
] | null | null | null | import requests
def load_image(url, path):
"""Load image by url to file.
Params
--------------------------------------------
:param url: str
Url with image.
:param path: str
Path, where image will be saved.
--------------------------------------------
"""
response = requests.get(url)
# check HTTPError
response.raise_for_status()
# some sites can return 200 and write error in body
    if 'error' in response.text:
        raise requests.exceptions.HTTPError(response.text)
with open(path, 'wb') as f:
f.write(response.content)
| 24.04 | 62 | 0.532446 |
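A one-line usage sketch for the helper above; the URL and target filename are placeholders.
# Hypothetical call; the URL and output path are placeholders.
load_image('https://example.com/space.jpg', 'space.jpg')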
31fdd51148a54f67691ea88534be9eac34f552ff | 7,374 | py | Python | finnhub/models/forex_candles.py | gavinjay/finnhub-python | b5c409dafeda390d14a2b0618ae6f25ab8d76c5b | [
"Apache-2.0"
] | null | null | null | finnhub/models/forex_candles.py | gavinjay/finnhub-python | b5c409dafeda390d14a2b0618ae6f25ab8d76c5b | [
"Apache-2.0"
] | null | null | null | finnhub/models/forex_candles.py | gavinjay/finnhub-python | b5c409dafeda390d14a2b0618ae6f25ab8d76c5b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Finnhub API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from finnhub.configuration import Configuration
class ForexCandles(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'o': 'list[float]',
'h': 'list[float]',
'l': 'list[float]',
'c': 'list[float]',
'v': 'list[float]',
't': 'list[float]',
's': 'str'
}
attribute_map = {
'o': 'o',
'h': 'h',
'l': 'l',
'c': 'c',
'v': 'v',
't': 't',
's': 's'
}
def __init__(self, o=None, h=None, l=None, c=None, v=None, t=None, s=None, local_vars_configuration=None): # noqa: E501
"""ForexCandles - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._o = None
self._h = None
self._l = None
self._c = None
self._v = None
self._t = None
self._s = None
self.discriminator = None
if o is not None:
self.o = o
if h is not None:
self.h = h
if l is not None:
self.l = l
if c is not None:
self.c = c
if v is not None:
self.v = v
if t is not None:
self.t = t
if s is not None:
self.s = s
@property
def o(self):
"""Gets the o of this ForexCandles. # noqa: E501
List of open prices for returned candles. # noqa: E501
:return: The o of this ForexCandles. # noqa: E501
:rtype: list[float]
"""
return self._o
@o.setter
def o(self, o):
"""Sets the o of this ForexCandles.
List of open prices for returned candles. # noqa: E501
:param o: The o of this ForexCandles. # noqa: E501
:type: list[float]
"""
self._o = o
@property
def h(self):
"""Gets the h of this ForexCandles. # noqa: E501
List of high prices for returned candles. # noqa: E501
:return: The h of this ForexCandles. # noqa: E501
:rtype: list[float]
"""
return self._h
@h.setter
def h(self, h):
"""Sets the h of this ForexCandles.
List of high prices for returned candles. # noqa: E501
:param h: The h of this ForexCandles. # noqa: E501
:type: list[float]
"""
self._h = h
@property
def l(self):
"""Gets the l of this ForexCandles. # noqa: E501
List of low prices for returned candles. # noqa: E501
:return: The l of this ForexCandles. # noqa: E501
:rtype: list[float]
"""
return self._l
@l.setter
def l(self, l):
"""Sets the l of this ForexCandles.
List of low prices for returned candles. # noqa: E501
:param l: The l of this ForexCandles. # noqa: E501
:type: list[float]
"""
self._l = l
@property
def c(self):
"""Gets the c of this ForexCandles. # noqa: E501
List of close prices for returned candles. # noqa: E501
:return: The c of this ForexCandles. # noqa: E501
:rtype: list[float]
"""
return self._c
@c.setter
def c(self, c):
"""Sets the c of this ForexCandles.
List of close prices for returned candles. # noqa: E501
:param c: The c of this ForexCandles. # noqa: E501
:type: list[float]
"""
self._c = c
@property
def v(self):
"""Gets the v of this ForexCandles. # noqa: E501
List of volume data for returned candles. # noqa: E501
:return: The v of this ForexCandles. # noqa: E501
:rtype: list[float]
"""
return self._v
@v.setter
def v(self, v):
"""Sets the v of this ForexCandles.
List of volume data for returned candles. # noqa: E501
:param v: The v of this ForexCandles. # noqa: E501
:type: list[float]
"""
self._v = v
@property
def t(self):
"""Gets the t of this ForexCandles. # noqa: E501
List of timestamp for returned candles. # noqa: E501
:return: The t of this ForexCandles. # noqa: E501
:rtype: list[float]
"""
return self._t
@t.setter
def t(self, t):
"""Sets the t of this ForexCandles.
List of timestamp for returned candles. # noqa: E501
:param t: The t of this ForexCandles. # noqa: E501
:type: list[float]
"""
self._t = t
@property
def s(self):
"""Gets the s of this ForexCandles. # noqa: E501
Status of the response. This field can either be ok or no_data. # noqa: E501
:return: The s of this ForexCandles. # noqa: E501
:rtype: str
"""
return self._s
@s.setter
def s(self, s):
"""Sets the s of this ForexCandles.
Status of the response. This field can either be ok or no_data. # noqa: E501
:param s: The s of this ForexCandles. # noqa: E501
:type: str
"""
self._s = s
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ForexCandles):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ForexCandles):
return True
return self.to_dict() != other.to_dict()
| 25.340206 | 124 | 0.532818 |
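Since the generated model above is a plain container, constructing and serializing one looks like this; the candle values are made up purely to show the shape of the payload.
# Hypothetical construction of the ForexCandles model defined above.
candles = ForexCandles(
    o=[1.1010, 1.1015], h=[1.1020, 1.1030],
    l=[1.1005, 1.1010], c=[1.1015, 1.1025],
    v=[1500.0, 1750.0], t=[1590988249.0, 1590988309.0],
    s="ok",
)
print(candles.to_dict())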
7e027d6e73033544a27a309e29f96dea20bd14cf | 1,386 | py | Python | tests/test_hrm.py | MirkoLedda/polyoligo | c9fc952fbc7315f426a137313fb36cd16a5e5957 | [
"BSD-2-Clause"
] | 3 | 2019-07-26T20:09:50.000Z | 2022-01-11T00:56:45.000Z | tests/test_hrm.py | MirkoLedda/polyoligo | c9fc952fbc7315f426a137313fb36cd16a5e5957 | [
"BSD-2-Clause"
] | 1 | 2021-04-21T13:27:45.000Z | 2021-04-21T13:27:45.000Z | tests/test_hrm.py | MirkoLedda/polyoligo | c9fc952fbc7315f426a137313fb36cd16a5e5957 | [
"BSD-2-Clause"
] | 2 | 2020-02-10T22:34:15.000Z | 2022-03-01T21:29:01.000Z | import sys
import os
import yaml
sys.path.insert(0, os.path.abspath("."))
from src.polyoligo import cli_hrm
with open("tests/KWARGS.yaml", "r") as f:
KWARGS = yaml.safe_load(f)
cli_hrm.main(strcmd=" ".join([
"polyoligo-hrm",
KWARGS["marker"],
KWARGS["out"],
KWARGS["reference"],
"--webapp",
"--debug",
]))
cli_hrm.main(strcmd=" ".join([
"polyoligo-hrm",
KWARGS["marker"],
KWARGS["out"],
KWARGS["ref_fasta"],
]))
cli_hrm.main(strcmd=" ".join([
"polyoligo-hrm",
KWARGS["marker_indels"],
KWARGS["out"],
KWARGS["reference"],
"--webapp",
"--debug",
]))
cli_hrm.main(strcmd=" ".join([
"polyoligo-hrm",
KWARGS["marker_tomato"],
KWARGS["out"],
KWARGS["ref_tomato"],
"--webapp",
"--debug"
]))
cli_hrm.main(strcmd=" ".join([
"polyoligo-hrm",
KWARGS["marker_lim"],
KWARGS["out"],
KWARGS["reference"],
"-n {}".format(KWARGS["n"]),
"--vcf {}".format(KWARGS["vcf"]),
"--vcf_include {}".format(KWARGS["vcf_include"]),
"--depth {}".format(KWARGS["depth"]),
"--tm_delta {}".format(KWARGS["tm_delta"]),
"--seed {}".format(KWARGS["seed"]),
"--offtarget_min_size {}".format(KWARGS["offtarget_min_size"]),
"--offtarget_max_size {}".format(KWARGS["offtarget_max_size"]),
"--primer3 {}".format(KWARGS["primer3_hrm"]),
"-nt {}".format(KWARGS["nt"]),
]))
| 22.354839 | 67 | 0.580808 |
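The calls above read every value from tests/KWARGS.yaml; a minimal direct call with literal placeholder arguments would look roughly like this.
# Hypothetical direct invocation without the KWARGS.yaml fixture;
# the marker file, output prefix and reference path are placeholders.
cli_hrm.main(strcmd=" ".join([
    "polyoligo-hrm",
    "data/markers.txt",   # marker input (placeholder)
    "out/hrm_run",        # output prefix (placeholder)
    "data/reference.fa",  # reference genome (placeholder)
    "--debug",
]))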
cddd6da012d261dcc9c3eed5fcba9bd8ece70578 | 8,406 | py | Python | test/test_document.py | vanatteveldt/corenlp-xml-lib | b658ccc5d54901f4d4bcf6a540d7ef69fb853e78 | [
"Apache-2.0"
] | 7 | 2015-08-20T20:43:43.000Z | 2018-12-17T01:32:03.000Z | test/test_document.py | vanatteveldt/corenlp-xml-lib | b658ccc5d54901f4d4bcf6a540d7ef69fb853e78 | [
"Apache-2.0"
] | 3 | 2016-05-15T11:30:41.000Z | 2017-06-07T23:24:45.000Z | test/test_document.py | vanatteveldt/corenlp-xml-lib | b658ccc5d54901f4d4bcf6a540d7ef69fb853e78 | [
"Apache-2.0"
] | 4 | 2015-09-05T04:57:07.000Z | 2018-08-07T13:55:29.000Z | import os
import sys
sys.path.insert(0, os.path.join(".."))
import unittest
from corenlp_xml.document import Document, Sentence, Token, TokenList
from corenlp_xml.dependencies import DependencyNode
from collections import OrderedDict
from nltk import Tree
class TestDocument(unittest.TestCase):
def setUp(self):
with open("test.xml", "r") as xml_file:
self._document = Document(xml_file.read())
def test_sentiment(self):
self.assertIsNone(self._document._sentiment, "Sentiment should be lazy-loaded")
expected = 1.2173913043478262
self.assertEquals(expected, self._document.sentiment, "Sentiment should be returned for public property")
self.assertIsNotNone(self._document._sentiment, "Sentiment should be memoized")
self.assertEquals(expected, self._document._sentiment, "Sentiment should be memoized")
def test_sentences(self):
self.assertIsNone(self._document._sentences_dict, "Sentences should be lazy-loaded")
sentences = self._document._get_sentences_dict().values()
self.assertIsNotNone(self._document._sentences_dict, "Sentences should be memoized")
self.assertGreater(len(sentences), 0, "We should have sentences")
for sentence in sentences:
self.assertIsInstance(sentence, Sentence, "Sentences should be a list of only sentences")
self.assertEquals(self._document.sentences, sentences, "Sentences property should work")
self.assertIsInstance(self._document._sentences_dict, OrderedDict, "Protected sentences should be ordered")
def test_get_sentence_by_id(self):
sentence = self._document.get_sentence_by_id(1)
self.assertIsInstance(sentence, Sentence, "Should return a Sentence instance")
self.assertEquals(sentence.id, 1, "Sentence returned should have the appropriate ID")
self.assertIsNone(self._document.get_sentence_by_id(-1), "If the ID doesn't exist, we should get None")
class TestSentence(unittest.TestCase):
""" Tests the Sentence class """
def setUp(self):
""" It would probably be a good idea to look into Mock, eventually """
with open("test.xml", "r") as xml_file:
self._document = Document(xml_file.read())
self._sentence = self._document.sentences[0]
def test_id(self):
""" Value isn't None because it's initialized when you create a Document. Hence why mocks might be nice. """
self.assertEquals(1, self._sentence.id, "ID should be an int")
self.assertIsNotNone(self._sentence._id, "id property should be memoized")
def test_sentiment(self):
self.assertIsNone(self._sentence._sentiment, "Sentiment should be lazy-loaded")
self.assertEquals(1, self._sentence.sentiment, "Sentiment should be an int")
def test_tokens(self):
self.assertIsNone(self._sentence._tokens_dict, "Tokens should be lazy-loaded")
self.assertGreater(len(self._sentence.tokens), 0, "Tokens should be generated")
self.assertIsInstance(self._sentence.tokens, TokenList, "Tokens should be a tokenlist")
self.assertEquals(str(self._sentence.tokens),
" ".join([token.word for token in self._sentence.tokens]),
"toString function of tokenlist should be words")
for token in self._sentence.tokens:
self.assertIsInstance(token, Token, "Tokens should all be of class Token")
self.assertIsNotNone(self._sentence._tokens_dict, "Tokens should be memoized")
self.assertIsInstance(self._sentence._tokens_dict, OrderedDict, "Protected tokens should be ordered")
def test_head(self):
self.assertIsInstance(self._sentence.semantic_head, DependencyNode)
self.assertEquals(self._sentence.semantic_head.text, "demonstrates")
def test_phrase_strings(self):
self.assertIn("a flawed property", self._sentence.phrase_strings("NP"))
def test_subtrees_for_phrase(self):
t = self._sentence.subtrees_for_phrase("NP")[0]
self.assertIsInstance(t, Tree)
self.assertEquals("property", t[-1].leaves()[0])
def test_get_token_by_id(self):
token = self._sentence.get_token_by_id(1)
self.assertIsInstance(token, Token, "Should return a Token instance")
self.assertEquals(token.id, 1, "Token returned should have the appropriate ID")
self.assertIsNone(self._sentence.get_token_by_id(-1), "If the ID doesn't exist, we should get None")
def test_parse(self):
self.assertIsNone(self._sentence._parse, "Parse should be lazy-loaded")
parse = self._sentence.parse
self.assertIsInstance(parse, Tree, "Parse should be an nltk.Tree instance")
self.assertIsInstance(self._sentence._parse, Tree, "Parse should be memoized")
class TestToken(unittest.TestCase):
""" Tests the Token class """
def setUp(self):
""" It would probably be a good idea to look into Mock, eventually """
with open("test.xml", "r") as xml_file:
self._document = Document(xml_file.read())
self._sentence = self._document.sentences[0]
self._token = self._sentence.tokens[0]
def test_id(self):
""" Value isn't None because it's initialized when getting tokens from sent. Hence why mocks might be nice. """
self.assertIsNotNone(self._token._id, "id property should be memoized")
self.assertEquals(1, self._token.id)
def test_word(self):
self.assertIsNone(self._token._word, "Word should be lazy-loaded")
# BREAKAGE WARNING: depends on the current state of xml file test.xml
self.assertEquals("Taking", self._token.word, "Word should be string value of word")
self.assertIsNotNone(self._token._word, "Word property should be memoized")
def test_lemma(self):
self.assertIsNone(self._token._lemma, "Lemma should be lazy-loaded")
# BREAKAGE WARNING: depends on the current state of xml file test.xml
self.assertEquals("take", self._token.lemma, "Word should be string value of word")
self.assertIsNotNone(self._token._lemma, "Lemma property should be memoized")
def test_character_offset_begin(self):
self.assertIsNone(self._token._character_offset_begin, "Character offset being should be lazy-loaded")
self.assertIsNotNone(self._token.character_offset_begin, "Character offset should be accessible")
self.assertIsNotNone(self._token._character_offset_begin, "Character offset property should be memoized")
def test_character_offset_end(self):
self.assertIsNone(self._token._character_offset_end, "Character offset being should be lazy-loaded")
self.assertIsNotNone(self._token.character_offset_end, "Character offset should be accessible")
self.assertIsNotNone(self._token._character_offset_end, "Character offset property should be memoized")
def test_pos(self):
self.assertIsNone(self._token._pos, "POS should be lazy-loaded")
# BREAKAGE WARNING: depends on the current state of xml file test.xml
self.assertEquals("VBG", self._token.pos, "POS should be a POS, yo")
self.assertIsNotNone(self._token._pos, "POS property should be memoized")
def test_ner(self):
self.assertIsNone(self._token._ner, "NER should be lazy-loaded")
# BREAKAGE WARNING: depends on the current state of xml file test.xml
self.assertEquals("O", self._token.ner, "NER should be a NER, yo")
self.assertIsNotNone(self._token._ner, "NER property should be memoized")
def test_speaker(self):
self.assertIsNone(self._token._speaker, "Speaker should be lazy-loaded")
# BREAKAGE WARNING: depends on the current state of xml file test.xml
self.assertEquals("PER0", self._token.speaker, "Speaker should be a speaker son")
self.assertIsNotNone(self._token._speaker, "speaker property should be memoized")
def suite():
"""
Generates test suite
"""
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestDocument))
test_suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestSentence))
test_suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestToken))
return test_suite
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=1).run(suite()) | 50.945455 | 119 | 0.711516 |
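The assertions above double as documentation of the public API; a compact standalone sketch follows (test.xml is the CoreNLP XML fixture used by these tests).
# Hypothetical standalone use of the API exercised by the tests above.
from corenlp_xml.document import Document
with open("test.xml", "r") as xml_file:
    doc = Document(xml_file.read())
sentence = doc.sentences[0]
print(doc.sentiment, sentence.sentiment)
print([(t.word, t.pos) for t in sentence.tokens])
print(sentence.semantic_head.text)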
3a4a9f497426f43921aed2082d095fc4cb18f59e | 4,930 | py | Python | src/models/utils.py | matejklemen/pcl-detection-semeval2022t4 | f293dc5a5d248e3ac7e0a52ff4f928f53294749f | ["MIT"] | null | null | null | src/models/utils.py | matejklemen/pcl-detection-semeval2022t4 | f293dc5a5d248e3ac7e0a52ff4f928f53294749f | ["MIT"] | null | null | null | src/models/utils.py | matejklemen/pcl-detection-semeval2022t4 | f293dc5a5d248e3ac7e0a52ff4f928f53294749f | ["MIT"] | null | null | null |
import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score
from transformers import BertTokenizerFast, DistilBertTokenizerFast, RobertaTokenizerFast, XLMRobertaTokenizerFast, XLNetTokenizerFast
KEYWORDS = ["migrant", "women", "vulnerable", "refugee", "homeless",
"immigrant", "in-need", "disabled", "hopeless", "poor-families"]
def bracketed_representation(tag):
# Converts a tag (e.g. UPOS, NER, sentiment) into unified scheme: tag -> [TAG]
return f"[{tag.upper()}]"
NER_TAGS = list(map(bracketed_representation,
["O",
"B-ORG", "I-ORG", "E-ORG", "S-ORG",
"B-PER", "I-PER", "E-PER", "S-PER",
"B-LOC", "I-LOC", "E-LOC", "S-LOC",
"B-MISC", "I-MISC", "E-MISC", "S-MISC"]))
NER_SEQ_TAGS = list(map(bracketed_representation,
["ORG", "/ORG", "PER", "/PER", "LOC", "/LOC", "MISC", "/MISC"]))
UPOS_TAGS = list(map(bracketed_representation,
["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN",
"PUNCT", "SCONJ", "SYM", "VERB", "X"]))
# Penn treebank tags
XPOS_TAGS = list(map(bracketed_representation,
["#", "$", "''", ",", "-LRB-", "-RRB-", ".", ":", "AFX", "CC", "CD", "DT", "EX", "FW", "HYPH",
"IN", "JJ", "JJR", "JJS", "LS", "MD", "NIL", "NN", "NNP", "NNPS", "NNS", "PDT", "POS", "PRP",
"PRP$", "RB", "RBR", "RBS", "RP", "SYM", "TO", "UH", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ",
"WDT", "WP", "WP$", "WRB", "``"]))
DEPREL_TAGS = list(map(bracketed_representation,
["acl", "acl:relcl", "advcl", "advmod", "advmod:emph", "advmod:lmod", "amod", "appos", "aux", "aux:pass",
"case", "cc", "cc:preconj", "ccomp", "clf", "compound", "compound:lvc", "compound:prt", "compound:redup", "compound:svc",
"conj", "cop", "csubj", "csubj:pass", "dep", "det", "det:numgov", "det:nummod", "det:poss", "discourse",
"dislocated", "expl", "expl:impers", "expl:pass", "expl:pv", "fixed", "flat", "flat:foreign", "flat:name",
"goeswith", "iobj", "list", "mark", "nmod", "nmod:poss", "nmod:tmod", "nsubj", "nsubj:pass", "nummod", "nummod:gov",
"obj", "obl", "obl:agent", "obl:arg", "obl:lmod", "obl:tmod", "orphan", "parataxis", "punct", "reparandum",
"root", "vocative", "xcomp"]))
# Max 13 entities in training set (50 is way more than enough)
MAX_ENTITIES_IN_DOC = 50
COREF_ENTITY_TAGS = list(map(bracketed_representation,
["O"] + [f"ENTITY{_i}" for _i in range(MAX_ENTITIES_IN_DOC)]))
COREF_SEQ_ENTITY_TAGS = list(map(bracketed_representation, [f"ENTITY{_i}" for _i in range(MAX_ENTITIES_IN_DOC)])) + \
list(map(bracketed_representation, [f"/ENTITY{_i}" for _i in range(MAX_ENTITIES_IN_DOC)]))
SENTIMENT_TAGS = list(map(bracketed_representation,
["NEG_SENT", "OBJ_SENT", "POS_SENT", "UNK_SENT"]))
SENTENCE_SENTIMENT_TAGS = list(map(bracketed_representation,
["NEGATIVE", "NEUTRAL", "POSITIVE"]))
def load_fast_tokenizer(tokenizer_type, pretrained_name_or_path):
# There is no AutoTokenizerFast??
assert tokenizer_type in ["bert", "distilbert", "roberta", "xlm-roberta", "xlnet"]
if tokenizer_type == "bert":
return BertTokenizerFast.from_pretrained(pretrained_name_or_path)
elif tokenizer_type == "distilbert":
return DistilBertTokenizerFast.from_pretrained(pretrained_name_or_path)
elif tokenizer_type == "roberta":
return RobertaTokenizerFast.from_pretrained(pretrained_name_or_path)
elif tokenizer_type == "xlm-roberta":
return XLMRobertaTokenizerFast.from_pretrained(pretrained_name_or_path)
elif tokenizer_type == "xlnet":
return XLNetTokenizerFast.from_pretrained(pretrained_name_or_path)
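# Hedged usage sketch (not part of the original module); "roberta-base" is simply a well-known
# Hugging Face checkpoint used here for illustration:
#   tokenizer = load_fast_tokenizer("roberta", "roberta-base")
#   encoded = tokenizer("A short example sentence.")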
def optimize_threshold(y_true, y_proba_pos, validated_metric):
assert validated_metric in ["p_score", "r_score", "f1_score"]
if validated_metric == "p_score":
metric_fn = precision_score
elif validated_metric == "r_score":
metric_fn = recall_score
else:
metric_fn = f1_score
valid_thresholds = sorted(list(set(y_proba_pos)))
best_thresh, best_metric_value = None, 0.0
for curr_thresh in valid_thresholds:
curr_preds = (y_proba_pos >= curr_thresh).astype(np.int32)
curr_metric_value = metric_fn(y_true=y_true, y_pred=curr_preds,
pos_label=1, average='binary')
if curr_metric_value > best_metric_value:
best_metric_value = curr_metric_value
best_thresh = curr_thresh
return best_thresh, best_metric_value
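# Minimal self-check sketch (an addition, not part of the original module). It only runs when the
# file is executed directly, so importing the module is unaffected; the toy labels/probabilities
# below are made up for illustration.
if __name__ == "__main__":
    _y_true = np.array([0, 0, 1, 1, 1])
    _y_proba = np.array([0.1, 0.4, 0.35, 0.8, 0.7])
    _thresh, _f1 = optimize_threshold(_y_true, _y_proba, "f1_score")
    print(f"best threshold={_thresh:.2f}, F1={_f1:.3f}")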
| 53.010753 | 145 | 0.591684 |
41d33f0d867059e27d7d311027fd1a0f0bf5501a | 5,465 | py | Python | testing/cffi0/test_zintegration.py | balabit-deps/balabit-os-6-python-cffi | 607e8550b1e90f0e5a6ec757257cdc6cb8f61fc8 | ["MIT"] | 163 | 2016-03-18T21:32:05.000Z | 2021-11-08T08:46:22.000Z | testing/cffi0/test_zintegration.py | balabit-deps/balabit-os-6-python-cffi | 607e8550b1e90f0e5a6ec757257cdc6cb8f61fc8 | ["MIT"] | 7 | 2016-04-03T18:11:23.000Z | 2018-04-16T18:06:43.000Z | testing/cffi0/test_zintegration.py | balabit-deps/balabit-os-6-python-cffi | 607e8550b1e90f0e5a6ec757257cdc6cb8f61fc8 | ["MIT"] | 22 | 2016-03-19T12:21:41.000Z | 2019-12-05T13:02:42.000Z |
import py, os, sys, shutil
import subprocess
from testing.udir import udir
if sys.platform == 'win32':
py.test.skip('snippets do not run on win32')
if sys.version_info < (2, 7):
py.test.skip('fails e.g. on a Debian/Ubuntu which patches virtualenv'
' in a non-2.6-friendly way')
def create_venv(name):
tmpdir = udir.join(name)
try:
subprocess.check_call(['virtualenv', '--distribute',
'-p', os.path.abspath(sys.executable),
str(tmpdir)])
except OSError as e:
py.test.skip("Cannot execute virtualenv: %s" % (e,))
site_packages = None
for dirpath, dirnames, filenames in os.walk(str(tmpdir)):
if os.path.basename(dirpath) == 'site-packages':
site_packages = dirpath
break
paths = ""
if site_packages:
try:
from cffi import _pycparser
modules = ('cffi', '_cffi_backend')
except ImportError:
modules = ('cffi', '_cffi_backend', 'pycparser')
try:
import ply
except ImportError:
pass
else:
modules += ('ply',) # needed for older versions of pycparser
paths = []
for module in modules:
target = __import__(module, None, None, [])
if not hasattr(target, '__file__'): # for _cffi_backend on pypy
continue
src = os.path.abspath(target.__file__)
for end in ['__init__.pyc', '__init__.pyo', '__init__.py']:
if src.lower().endswith(end):
src = src[:-len(end)-1]
break
paths.append(os.path.dirname(src))
paths = os.pathsep.join(paths)
return tmpdir, paths
SNIPPET_DIR = py.path.local(__file__).join('..', 'snippets')
def really_run_setup_and_program(dirname, venv_dir_and_paths, python_snippet):
venv_dir, paths = venv_dir_and_paths
def remove(dir):
dir = str(SNIPPET_DIR.join(dirname, dir))
shutil.rmtree(dir, ignore_errors=True)
remove('build')
remove('__pycache__')
for basedir in os.listdir(str(SNIPPET_DIR.join(dirname))):
remove(os.path.join(basedir, '__pycache__'))
olddir = os.getcwd()
python_f = udir.join('x.py')
python_f.write(py.code.Source(python_snippet))
try:
os.chdir(str(SNIPPET_DIR.join(dirname)))
if os.name == 'nt':
bindir = 'Scripts'
else:
bindir = 'bin'
vp = str(venv_dir.join(bindir).join('python'))
env = os.environ.copy()
env['PYTHONPATH'] = paths
subprocess.check_call((vp, 'setup.py', 'clean'), env=env)
subprocess.check_call((vp, 'setup.py', 'install'), env=env)
subprocess.check_call((vp, str(python_f)), env=env)
finally:
os.chdir(olddir)
def run_setup_and_program(dirname, python_snippet):
venv_dir = create_venv(dirname + '-cpy')
really_run_setup_and_program(dirname, venv_dir, python_snippet)
#
sys._force_generic_engine_ = True
try:
venv_dir = create_venv(dirname + '-gen')
really_run_setup_and_program(dirname, venv_dir, python_snippet)
finally:
del sys._force_generic_engine_
# the two files lextab.py and yacctab.py are created by not-correctly-
# installed versions of pycparser.
assert not os.path.exists(str(SNIPPET_DIR.join(dirname, 'lextab.py')))
assert not os.path.exists(str(SNIPPET_DIR.join(dirname, 'yacctab.py')))
class TestZIntegration(object):
def teardown_class(self):
if udir.isdir():
udir.remove(ignore_errors=True)
udir.ensure(dir=1)
def test_infrastructure(self):
run_setup_and_program('infrastructure', '''
import snip_infrastructure
assert snip_infrastructure.func() == 42
''')
def test_distutils_module(self):
run_setup_and_program("distutils_module", '''
import snip_basic_verify
p = snip_basic_verify.C.getpwuid(0)
assert snip_basic_verify.ffi.string(p.pw_name) == b"root"
''')
def test_distutils_package_1(self):
run_setup_and_program("distutils_package_1", '''
import snip_basic_verify1
p = snip_basic_verify1.C.getpwuid(0)
assert snip_basic_verify1.ffi.string(p.pw_name) == b"root"
''')
def test_distutils_package_2(self):
run_setup_and_program("distutils_package_2", '''
import snip_basic_verify2
p = snip_basic_verify2.C.getpwuid(0)
assert snip_basic_verify2.ffi.string(p.pw_name) == b"root"
''')
def test_setuptools_module(self):
run_setup_and_program("setuptools_module", '''
import snip_setuptools_verify
p = snip_setuptools_verify.C.getpwuid(0)
assert snip_setuptools_verify.ffi.string(p.pw_name) == b"root"
''')
def test_setuptools_package_1(self):
run_setup_and_program("setuptools_package_1", '''
import snip_setuptools_verify1
p = snip_setuptools_verify1.C.getpwuid(0)
assert snip_setuptools_verify1.ffi.string(p.pw_name) == b"root"
''')
def test_setuptools_package_2(self):
run_setup_and_program("setuptools_package_2", '''
import snip_setuptools_verify2
p = snip_setuptools_verify2.C.getpwuid(0)
assert snip_setuptools_verify2.ffi.string(p.pw_name) == b"root"
''')
| 36.677852 | 78 | 0.621958 |
ae3a0a1af8dc23f7ceef4f1add7744e2f500c200 | 10,488 | py | Python | sigpy/mri/rf/ptx.py | sickkids-mri/sigpy | 350ab6a9f916050f73fca9af711722f87e3fda43 | ["BSD-3-Clause"] | null | null | null | sigpy/mri/rf/ptx.py | sickkids-mri/sigpy | 350ab6a9f916050f73fca9af711722f87e3fda43 | ["BSD-3-Clause"] | null | null | null | sigpy/mri/rf/ptx.py | sickkids-mri/sigpy | 350ab6a9f916050f73fca9af711722f87e3fda43 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
"""MRI RF excitation pulse design functions,
including SLR and small tip spatial design
"""
import sigpy as sp
import numpy as np
from sigpy.mri import rf as rf
from sigpy import backend
from scipy.interpolate import interp1d
__all__ = ['stspa', 'stspk']
def stspa(target, sens, coord, dt, roi=None, alpha=0, b0=None, tseg=None,
st=None, phase_update_interval=float('inf'), explicit=False,
max_iter=1000, tol=1E-6):
"""Small tip spatial domain method for multicoil parallel excitation.
Allows for constrained or unconstrained designs.
Args:
target (array): desired magnetization profile. [dim dim]
sens (array): sensitivity maps. [Nc dim dim]
coord (array): coordinates for noncartesian trajectories. [Nt 2]
dt (float): hardware sampling dwell time.
roi (array): array for error weighting, specify spatial ROI. [dim dim]
alpha (float): regularization term, if unconstrained.
b0 (array): B0 inhomogeneity map [dim dim]. For explicit matrix
building.
tseg (None or Dictionary): parameters for time-segmented off-resonance
correction. Parameters are 'b0' (array), 'dt' (float),
'lseg' (int), and 'n_bins' (int). Lseg is the number of
time segments used, and n_bins is the number of histogram bins.
st (None or Dictionary): 'subject to' constraint parameters. Parameters
are avg power 'cNorm' (float), peak power 'cMax' (float),
'mu' (float), 'rhoNorm' (float), 'rhoMax' (float), 'cgiter' (int),
'max_iter' (int), 'L' (list of arrays), 'c' (float), 'rho' (float),
and 'lam' (float). These parameters are explained in detail in the
SDMM documentation.
phase_update_interval (int): number of iters between exclusive phase
updates. If 0, no phase updates performed.
explicit (bool): Use explicit matrix.
max_iter (int): max number of iterations.
tol (float): allowable error.
Returns:
array: pulses out.
References:
Grissom, W., Yip, C., Zhang, Z., Stenger, V. A., Fessler, J. A.
& Noll, D. C.(2006).
Spatial Domain Method for the Design of RF Pulses in Multicoil
Parallel Excitation. Magnetic resonance in medicine, 56, 620-629.
"""
Nc = sens.shape[0]
Nt = coord.shape[0]
device = backend.get_device(target)
xp = device.xp
with device:
        pulses = xp.zeros((Nc, Nt), complex)
# set up the system matrix
if explicit:
A = rf.linop.PtxSpatialExplicit(sens, coord, dt,
target.shape, b0)
else:
A = sp.mri.linop.Sense(sens, coord, weights=None, tseg=tseg,
ishape=target.shape).H
# handle the Ns * Ns error weighting ROI matrix
W = sp.linop.Multiply(A.oshape, xp.ones(target.shape))
if roi is not None:
W = sp.linop.Multiply(A.oshape, roi)
# apply ROI
A = W * A
# Unconstrained, use conjugate gradient
if st is None:
I = sp.linop.Identity((Nc, coord.shape[0]))
b = A.H * W * target
alg_method = sp.alg.ConjugateGradient(A.H * A + alpha * I,
b, pulses, P=None,
max_iter=max_iter, tol=tol)
# Constrained case, use SDMM
else:
# vectorize target for SDMM
target = W * target
d = xp.expand_dims(target.flatten(), axis=0)
alg_method = sp.alg.SDMM(A, d, st['lam'], st['L'], st['c'],
st['mu'], st['rho'], st['rhoMax'],
st['rhoNorm'], 10**-5, 10**-2, st['cMax'],
st['cNorm'], st['cgiter'], st['max_iter'])
# perform the design: apply optimization method to find solution pulse
while not alg_method.done():
# phase_update switch
if (alg_method.iter > 0) and \
(alg_method.iter % phase_update_interval == 0):
target = xp.abs(target) * xp.exp(
1j * xp.angle(
xp.reshape(A * alg_method.x, target.shape)))
b = A.H * target
alg_method.b = b
alg_method.update()
if st is not None:
pulses = xp.reshape(alg_method.x, [Nc, Nt])
return pulses
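# Hedged demo (an addition, not part of the original module): a tiny unconstrained design call,
# kept as an uncalled helper so importing the module stays side-effect free.
def _stspa_demo():
    """Illustrative sketch only. The shapes, the square target and the k-space coordinate
    scaling below are assumptions chosen just to exercise stspa() on small inputs."""
    dim, n_coils, n_time = 16, 2, 200
    target = np.zeros((dim, dim), dtype=complex)
    target[4:12, 4:12] = 1.0  # small square excitation pattern
    sens = np.ones((n_coils, dim, dim), dtype=complex)
    coord = (np.random.rand(n_time, 2) - 0.5) * dim  # assumed NUFFT grid units
    return stspa(target, sens, coord, dt=4e-6, max_iter=20)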
def stspk(mask, sens, n_spokes, fov, dx_max, gts, sl_thick, tbw, dgdtmax, gmax,
alpha=1, iter_dif=0.01):
"""Small tip spokes parallel transmit pulse designer.
Args:
mask (ndarray): region in which to optimize flip angle uniformity
in slice. [dim dim]
sens (ndarray): sensitivity maps. [nc dim dim]
n_spokes (int): number of spokes to be created in the design.
fov (float): excitation FOV (cm).
dx_max (float): max. resolution of the trajectory (cm).
gts (float): hardware sampling dwell time (s).
sl_thick (float): slice thickness (mm).
tbw (int): time-bandwidth product.
dgdtmax (float): max gradient slew (g/cm/s).
gmax (float): max gradient amplitude (g/cm).
alpha (float): regularization parameter.
iter_dif (float): for each spoke, the difference in cost btwn.
successive iterations at which to terminate MLS iterations.
Returns:
2-element tuple containing
- **pulses** (*array*): RF waveform out.
- **g** (*array*): corresponding gradient, in g/cm.
References:
Grissom, W., Khalighi, M., Sacolick, L., Rutt, B. & Vogel, M (2012).
Small-tip-angle spokes pulse design using interleaved greedy and
local optimization methods. Magnetic Resonance in Medicine, 68(5),
1553-62.
"""
device = backend.get_device(sens)
xp = device.xp
with device:
nc = sens.shape[0]
kmax = 1 / dx_max # /cm, max spatial freq of trajectory
# greedy kx, ky grid
        kxs, kys = xp.meshgrid(xp.linspace(-kmax / 2, kmax / 2 - 1 / fov,
                                           int(fov * kmax)),
                               xp.linspace(-kmax / 2, kmax / 2 - 1 / fov,
                                           int(fov * kmax)))
# vectorize the grid
kxs = kxs.flatten()
kys = kys.flatten()
# remove DC
dc = xp.intersect1d(xp.where((kxs == 0)), xp.where((kys == 0)))[0]
kxs = xp.concatenate([kxs[:dc], kxs[dc+1:]])
kys = xp.concatenate([kys[:dc], kys[dc+1:]])
# step 2: design the weights
# initial kx/ky location is DC
k = xp.expand_dims(xp.array([0, 0]), 0)
# initial target phase
        phs = xp.zeros((xp.count_nonzero(mask), 1), dtype=complex)
for ii in range(n_spokes):
# build Afull (and take only 0 locations into matrix)
Anum = rf.PtxSpatialExplicit(sens, k, gts, mask.shape,
ret_array=True)
Anum = Anum[~(Anum == 0).all(1)]
# design wfull using MLS:
# initialize wfull
sys_a = (Anum.conj().T @ Anum + alpha * xp.eye((ii+1)*nc))
sys_b = (Anum.conj().T @ xp.exp(1j*phs))
w_full = xp.linalg.solve(sys_a, sys_b)
err = Anum @ w_full - xp.exp(1j * phs)
cost = err.conj().T @ err + alpha * w_full.conj().T @ w_full
cost = xp.real(cost)
cost_old = 10 * cost # to get the loop going
while xp.absolute(cost - cost_old) > iter_dif * cost_old:
cost_old = cost
phs = xp.angle(Anum @ w_full)
w_full = xp.linalg.solve(
(Anum.conj().T @ Anum + alpha * xp.eye((ii + 1) * nc)),
(Anum.conj().T @ xp.exp(1j * phs)))
err = Anum @ w_full - xp.exp(1j * phs)
cost = xp.real(err.conj().T @ err +
alpha * w_full.conj().T @ w_full)
# add a spoke using greedy method
if ii < n_spokes - 1:
r = xp.exp(1j * phs) - Anum @ w_full
                rfnorm = xp.zeros(kxs.shape, dtype=complex)
for jj in range(kxs.size):
ks_test = xp.expand_dims(xp.array([kxs[jj], kys[jj]]), 0)
Anum = rf.PtxSpatialExplicit(sens, ks_test, gts,
mask.shape, ret_array=True)
Anum = Anum[~(Anum == 0).all(1)]
rfm = xp.linalg.solve((Anum.conj().T @ Anum),
(Anum.conj().T @ r))
rfnorm[jj] = xp.linalg.norm(rfm)
ind = xp.argmax(rfnorm)
k_new = xp.expand_dims(xp.array([kxs[ind], kys[ind]]), 0)
if ii % 2 != 0: # add to end of pulse
k = xp.concatenate((k, k_new))
else: # add to beginning of pulse
k = xp.concatenate((k_new, k))
# remove chosen point from candidates
kxs = xp.concatenate([kxs[:ind], kxs[ind + 1:]])
kys = xp.concatenate([kys[:ind], kys[ind + 1:]])
# from our spoke selections, build the whole waveforms
# first, design our gradient waveforms:
g = rf.spokes_grad(k, tbw, sl_thick, gmax, dgdtmax, gts)
# design our rf
# calc. the size of the traps in our gz waveform- will use to calc rf
area = tbw / (sl_thick / 10) / 4257 # thick*kwid=twb, kwid=gam*area
[subgz, nramp] = rf.min_trap_grad(area, gmax, dgdtmax, gts)
npts = 128
subrf = rf.dzrf(npts, tbw, 'st')
n_plat = subgz.size - 2 * nramp # time points on trap plateau
# interpolate to stretch out waveform to appropriate length
f = interp1d(np.arange(0, npts, 1) / npts, subrf,
fill_value='extrapolate')
subrf = f(xp.arange(0, n_plat, 1) / n_plat)
subrf = xp.concatenate((xp.zeros(nramp), subrf, xp.zeros(nramp)))
pulses = xp.kron(xp.reshape(w_full, (nc, n_spokes)), subrf)
# add zeros for gzref
rf_ref = xp.zeros((nc, g.shape[1] - pulses.shape[1]))
pulses = xp.concatenate((pulses, rf_ref), 1)
return pulses, g
| 40.809339 | 79 | 0.536041 |
d36d11181e1f3d02b8e600bf376c6d29357f94b7 | 139 | py | Python | qdbvcella/__init__.py | hhelmbre/qdbvcella | 59c80050e75be089d9228c74086b14e1e0bbcd59 | ["MIT"] | null | null | null | qdbvcella/__init__.py | hhelmbre/qdbvcella | 59c80050e75be089d9228c74086b14e1e0bbcd59 | ["MIT"] | null | null | null | qdbvcella/__init__.py | hhelmbre/qdbvcella | 59c80050e75be089d9228c74086b14e1e0bbcd59 | ["MIT"] | null | null | null |
from __future__ import absolute_import, division, print_function
from .version import __version__ # noqa
from .qdbvcella import * # noqa
| 34.75 | 64 | 0.805755 |
4ec9c420bd12a4d415529b0bd297387b160bed31 | 1,336 | py | Python | examples/time_machine_backtesting.py | thatguystone/requests-cache | 1c7cd3d7af6bf86bd2c1946168e7de8a95b7f8e8 | ["BSD-2-Clause"] | null | null | null | examples/time_machine_backtesting.py | thatguystone/requests-cache | 1c7cd3d7af6bf86bd2c1946168e7de8a95b7f8e8 | ["BSD-2-Clause"] | null | null | null | examples/time_machine_backtesting.py | thatguystone/requests-cache | 1c7cd3d7af6bf86bd2c1946168e7de8a95b7f8e8 | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python3
"""
An example of using the [time-machine](https://github.com/adamchainz/time-machine) library for backtesting,
e.g., testing with cached responses that were available at an arbitrary time in the past.
"""
from datetime import datetime
import requests
import time_machine
from requests_cache import CachedSession, set_response_defaults
class BacktestCachedSession(CachedSession):
def request(self, method: str, url: str, **kwargs):
response = super().request(method, url, **kwargs)
# Response was cached after the (simulated) current time, so ignore it and send a new request
if response.created_at and response.created_at > datetime.utcnow():
new_response = requests.request(method, url, **kwargs)
return set_response_defaults(new_response)
else:
return response
def demo():
session = BacktestCachedSession()
response = session.get('https://httpbin.org/get')
response = session.get('https://httpbin.org/get')
assert response.from_cache is True
# Response was not cached yet at this point, so we should get a fresh one
with time_machine.travel(datetime(2020, 1, 1)):
response = session.get('https://httpbin.org/get')
assert response.from_cache is False
if __name__ == '__main__':
demo()
| 33.4 | 107 | 0.705838 |
c064f6b81820bfac9d5e91e47fa0b20a0538adf9 | 248 | py | Python | workshop/patternprogram.py | rames4498/Bootcamps_and_workshops | f24e23c81219c35b2ac1e9908b1e3b9755b5ca29 | ["Xnet", "X11"] | 7 | 2020-03-06T04:41:57.000Z | 2022-02-27T10:13:27.000Z | workshop/patternprogram.py | rames4498/Bootcamps_and_workshops | f24e23c81219c35b2ac1e9908b1e3b9755b5ca29 | ["Xnet", "X11"] | null | null | null | workshop/patternprogram.py | rames4498/Bootcamps_and_workshops | f24e23c81219c35b2ac1e9908b1e3b9755b5ca29 | ["Xnet", "X11"] | 6 | 2020-03-08T11:40:54.000Z | 2020-12-22T11:21:16.000Z |
def pattern(n):
    # Print an n-row star pattern: row i is indented by (2*n - 2 - i) spaces
    # and then prints i+1 stars.
    k = 2 * n - 2
    for i in range(0, n):
        # leading spaces for this row
        for j in range(0, k):
            print(end=" ")
        k = k - 1
        # the stars themselves, space-separated
        for j in range(0, i + 1):
            print("*", end=" ")
        print(" ")
pattern(5)
| 19.076923 | 32 | 0.350806 |
051797eb5f30df9953d7eca9cda3668ee5264c72 | 892 | py | Python | accounts/accounts/urls.py | HaeckelK/bookkeeping | 6f8b62f1322fe1c409f397222653382d302d9754 | ["MIT"] | null | null | null | accounts/accounts/urls.py | HaeckelK/bookkeeping | 6f8b62f1322fe1c409f397222653382d302d9754 | ["MIT"] | 7 | 2021-06-30T12:05:47.000Z | 2021-07-14T07:50:27.000Z | accounts/accounts/urls.py | HaeckelK/bookkeeping | 6f8b62f1322fe1c409f397222653382d302d9754 | ["MIT"] | null | null | null |
"""accounts URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from dashboards import views
urlpatterns = [
path("admin/", admin.site.urls),
path("trial_balance/", views.trial_balance),
path("nominal_transactions/", views.nominal_transactions),
]
| 34.307692 | 77 | 0.71861 |
133f6cc55578970bc52766b4cfdcf4ef2059b7f6 | 23,279 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_local_network_gateways_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | ["MIT"] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_local_network_gateways_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | ["MIT"] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_local_network_gateways_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | ["MIT"] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LocalNetworkGatewaysOperations:
"""LocalNetworkGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
local_network_gateway_name: str,
parameters: "models.LocalNetworkGateway",
**kwargs
) -> "models.LocalNetworkGateway":
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
local_network_gateway_name: str,
parameters: "models.LocalNetworkGateway",
**kwargs
) -> AsyncLROPoller["models.LocalNetworkGateway"]:
"""Creates or updates a local network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to the create or update local network gateway operation.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.LocalNetworkGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either LocalNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.LocalNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
async def get(
self,
resource_group_name: str,
local_network_gateway_name: str,
**kwargs
) -> "models.LocalNetworkGateway":
"""Gets the specified local network gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LocalNetworkGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.LocalNetworkGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
local_network_gateway_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
local_network_gateway_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified local network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
local_network_gateway_name: str,
parameters: "models.TagsObject",
**kwargs
) -> "models.LocalNetworkGateway":
"""Updates a local network gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to update local network gateway tags.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LocalNetworkGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.LocalNetworkGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.LocalNetworkGatewayListResult"]:
"""Gets all the local network gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LocalNetworkGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.LocalNetworkGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LocalNetworkGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LocalNetworkGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'} # type: ignore
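# Hedged usage sketch (an addition, not part of the generated file): this operations group is
# normally reached through the async management client rather than instantiated directly. The
# subscription id, resource group and gateway name below are placeholders.
async def _example_get_local_network_gateway():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            gateway = await client.local_network_gateways.get("<resource-group>", "<gateway-name>")
            print(gateway.name, gateway.location)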
| 50.278618 | 209 | 0.678981 |
03d22098fc140857499e0e4bb3ff5fa6c90092ee | 11,586 | py | Python | gpxtools.py | rokdd/gpx-tools | 5e64fb7f2c65fc37e068172fc001fa538052f2c0 | ["Apache-2.0", "MIT"] | null | null | null | gpxtools.py | rokdd/gpx-tools | 5e64fb7f2c65fc37e068172fc001fa538052f2c0 | ["Apache-2.0", "MIT"] | null | null | null | gpxtools.py | rokdd/gpx-tools | 5e64fb7f2c65fc37e068172fc001fa538052f2c0 | ["Apache-2.0", "MIT"] | null | null | null |
# Copyright 2014 Martijn Grendelman <m@rtijn.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
from iso8601 import parse_date
from pprint import pprint, pformat
from lxml import etree # apt-get install python3-lxml
from math import radians, sin, cos, atan2, sqrt
from pytz import timezone # apt-get install python3-tz
from datetime import timedelta
# ns = '{http://www.topografix.com/GPX/1/0}'
ns = '{http://www.topografix.com/GPX/1/1}'
do_merge = False
do_duplicate_search=False
base_file = ''
# Get unique dates from all tracks
def get_dates(tree, ns, tz, split_time):
dates = set()
for trk in tree.iterchildren(ns + 'trk'):
dates.add(get_date(trk, ns, tz, split_time=split_time))
return dates
def get_name(trk, ns):
return trk.findtext(ns + 'name') or ''
def get_date(trk, ns, tz=None, split_time=timedelta()):
gpxtime = trk.findtext(ns + 'trkseg/' + ns + 'trkpt/' + ns + 'time')
tzobj = None
# use automatic timezone from coordinates
if type(tz).__name__ == "TimezoneFinder":
lat = trk.find(ns + 'trkseg/' + ns + 'trkpt[@lat]')
lng = lat.get('lon')
lat = lat.get('lat')
# From the lat/long, get the tz-database-style time zone name (e.g. 'America/Vancouver') or None
timezone_str = tz.certain_timezone_at(lat=float(lat), lng=float(lng))
if timezone_str is None:
# print("Could not determine the time zone")
pass
else:
# Display the current time in that time zone
tzobj = timezone(timezone_str)
# use timezone which is globally specified
elif tz is not None:
tzobj = timezone(tz)
# parse_date returns a datetime.datetime by timezone if isset
if tzobj is not None:
return (parse_date(gpxtime) - split_time).astimezone(tzobj).date()
    return (parse_date(gpxtime) - split_time).date()
def get_datetime(trk, ns, tz=None):
gpxtime = trk.findtext(ns + 'trkseg/' + ns + 'trkpt/' + ns + 'time')
if gpxtime is not None:
# parse_date returns a datetime.datetime
if tz is not None:
tzobj = timezone(tz)
return parse_date(gpxtime).astimezone(tzobj)
return parse_date(gpxtime)
def get_numpts(trk, ns):
return len(trk.findall(ns + 'trkseg/' + ns + 'trkpt'))
def get_numtrk(root, ns):
return len(root.findall(ns + 'trk'))
def get_numwpt(root, ns):
return len(root.findall(ns + 'wpt'))
def get_numrte(root, ns):
return len(root.findall(ns + 'rte'))
def get_numrtept(rte, ns):
return len(rte.findall(ns + 'rtept'))
def distance(lat1, lon1, lat2, lon2):
radius = 6371000 # meter
lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])
dlat = lat2 - lat1
dlon = lon2 - lon1
a = sin(dlat / 2) * sin(dlat / 2) + cos(lat1) \
* cos(lat2) * sin(dlon / 2) * sin(dlon / 2)
c = 2 * atan2(sqrt(a), sqrt(1 - a))
d = radius * c
return d
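# Worked example (approximate, for orientation only): distance(52.3676, 4.9041, 48.8566, 2.3522)
# -- Amsterdam to Paris city centres -- evaluates to roughly 430 km (about 4.3e5 meter).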
def make_filename(d, dir='.'):
global do_merge, base_file
do_merge = False
n = 0
fn = "%s-%03d.gpx" % (d.isoformat(), n)
fname = os.path.join(dir, fn)
base_file = fname
while os.path.exists(fname):
n += 1
fn = "%s-%03d.gpx" % (d.isoformat(), n)
fname = os.path.join(dir, fn)
do_merge = True
return fname
def split(filename, tz, split_time=timedelta()):
global ns, do_merge, do_duplicate_search
try:
tree0 = etree.parse(filename)
except Exception as e:
print("Could not parse GPX: %s" % e)
return False
root = tree0.getroot()
# Get the XML namespace from the tree
ns = "{%s}" % root.nsmap[None]
dates = get_dates(root, ns, tz, split_time=split_time)
# Iterate over the dates and remove non-matching tracks from the tree
for d in dates:
fname = make_filename(d)
tree = copy.deepcopy(tree0)
root = tree.getroot()
tracks = {}
for trk in root.iterchildren():
# Remove all non-track elements
if trk.tag != ns + 'trk':
root.remove(trk)
continue
name = get_name(trk, ns)
trackdate = get_date(trk, ns, tz, split_time=split_time)
if trackdate != d:
# print("%-25s: date mismatch, removing %s" % (fname, name))
root.remove(trk)
elif name in tracks:
oldnum = tracks[name]['numpts']
newnum = get_numpts(trk, ns)
# sometimes there is no name and a huge file.. then just merge the segments..
if do_duplicate_search == False:
print("%-25s: DUPLICATES %s (track points: old=%d + new=%d) -> concating" % (
fname, name, oldnum, newnum))
for trkseg in trk.iterchildren():
tracks[name]['track'].append(trkseg)
tracks[name]['numpts']+=newnum
print("%-25s: DUPLICATES %s (track points: old=%d + new=%d) = concated %d" % (
fname, name, oldnum, newnum, len(tracks[name]['track'].findall('.//'+ns+'trkpt'))))
root.remove(trk)
elif oldnum >= newnum:
print("%-25s: DUPLICATE %s (track points: old=%d, new=%d) -> removing" % (
fname, name, oldnum, newnum))
root.remove(trk)
else:
# newnum > oldnum. Old track should be removed and this one kept.
print(
"%-25s: duplicate %s (track points differ, old=%d new=%d) -> keeping and removing the old one" % \
(fname, name, oldnum, newnum))
root.remove(tracks[name]['track'])
tracks[name] = {'numpts': newnum, 'track': trk}
else:
numpts = get_numpts(trk, ns)
tracks[name] = {'numpts': numpts, 'track': trk}
# print("%-25s: keeping %s" % (fname, name))
print("%-25s: writing file" % fname)
tree.write(fname, xml_declaration=True, encoding='utf-8')
# Merge if necessary
if do_merge:
print("%-25s: starting merge into %s" % (fname, base_file))
merge(base_file, fname, False)
# Merge tracks from file2 into file1.
# On duplicate names, keep the track with the most track points.
def merge(file1, file2, interactive=True):
try:
tree1 = etree.parse(file1)
tree2 = etree.parse(file2)
except Exception as e:
print("Could not parse GPX: %s" % e)
return False
root1 = tree1.getroot()
root2 = tree2.getroot()
ns1 = "{%s}" % root1.nsmap[None]
ns2 = "{%s}" % root2.nsmap[None]
# print("%-25s: Namespace: %s" % (file1, ns1))
# print("%-25s: Namespace: %s" % (file2, ns2))
modified = False
# Analyze the first file
tracks1 = {}
for trk in root1.iterchildren(ns1 + 'trk'):
name = get_name(trk, ns1)
numpts = get_numpts(trk, ns1)
if not name in tracks1:
tracks1[name] = {'numpts': numpts, 'track': trk}
else:
print("Track '%s' already seen in '%s'. File contains dupes?" % (name, file1))
            oldnum = tracks1[name]['numpts']
if numpts > oldnum:
print("Duplicate '%s' replacing old track (old=%d, new=%d points)" % (name, oldnum, numpts))
tracks1[name] = {'numpts': numpts, 'track': trk}
for trk in root2.iterchildren(ns2 + 'trk'):
name = get_name(trk, ns2)
numpts = get_numpts(trk, ns2)
if not name in tracks1:
print("%-25s: appending track '%s'" % (file1, name))
root1.append(copy.deepcopy(trk))
tracks1[name] = {'numpts': numpts, 'track': trk}
modified = True
else:
oldpts = tracks1[name]['numpts']
if numpts > oldpts:
print("%-25s: replacing track '%s'. oldpts=%d. newpts=%d" % (file2, name, oldpts, numpts))
root1.remove(tracks1[name]['track'])
root1.append(copy.deepcopy(trk))
tracks1[name] = {'numpts': numpts, 'track': trk}
modified = True
# else:
# print("%-25s: skipping track '%s'. oldpts=%d. newpts=%d" % (file2, name, oldpts, numpts))
if modified:
yn = False
if interactive:
while yn not in ['y', 'n']:
yn = input("Overwrite '%s' and remove '%s' ? (y/n)" % (file1, file2))
if not interactive or yn == 'y':
print("%-25s: Overwriting file" % file1)
tree1.write(file1, xml_declaration=True, encoding='utf-8')
print("%-25s: Removing file" % file2)
os.remove(file2)
else:
print("%-25s: No changes to write to file" % file1)
yn = False
if interactive:
while yn not in ['y', 'n']:
                yn = input("Remove '%s' ? (y/n)" % file2)
if not interactive or yn == 'y':
print("%-25s: Removing file" % file2)
os.remove(file2)
def info(filename, tz):
global ns
try:
tree0 = etree.parse(filename)
except Exception as e:
print("Could not parse GPX: %s" % e)
return False
root = tree0.getroot()
# Get the XML namespace from the tree
ns = "{%s}" % root.nsmap[None]
print("Number of tracks : %d" % get_numtrk(root, ns))
print("Number of routes : %d" % get_numrte(root, ns))
print("Number of waypoints: %d" % get_numwpt(root, ns))
print('')
for trk in root.iterchildren(ns + 'trk'):
name = get_name(trk, ns)
trackdate = get_datetime(trk, ns, tz)
print("Track name : %s " % name)
print("Track date/time : %s " % trackdate)
n = 0
trkd = 0
for trkseg in trk.iterchildren(ns + 'trkseg'):
numpts = len(list(trkseg))
oldlat = None
d = 0
for trkpt in trkseg.iterchildren(ns + 'trkpt'):
lat = float(trkpt.get('lat'))
lon = float(trkpt.get('lon'))
if oldlat != None:
d += distance(oldlat, oldlon, lat, lon)
oldlat = lat
oldlon = lon
print("Segment %3d : %4d track points, distance: %d meter" % (n, numpts, d))
n += 1
trkd += d
pts = get_numpts(trk, ns)
print("Total points : %4d" % pts)
print("Total distance : %d meter" % trkd)
print('')
for rte in root.iterchildren(ns + 'rte'):
print("Route name : %s " % get_name(rte, ns).encode('utf-8'))
print("Numer of route points: %d" % get_numrtept(rte, ns))
print('')
for wpt in root.iterchildren(ns + 'wpt'):
name = get_name(wpt, ns)
if name:
print("Waypoint name : %s " % get_name(wpt, ns).encode('utf-8'))
# vim: ts=4 sw=4 et :
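# Hedged driver sketch (an addition, not part of the original module): the timezone string and the
# 4-hour split offset are placeholder choices; GPX paths are taken from the command line.
if __name__ == '__main__':
    import sys
    for gpx_file in sys.argv[1:]:
        info(gpx_file, 'Europe/Amsterdam')
        split(gpx_file, 'Europe/Amsterdam', split_time=timedelta(hours=4))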
| 33.778426 | 122 | 0.556189 |