hexsha (string, 40 chars) | size (int64, 4 to 1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, 4 to 209) | max_stars_repo_name (string, 5 to 121) | max_stars_repo_head_hexsha (string, 40 chars) | max_stars_repo_licenses (sequence, 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24 chars, nullable) | max_stars_repo_stars_event_max_datetime (string, 24 chars, nullable) | max_issues_repo_path (string, 4 to 209) | max_issues_repo_name (string, 5 to 121) | max_issues_repo_head_hexsha (string, 40 chars) | max_issues_repo_licenses (sequence, 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24 chars, nullable) | max_issues_repo_issues_event_max_datetime (string, 24 chars, nullable) | max_forks_repo_path (string, 4 to 209) | max_forks_repo_name (string, 5 to 121) | max_forks_repo_head_hexsha (string, 40 chars) | max_forks_repo_licenses (sequence, 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24 chars, nullable) | max_forks_repo_forks_event_max_datetime (string, 24 chars, nullable) | content (string, 4 to 1.02M) | avg_line_length (float64, 1.07 to 66.1k) | max_line_length (int64, 4 to 266k) | alphanum_fraction (float64, 0.01 to 1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3129b4e07525b3bb8f25773db96fb8cb8950760d | 13,717 | py | Python | src/Source.py | SGEthan/Info_Retrieving | 158376a8796abbfa239d316ee91ddc6436243476 | ["MIT"] | null | null | null | src/Source.py | SGEthan/Info_Retrieving | 158376a8796abbfa239d316ee91ddc6436243476 | ["MIT"] | null | null | null | src/Source.py | SGEthan/Info_Retrieving | 158376a8796abbfa239d316ee91ddc6436243476 | ["MIT"] | null | null | null |
import json
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import os
import re
import math
from scipy import sparse
import numpy as np
from scipy import spatial
import time
ORIGINAL_ARTICLE_PATH = '../dataset/US_Financial_News_Articles/'
EDITED_TEXT_PATH = '../output/Edited_dataset/'
OUTPUT_PATH = '../output/'
AND = 0
OR = 1
NOT = 2
priority = {'AND': 1, 'OR': 1, 'NOT': 2, '(': 0}
Op = ['AND', 'OR', 'NOT']
bracket = ['(', ')']
def for_every_original_articles(base):
for root, ds, fs in os.walk(base):
for d in ds:
outpath = '../output/Edited_dataset/' + d
if not os.path.exists(outpath):
os.mkdir(outpath)
for f in fs:
            if re.match(r'.*\.json', f):
fullname = os.path.join(root, f)
yield (fullname, f)
def original_file_op(in_name, name):
with open(in_name, 'r', encoding='UTF-8') as f:
in_dict = json.load(f)
text = in_dict['text']
edited_text = original_text_op(text)
out_dict = {'Edited_text': edited_text}
dir_list = re.split(r'[/\\]', in_name)
dir_of_file = dir_list[len(dir_list) - 2]
outpath = EDITED_TEXT_PATH + dir_of_file
out_name = os.path.join(outpath, 'edited_' + name)
with open(out_name, 'w', encoding='UTF-8') as f:
json.dump(out_dict, f)
return out_name
def original_text_op(text):
cutwords1 = word_tokenize(text) # cut words
interpunctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%']
cutwords2 = [word for word in cutwords1 if word not in interpunctuations] # delete interpunctuations
cutwords3 = []
for word in cutwords2:
cutwords3.append(PorterStemmer().stem(word)) # get stems
stops = set(stopwords.words("english"))
cutwords4 = [word for word in cutwords3 if word not in stops] # delete stopwords
return cutwords4
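# Illustrative run of the pipeline above (not part of the original module; the exact
# stems depend on NLTK's PorterStemmer):
#   original_text_op("Markets are rising, analysts said.")
#   -> tokenize -> drop punctuation -> stem -> drop stopwords
#   ≈ ['market', 'rise', 'analyst', 'said']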
def for_every_edited_articles(base):
for root, ds, fs in os.walk(base):
for f in fs:
            if re.match(r'.*\.json', f):
fullname = os.path.join(root, f)
yield (fullname, f)
def edited_text_ivtable_op(in_name, name, count, total_word_list, inverted_table):
# Operation to the edited texts, creating the inverted page table
with open(in_name, 'r', encoding='UTF-8') as f:
in_dict = json.load(f)
for word in in_dict['Edited_text']:
word_list = inverted_table.setdefault(word, [count[0], [count[1]]])
if word_list != [count[0], [count[1]]]:
if word_list[1][len(word_list[1]) - 1] != count[1]:
inverted_table[word][1].append(count[1])
else:
total_word_list.append(word)
count[0] += 1
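# Shape of the structure built above (hypothetical entry, for illustration only):
#   inverted_table['market'] == [<word number>, [0, 3, 17]]
# i.e. each stem maps to its number plus the list of article numbers it occurs in,
# while total_word_list collects each new stem the first time it is seen.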
def create_inverted_table():
inverted_table = {}
word_list = []
count = [0, 0] # count[0]: word count; count[1]: article count
for (full_dir, name) in for_every_edited_articles(EDITED_TEXT_PATH):
edited_text_ivtable_op(full_dir, name, count, word_list, inverted_table)
print(count[1])
count[1] += 1
with open(os.path.join(OUTPUT_PATH, 'inverted_table.json'), 'w', encoding='UTF-8') as f:
        json.dump(inverted_table, f)  # persist the inverted table: word -> [word number, [article numbers]]
with open(os.path.join(OUTPUT_PATH, 'word_list.json'), 'w', encoding='UTF-8') as f:
        json.dump(word_list, f)  # persist the word list (a word's position in this list is its number)
print('inverted table created')
return inverted_table
def create_word_dict():
# create a dictionary from which we can find the number of a word
with open(os.path.join(OUTPUT_PATH, 'word_list.json'), 'r', encoding='UTF-8') as f:
word_list = json.load(f)
i = 0
word_dict = {}
for word in word_list:
word_dict[word] = i
print(i)
i += 1
with open(os.path.join(OUTPUT_PATH, 'word_dict.json'), 'w', encoding='UTF-8') as f:
json.dump(word_dict, f)
def display_the_titles(lst):
with open(os.path.join(OUTPUT_PATH, 'file_name_list.json'), 'r', encoding='UTF-8') as f:
file_name_list = json.load(f) # creating a list that indicates every file and its number
for index in lst:
with open(file_name_list[index][0], 'r', encoding='UTF-8') as f:
in_dict = json.load(f)
print(index, ':', in_dict['title'])
def boolean_transfer(k):
word_list = word_tokenize(k)
word_list_2 = []
for word in word_list:
if (word not in Op) & (word not in bracket):
word_list_2.append(PorterStemmer().stem(word))
else:
word_list_2.append(word)
edited_list = transform_op(word_list_2)
return edited_list
def transform_op(word_list):
word_stack = []
op_stack = []
for word in word_list:
if (word not in Op) & (word not in bracket):
word_stack.append(word)
elif word in Op:
while len(op_stack) > 0:
if priority[op_stack[len(op_stack) - 1]] >= priority[word]:
word_stack.append(op_stack.pop())
else:
break
op_stack.append(word)
elif word == '(':
op_stack.append(word)
else:
oprt = op_stack.pop()
while oprt != '(':
word_stack.append(oprt)
oprt = op_stack.pop()
while len(op_stack) > 0:
word_stack.append(op_stack.pop())
op_stack.clear()
for word in word_stack:
if word not in Op:
op_stack.append(word)
elif word == 'NOT':
op_1 = op_stack.pop()
op_stack.append([op_1, NOT])
elif word == 'AND':
op_2 = op_stack.pop()
op_1 = op_stack.pop()
op_stack.append([op_1, op_2, AND])
else:
op_2 = op_stack.pop()
op_1 = op_stack.pop()
op_stack.append([op_1, op_2, OR])
return op_stack[0]
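# Worked example for transform_op (illustrative; AND, OR and NOT are the module
# constants 0, 1 and 2 defined above): the query tokens
#   ['apple', 'AND', '(', 'banana', 'OR', 'NOT', 'cherry', ')']
# are first rearranged into postfix order ['apple', 'banana', 'cherry', 'NOT', 'OR', 'AND']
# and then folded into the nested structure
#   ['apple', ['banana', ['cherry', NOT], OR], AND]
# which boolean_retrieval() below evaluates recursively.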
def boolean_retrieval(search, inverted_table):
print(search)
if type(search) == str: # if it's a word
if search in inverted_table:
return inverted_table[search][1]
else:
return []
word_list_out = []
if len(search) == 2: # NOT case
tmp_list = boolean_retrieval(search[0], inverted_table)
word_list_out = list(range(0, 306242))
for number in tmp_list:
word_list_out.remove(number)
else:
if (type(search[0]) != list) & (type(search[1]) != list):
word_list_1 = inverted_table[search[0]][1]
word_list_2 = inverted_table[search[1]][1]
else: # recursive call
word_list_1 = boolean_retrieval(search[0], inverted_table)
word_list_2 = boolean_retrieval(search[1], inverted_table)
if search[2] == AND: # AND case
word_list_out = [number for number in word_list_1 if number in word_list_2]
elif search[2] == OR: # OR case
word_list_out = word_list_1
for word in word_list_2:
if word not in word_list_out:
word_list_out.append(word)
word_list_out.sort()
return word_list_out
def boolean_search():
search_string = input('enter ur search statement here:')
time0 = time.time()
search = boolean_transfer(search_string)
with open(os.path.join(OUTPUT_PATH, 'inverted_table.json'), 'r', encoding='UTF-8') as f:
inverted_table = json.load(f)
search_result = boolean_retrieval(search, inverted_table)
with open(os.path.join(OUTPUT_PATH, 'search_result.json'), 'w', encoding='UTF-8') as f:
json.dump(search_result, f)
print('Matched file numbers:')
print(search_result)
print('And their titles:')
display_the_titles(search_result)
print('time used:')
print(time.time() - time0)
def creating_corpus():
collection = []
i = 0
with open(os.path.join(OUTPUT_PATH, 'file_name_list.json'), 'r', encoding='UTF-8') as f:
file_name_list = json.load(f)
for file in file_name_list:
with open(file[1], 'r', encoding='UTF-8') as g:
article = json.load(g)
word_collection = article['Edited_text']
collection.append(word_collection)
print(i)
i += 1
with open(os.path.join(OUTPUT_PATH, 'corpus.json'), 'w', encoding='UTF-8') as f:
json.dump(collection, f)
return collection
def tf(word, word_list):
return float(word_list.count(word)/len(word_list))
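# Illustrative arithmetic for the tf-idf weighting used below (the numbers are made up):
# an article of 100 stems containing 'market' 4 times has tf = 4/100 = 0.04; with a
# corpus of 306,242 articles in which 'market' appears in 9,999 of them,
# idf = log(306242 / (1 + 9999)) ≈ 3.42, giving a tf-idf weight of about 0.137.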
def creating_idf_dict(corpus, word_list):
idf_dict = {}
length = len(corpus)
with open(os.path.join(OUTPUT_PATH, 'inverted_table.json'), 'r', encoding='UTF-8') as f:
inverted_table = json.load(f)
word_count = 0
for word in word_list:
        count = len(inverted_table[word][1])  # document frequency: number of articles containing the word
idf = math.log(length/(1+count))
idf_dict[word] = idf
print(word_count, idf)
word_count += 1
with open(os.path.join(OUTPUT_PATH, 'word_idf_dict.json'), 'w', encoding='UTF-8') as g:
        json.dump(idf_dict, g)  # creating a dictionary that indicates every word and its idf
def tf_idf():
creating_corpus()
with open(os.path.join(OUTPUT_PATH, 'corpus.json'), 'r', encoding='UTF-8') as f:
corpus = json.load(f)
with open(os.path.join(OUTPUT_PATH, 'word_dict.json'), 'r', encoding='UTF-8') as f:
word_dict = json.load(f)
with open(os.path.join(OUTPUT_PATH, 'word_list.json'), 'r', encoding='UTF-8') as f:
word_list = json.load(f)
creating_idf_dict(corpus, word_list)
with open(os.path.join(OUTPUT_PATH, 'word_idf_dict.json'), 'r', encoding='UTF-8') as f:
idf_dict = json.load(f)
tf_idf_matrix = {}
i = 0
for article in corpus:
tf_idf_matrix[i] = {}
for word in article: # for every word
if word_dict[word] not in tf_idf_matrix[i]: # if not computed before
ti = tf(word, article) * idf_dict[word]
if ti != 0:
tf_idf_matrix[i][word_dict[word]] = ti
print((i, word_dict[word], ti))
i += 1
with open(os.path.join(OUTPUT_PATH, 'tf_idf_matrix.json'), 'w', encoding='UTF-8') as f:
json.dump(tf_idf_matrix, f)
def for_every_tf_idf_vec(tf_idf_matrix, word_dict):
# with open(os.path.join(OUTPUT_PATH, 'tf_idf_matrix.json'), 'r', encoding='UTF-8') as f:
# tf_idf_matrix = json.load(f)
row = []
column = []
mtx_value = []
# with open(os.path.join(OUTPUT_PATH, 'word_dict.json'), 'r', encoding='UTF-8') as f:
# word_dict = json.load(f)
length = len(word_dict)
for i in range(0, 306241):
for key in tf_idf_matrix[str(i)]:
row.append(0)
column.append(int(key))
value = tf_idf_matrix[str(i)][key]
mtx_value.append(value)
a = sparse.coo_matrix((mtx_value, (row, column)), shape=(1, length))
row.clear()
column.clear()
mtx_value.clear()
yield a
def insert(lst, pair, max_length):
length = len(lst)
if length == 0:
lst.append(pair)
return
elif length == max_length:
if pair[1] >= lst[max_length-1][1]:
return
i = 0
while lst[i][1] <= pair[1]:
i += 1
if i == length:
break
if (i == length) & (i < max_length):
lst.append(pair)
else:
lst.insert(i, pair)
if len(lst) > max_length:
del lst[max_length]
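# Usage sketch for insert() (not part of the original module): it keeps at most
# max_length (index, distance) pairs sorted by ascending distance, e.g.
#   top = []
#   for pair in [(3, 0.9), (7, 0.2), (1, 0.5)]:
#       insert(top, pair, 2)
#   # top == [(7, 0.2), (1, 0.5)]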
def semantic_retrieval():
with open(os.path.join(OUTPUT_PATH, 'word_dict.json'), 'r', encoding='UTF-8') as f:
word_dict = json.load(f)
with open(os.path.join(OUTPUT_PATH, 'word_idf_dict.json'), 'r', encoding='UTF-8') as f:
idf_dict = json.load(f)
input_string = input('enter ur searching statement here:')
input_list = word_tokenize(input_string)
i = []
j = []
ti_list = []
for word in input_list:
if word in word_dict:
ti = tf(word, input_list) * idf_dict[word]
i.append(word_dict[word])
j.append(0)
ti_list.append(ti)
print((0, word, word_dict[word], ti))
length = len(word_dict)
a = sparse.coo_matrix((ti_list, (j, i)), shape=(1, length)).toarray()
i = 0
least_distance = []
with open(os.path.join(OUTPUT_PATH, 'tf_idf_matrix.json'), 'r', encoding='UTF-8') as f:
tf_idf_matrix = json.load(f)
time0 = time.time()
for cur_vec in for_every_tf_idf_vec(tf_idf_matrix, word_dict):
i += 1
distance = np.linalg.norm(cur_vec.toarray()-a) # 7.578181028366089s
# distance = spatial.distance.euclidean(a, cur_vec.toarray()) # 8.908097743988037s
insert(least_distance, (i, distance), 10)
print((i, distance))
if i > 1000:
print(time.time()-time0)
break
title_list = []
for pair in least_distance:
print(pair)
title_list.append(pair[0])
display_the_titles(title_list)
def main():
file_name_list = []
for (full_dir, name) in for_every_original_articles(ORIGINAL_ARTICLE_PATH):
print(full_dir)
full_out_name = original_file_op(full_dir, name)
file_name_list.append((full_dir, full_out_name))
with open(os.path.join(OUTPUT_PATH, 'file_name_list.json'), 'w', encoding='UTF-8') as f:
json.dump(file_name_list, f) # creating a list that indicates every file and its number
create_inverted_table()
create_word_dict()
tf_idf()
os.system('pause')
if __name__ == '__main__':
main()
| 30.824719 | 105 | 0.597725 |
6c26876073a7d268b18be30f7fb47bffa6534385 | 88 | py | Python | nanome/_internal/_network/_commands/_callbacks/_load_file_done.py | devanshuDesai/nanome | aabb536ef498044e97bc97ed71af940358bf36f2 | ["MIT"] | null | null | null | nanome/_internal/_network/_commands/_callbacks/_load_file_done.py | devanshuDesai/nanome | aabb536ef498044e97bc97ed71af940358bf36f2 | ["MIT"] | null | null | null | nanome/_internal/_network/_commands/_callbacks/_load_file_done.py | devanshuDesai/nanome | aabb536ef498044e97bc97ed71af940358bf36f2 | ["MIT"] | null | null | null |
def _load_file_done(network, result, request_id):
network._call(request_id, result)
| 29.333333 | 49 | 0.784091 |
5cba20420027712adf0c7d8a81df4f915efb2a3c | 3,017 | py | Python | zamba/models/cnnensemble/src/utils.py | qyj0731/data_work | ba303ce696ae5fefe05d05654fcf65682448e42f | ["MIT"] | null | null | null | zamba/models/cnnensemble/src/utils.py | qyj0731/data_work | ba303ce696ae5fefe05d05654fcf65682448e42f | ["MIT"] | null | null | null | zamba/models/cnnensemble/src/utils.py | qyj0731/data_work | ba303ce696ae5fefe05d05654fcf65682448e42f | ["MIT"] | null | null | null |
import math
import pickle
import time
import random
from contextlib import contextmanager
import concurrent.futures
from queue import Queue
import numpy as np
import skvideo.io
import skimage.transform
def preprocessed_input_to_img_resnet(x):
# Zero-center by mean pixel
x = x.copy()
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
# 'BGR' -> RGB
img = x.copy()
img[:, :, 0] = x[:, :, 2]
img[:, :, 1] = x[:, :, 1]
img[:, :, 2] = x[:, :, 0]
return img / 255.0
@contextmanager
def timeit_context(name):
startTime = time.time()
yield
elapsedTime = time.time() - startTime
print('[{}] finished in {} ms'.format(name, int(elapsedTime * 1000)))
def chunks(l, n, add_empty=False):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l) // n * n + n - 1, n):
if len(l[i:i + n]):
yield l[i:i + n]
if add_empty:
yield []
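# Illustrative behaviour of chunks() (not part of the original module):
#   list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
# and passing add_empty=True appends a trailing empty chunk [].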
def lock_layers_until(model, first_trainable_layer, verbose=False):
found_first_layer = False
for layer in model.layers:
if layer.name == first_trainable_layer:
found_first_layer = True
if verbose and found_first_layer and not layer.trainable:
print('Make layer trainable:', layer.name)
layer.trainable = True
layer.trainable = found_first_layer
def print_stats(title, array):
print('{} shape:{} dtype:{} min:{} max:{} mean:{} median:{}'.format(
title,
array.shape,
array.dtype,
np.min(array),
np.max(array),
np.mean(array),
np.median(array)
))
def load_video_clip_frames(video_fn, frames_numbers, output_size):
"""
Load video clip frames.
    Load frames for the requested frames_numbers and resize them to output_size if necessary
:param video_fn: path to video clip
:param frames_numbers: list of frame numbers to load
:param output_size: (rows, cols) tuple, size of loaded image
:return: ndarray of shape (len(frames_numbers), rows, cols, 3)
"""
X = np.zeros(shape=(len(frames_numbers),) + output_size + (3,), dtype=np.float32)
v = skvideo.io.vread(str(video_fn))
valid_frames = 0
for i, frame_num in enumerate(frames_numbers):
try:
frame = v[frame_num]
if frame.shape[:2] != output_size:
frame = skimage.transform.resize(frame,
output_shape=output_size,
order=1,
mode='constant',
preserve_range=True).astype(np.float32)
else:
frame = frame.astype(np.float32)
X[i] = frame
valid_frames += 1
except IndexError:
if valid_frames > 0:
X[i] = X[i % valid_frames]
else:
X[i] = 0.0
return X
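# Usage sketch (the file name and frame numbers are hypothetical):
#   X = load_video_clip_frames('clip.mp4', frames_numbers=[0, 10, 20], output_size=(224, 224))
#   # X.shape == (3, 224, 224, 3), dtype float32; frames past the end of the clip are
#   # filled by repeating earlier valid frames (or zeros if none could be read).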
if __name__ == '__main__':
pass
| 28.196262 | 88 | 0.557507 |
a63bb32f09e6e06183903f04e51f1688cf14a2d9 | 5,776 | py | Python | kgx/graph_operations/graph_merge.py | gouttegd/kgx | 1efa0bfaf62113855ffc1d002903236e4ff8706a | ["BSD-3-Clause"] | 32 | 2020-10-21T17:35:27.000Z | 2022-03-17T02:40:08.000Z | kgx/graph_operations/graph_merge.py | gouttegd/kgx | 1efa0bfaf62113855ffc1d002903236e4ff8706a | ["BSD-3-Clause"] | 136 | 2018-04-24T02:15:39.000Z | 2020-10-02T00:14:13.000Z | kgx/graph_operations/graph_merge.py | gouttegd/kgx | 1efa0bfaf62113855ffc1d002903236e4ff8706a | ["BSD-3-Clause"] | 19 | 2018-05-03T17:03:08.000Z | 2020-07-15T22:12:40.000Z |
import copy
from typing import List
from kgx.config import get_logger
from kgx.graph.base_graph import BaseGraph
from kgx.utils.kgx_utils import prepare_data_dict
log = get_logger()
def merge_all_graphs(graphs: List[BaseGraph], preserve: bool = True) -> BaseGraph:
"""
Merge one or more graphs.
.. note::
This method will first pick the largest graph in ``graphs`` and use that
as the target to merge the remaining graphs. This is to reduce the memory
footprint for this operation. The criteria for largest graph is the graph
with the largest number of edges.
The caveat is that the merge operation has a side effect where the largest
graph is altered.
If you would like to ensure that all incoming graphs remain as-is, then
look at ``merge_graphs``.
The outcome of the merge on node and edge properties depend on the ``preserve`` parameter.
If preserve is ``True`` then,
- core properties will not be overwritten
- other properties will be concatenated to a list
If preserve is ``False`` then,
- core properties will not be overwritten
- other properties will be replaced
Parameters
----------
graphs: List[kgx.graph.base_graph.BaseGraph]
A list of instances of BaseGraph to merge
preserve: bool
Whether or not to preserve conflicting properties
Returns
-------
kgx.graph.base_graph.BaseGraph
The merged graph
"""
graph_size = [len(x.edges()) for x in graphs]
largest = graphs.pop(graph_size.index(max(graph_size)))
log.debug(
f"Largest graph {largest.name} has {len(largest.nodes())} nodes and {len(largest.edges())} edges"
)
merged_graph = merge_graphs(largest, graphs, preserve)
return merged_graph
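# Usage sketch (illustrative only; the NxGraph import path is an assumption about the
# surrounding kgx package, not something defined in this module):
#   from kgx.graph.nx_graph import NxGraph
#   g1, g2, g3 = NxGraph(), NxGraph(), NxGraph()
#   ...  # populate the graphs
#   merged = merge_all_graphs([g1, g2, g3], preserve=True)
# Note that the largest input graph is modified in place, as described in the docstring above.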
def merge_graphs(
graph: BaseGraph, graphs: List[BaseGraph], preserve: bool = True
) -> BaseGraph:
"""
Merge all graphs in ``graphs`` to ``graph``.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
An instance of BaseGraph
graphs: List[kgx.graph.base_graph.BaseGraph]
A list of instances of BaseGraph to merge
preserve: bool
Whether or not to preserve conflicting properties
Returns
-------
kgx.graph.base_graph.BaseGraph
The merged graph
"""
for g in graphs:
node_merge_count = add_all_nodes(graph, g, preserve)
log.info(
f"Number of nodes merged between {graph.name} and {g.name}: {node_merge_count}"
)
edge_merge_count = add_all_edges(graph, g, preserve)
log.info(
f"Number of edges merged between {graph.name} and {g.name}: {edge_merge_count}"
)
return graph
def add_all_nodes(g1: BaseGraph, g2: BaseGraph, preserve: bool = True) -> int:
"""
Add all nodes from source graph (``g2``) to target graph (``g1``).
Parameters
----------
g1: kgx.graph.base_graph.BaseGraph
Target graph
g2: kgx.graph.base_graph.BaseGraph
Source graph
preserve: bool
Whether or not to preserve conflicting properties
Returns
-------
int
Number of nodes merged during this operation
"""
log.info(f"Adding {g2.number_of_nodes()} nodes from {g2.name} to {g1.name}")
merge_count = 0
for n, data in g2.nodes(data=True):
if n in g1.nodes():
merge_node(g1, n, data, preserve)
merge_count += 1
else:
g1.add_node(n, **data)
return merge_count
def merge_node(g: BaseGraph, n: str, data: dict, preserve: bool = True) -> dict:
"""
Merge node ``n`` into graph ``g``.
Parameters
----------
g: kgx.graph.base_graph.BaseGraph
The target graph
n: str
Node id
data: dict
Node properties
preserve: bool
Whether or not to preserve conflicting properties
Returns
-------
dict
The merged node
"""
existing_node = g.nodes()[n]
new_data = prepare_data_dict(
copy.deepcopy(existing_node), copy.deepcopy(data), preserve
)
g.add_node(n, **new_data)
return existing_node
def add_all_edges(g1: BaseGraph, g2: BaseGraph, preserve: bool = True) -> int:
"""
Add all edges from source graph (``g2``) to target graph (``g1``).
Parameters
----------
g1: kgx.graph.base_graph.BaseGraph
Target graph
g2: kgx.graph.base_graph.BaseGraph
Source graph
preserve: bool
Whether or not to preserve conflicting properties
Returns
-------
int
Number of edges merged during this operation
"""
log.info(f"Adding {g2.number_of_edges()} edges from {g2} to {g1}")
merge_count = 0
for u, v, key, data in g2.edges(keys=True, data=True):
if g1.has_edge(u, v, key):
merge_edge(g1, u, v, key, data, preserve)
merge_count += 1
else:
g1.add_edge(u, v, edge_key=key, **data)
return merge_count
def merge_edge(
g: BaseGraph, u: str, v: str, key: str, data: dict, preserve: bool = True
) -> dict:
"""
Merge edge ``u`` -> ``v`` into graph ``g``.
Parameters
----------
g: kgx.graph.base_graph.BaseGraph
The target graph
u: str
Subject node id
v: str
Object node id
key: str
Edge key
data: dict
Node properties
preserve: bool
Whether or not to preserve conflicting properties
Returns
-------
dict
The merged edge
"""
existing_edge = g.get_edge(u, v, key)
new_data = prepare_data_dict(
copy.deepcopy(existing_edge), copy.deepcopy(data), preserve
)
g.add_edge(u, v, edge_key=key, **new_data)
return existing_edge
| 26.990654 | 105 | 0.621711 |
ca30f97beff341ebbf7e24707bc35bf04e81b0e3 | 23,951 | py | Python | sphinx/cmd/quickstart.py | samdoran/sphinx | 4c91c038b220d07bbdfe0c1680af42fe897f342c | ["BSD-2-Clause"] | 4,973 | 2015-01-03T15:44:00.000Z | 2022-03-31T03:11:51.000Z | sphinx/cmd/quickstart.py | samdoran/sphinx | 4c91c038b220d07bbdfe0c1680af42fe897f342c | ["BSD-2-Clause"] | 7,850 | 2015-01-02T08:09:25.000Z | 2022-03-31T18:57:40.000Z | sphinx/cmd/quickstart.py | samdoran/sphinx | 4c91c038b220d07bbdfe0c1680af42fe897f342c | ["BSD-2-Clause"] | 2,179 | 2015-01-03T15:26:53.000Z | 2022-03-31T12:22:44.000Z |
"""
sphinx.cmd.quickstart
~~~~~~~~~~~~~~~~~~~~~
Quickly setup documentation source to work with Sphinx.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import argparse
import locale
import os
import sys
import time
from collections import OrderedDict
from os import path
from typing import Any, Callable, Dict, List, Union
# try to import readline, unix specific enhancement
try:
import readline
if readline.__doc__ and 'libedit' in readline.__doc__:
readline.parse_and_bind("bind ^I rl_complete")
USE_LIBEDIT = True
else:
readline.parse_and_bind("tab: complete")
USE_LIBEDIT = False
except ImportError:
readline = None
USE_LIBEDIT = False
from docutils.utils import column_width
import sphinx.locale
from sphinx import __display_version__, package_dir
from sphinx.locale import __
from sphinx.util.console import bold, color_terminal, colorize, nocolor, red # type: ignore
from sphinx.util.osutil import ensuredir
from sphinx.util.template import SphinxRenderer
EXTENSIONS = OrderedDict([
('autodoc', __('automatically insert docstrings from modules')),
('doctest', __('automatically test code snippets in doctest blocks')),
('intersphinx', __('link between Sphinx documentation of different projects')),
('todo', __('write "todo" entries that can be shown or hidden on build')),
('coverage', __('checks for documentation coverage')),
('imgmath', __('include math, rendered as PNG or SVG images')),
('mathjax', __('include math, rendered in the browser by MathJax')),
('ifconfig', __('conditional inclusion of content based on config values')),
('viewcode', __('include links to the source code of documented Python objects')),
('githubpages', __('create .nojekyll file to publish the document on GitHub pages')),
])
DEFAULTS = {
'path': '.',
'sep': False,
'dot': '_',
'language': None,
'suffix': '.rst',
'master': 'index',
'makefile': True,
'batchfile': True,
}
PROMPT_PREFIX = '> '
if sys.platform == 'win32':
# On Windows, show questions as bold because of color scheme of PowerShell (refs: #5294).
COLOR_QUESTION = 'bold'
else:
COLOR_QUESTION = 'purple'
# function to get input from terminal -- overridden by the test suite
def term_input(prompt: str) -> str:
if sys.platform == 'win32':
# Important: On windows, readline is not enabled by default. In these
# environment, escape sequences have been broken. To avoid the
# problem, quickstart uses ``print()`` to show prompt.
print(prompt, end='')
return input('')
else:
return input(prompt)
class ValidationError(Exception):
"""Raised for validation errors."""
def is_path(x: str) -> str:
x = path.expanduser(x)
if not path.isdir(x):
raise ValidationError(__("Please enter a valid path name."))
return x
def is_path_or_empty(x: str) -> str:
if x == '':
return x
return is_path(x)
def allow_empty(x: str) -> str:
return x
def nonempty(x: str) -> str:
if not x:
raise ValidationError(__("Please enter some text."))
return x
def choice(*l: str) -> Callable[[str], str]:
def val(x: str) -> str:
if x not in l:
raise ValidationError(__('Please enter one of %s.') % ', '.join(l))
return x
return val
def boolean(x: str) -> bool:
if x.upper() not in ('Y', 'YES', 'N', 'NO'):
raise ValidationError(__("Please enter either 'y' or 'n'."))
return x.upper() in ('Y', 'YES')
def suffix(x: str) -> str:
if not (x[0:1] == '.' and len(x) > 1):
raise ValidationError(__("Please enter a file suffix, e.g. '.rst' or '.txt'."))
return x
def ok(x: str) -> str:
return x
def do_prompt(text: str, default: str = None, validator: Callable[[str], Any] = nonempty) -> Union[str, bool]: # NOQA
while True:
if default is not None:
prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default)
else:
prompt = PROMPT_PREFIX + text + ': '
if USE_LIBEDIT:
# Note: libedit has a problem for combination of ``input()`` and escape
# sequence (see #5335). To avoid the problem, all prompts are not colored
# on libedit.
pass
elif readline:
# pass input_mode=True if readline available
prompt = colorize(COLOR_QUESTION, prompt, input_mode=True)
else:
prompt = colorize(COLOR_QUESTION, prompt, input_mode=False)
x = term_input(prompt).strip()
if default and not x:
x = default
try:
x = validator(x)
except ValidationError as err:
print(red('* ' + str(err)))
continue
break
return x
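# Usage sketch for the validators above (illustrative, mirroring calls made later in
# this file); a ValidationError raised by the validator re-prompts instead of aborting:
#   do_prompt(__('Create Makefile? (y/n)'), 'y', boolean)   # -> True or False
#   do_prompt(__('Source file suffix'), '.rst', suffix)     # rejects values not starting with '.'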
class QuickstartRenderer(SphinxRenderer):
def __init__(self, templatedir: str) -> None:
self.templatedir = templatedir or ''
super().__init__()
def _has_custom_template(self, template_name: str) -> bool:
"""Check if custom template file exists.
Note: Please don't use this function from extensions.
It will be removed in the future without deprecation period.
"""
template = path.join(self.templatedir, path.basename(template_name))
if self.templatedir and path.exists(template):
return True
else:
return False
def render(self, template_name: str, context: Dict) -> str:
if self._has_custom_template(template_name):
custom_template = path.join(self.templatedir, path.basename(template_name))
return self.render_from_file(custom_template, context)
else:
return super().render(template_name, context)
def ask_user(d: Dict) -> None:
"""Ask the user for quickstart values missing from *d*.
Values are:
* path: root path
* sep: separate source and build dirs (bool)
* dot: replacement for dot in _templates etc.
* project: project name
* author: author names
* version: version of project
* release: release of project
* language: document language
* suffix: source file suffix
* master: master document name
* extensions: extensions to use (list)
* makefile: make Makefile
* batchfile: make command file
"""
print(bold(__('Welcome to the Sphinx %s quickstart utility.')) % __display_version__)
print()
print(__('Please enter values for the following settings (just press Enter to\n'
'accept a default value, if one is given in brackets).'))
if 'path' in d:
print()
print(bold(__('Selected root path: %s')) % d['path'])
else:
print()
print(__('Enter the root path for documentation.'))
d['path'] = do_prompt(__('Root path for the documentation'), '.', is_path)
while path.isfile(path.join(d['path'], 'conf.py')) or \
path.isfile(path.join(d['path'], 'source', 'conf.py')):
print()
print(bold(__('Error: an existing conf.py has been found in the '
'selected root path.')))
print(__('sphinx-quickstart will not overwrite existing Sphinx projects.'))
print()
d['path'] = do_prompt(__('Please enter a new root path (or just Enter to exit)'),
'', is_path_or_empty)
if not d['path']:
sys.exit(1)
if 'sep' not in d:
print()
print(__('You have two options for placing the build directory for Sphinx output.\n'
'Either, you use a directory "_build" within the root path, or you separate\n'
'"source" and "build" directories within the root path.'))
d['sep'] = do_prompt(__('Separate source and build directories (y/n)'), 'n', boolean)
if 'dot' not in d:
print()
print(__('Inside the root directory, two more directories will be created; "_templates"\n' # NOQA
'for custom HTML templates and "_static" for custom stylesheets and other static\n' # NOQA
'files. You can enter another prefix (such as ".") to replace the underscore.')) # NOQA
d['dot'] = do_prompt(__('Name prefix for templates and static dir'), '_', ok)
if 'project' not in d:
print()
print(__('The project name will occur in several places in the built documentation.'))
d['project'] = do_prompt(__('Project name'))
if 'author' not in d:
d['author'] = do_prompt(__('Author name(s)'))
if 'version' not in d:
print()
print(__('Sphinx has the notion of a "version" and a "release" for the\n'
'software. Each version can have multiple releases. For example, for\n'
'Python the version is something like 2.5 or 3.0, while the release is\n'
'something like 2.5.1 or 3.0a1. If you don\'t need this dual structure,\n'
'just set both to the same value.'))
d['version'] = do_prompt(__('Project version'), '', allow_empty)
if 'release' not in d:
d['release'] = do_prompt(__('Project release'), d['version'], allow_empty)
if 'language' not in d:
print()
print(__('If the documents are to be written in a language other than English,\n'
'you can select a language here by its language code. Sphinx will then\n'
'translate text that it generates into that language.\n'
'\n'
'For a list of supported codes, see\n'
'https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-language.')) # NOQA
d['language'] = do_prompt(__('Project language'), 'en')
if d['language'] == 'en':
d['language'] = None
if 'suffix' not in d:
print()
print(__('The file name suffix for source files. Commonly, this is either ".txt"\n'
'or ".rst". Only files with this suffix are considered documents.'))
d['suffix'] = do_prompt(__('Source file suffix'), '.rst', suffix)
if 'master' not in d:
print()
print(__('One document is special in that it is considered the top node of the\n'
'"contents tree", that is, it is the root of the hierarchical structure\n'
'of the documents. Normally, this is "index", but if your "index"\n'
'document is a custom template, you can also set this to another filename.'))
d['master'] = do_prompt(__('Name of your master document (without suffix)'), 'index')
while path.isfile(path.join(d['path'], d['master'] + d['suffix'])) or \
path.isfile(path.join(d['path'], 'source', d['master'] + d['suffix'])):
print()
print(bold(__('Error: the master file %s has already been found in the '
'selected root path.') % (d['master'] + d['suffix'])))
print(__('sphinx-quickstart will not overwrite the existing file.'))
print()
d['master'] = do_prompt(__('Please enter a new file name, or rename the '
'existing file and press Enter'), d['master'])
if 'extensions' not in d:
print(__('Indicate which of the following Sphinx extensions should be enabled:'))
d['extensions'] = []
for name, description in EXTENSIONS.items():
if do_prompt('%s: %s (y/n)' % (name, description), 'n', boolean):
d['extensions'].append('sphinx.ext.%s' % name)
# Handle conflicting options
if {'sphinx.ext.imgmath', 'sphinx.ext.mathjax'}.issubset(d['extensions']):
print(__('Note: imgmath and mathjax cannot be enabled at the same time. '
'imgmath has been deselected.'))
d['extensions'].remove('sphinx.ext.imgmath')
if 'makefile' not in d:
print()
print(__('A Makefile and a Windows command file can be generated for you so that you\n'
'only have to run e.g. `make html\' instead of invoking sphinx-build\n'
'directly.'))
d['makefile'] = do_prompt(__('Create Makefile? (y/n)'), 'y', boolean)
if 'batchfile' not in d:
d['batchfile'] = do_prompt(__('Create Windows command file? (y/n)'), 'y', boolean)
print()
def generate(d: Dict, overwrite: bool = True, silent: bool = False, templatedir: str = None
) -> None:
"""Generate project based on values in *d*."""
template = QuickstartRenderer(templatedir=templatedir)
if 'mastertoctree' not in d:
d['mastertoctree'] = ''
if 'mastertocmaxdepth' not in d:
d['mastertocmaxdepth'] = 2
d['root_doc'] = d['master']
d['now'] = time.asctime()
d['project_underline'] = column_width(d['project']) * '='
d.setdefault('extensions', [])
d['copyright'] = time.strftime('%Y') + ', ' + d['author']
d["path"] = os.path.abspath(d['path'])
ensuredir(d['path'])
srcdir = path.join(d['path'], 'source') if d['sep'] else d['path']
ensuredir(srcdir)
if d['sep']:
builddir = path.join(d['path'], 'build')
d['exclude_patterns'] = ''
else:
builddir = path.join(srcdir, d['dot'] + 'build')
exclude_patterns = map(repr, [
d['dot'] + 'build',
'Thumbs.db', '.DS_Store',
])
d['exclude_patterns'] = ', '.join(exclude_patterns)
ensuredir(builddir)
ensuredir(path.join(srcdir, d['dot'] + 'templates'))
ensuredir(path.join(srcdir, d['dot'] + 'static'))
def write_file(fpath: str, content: str, newline: str = None) -> None:
if overwrite or not path.isfile(fpath):
if 'quiet' not in d:
print(__('Creating file %s.') % fpath)
with open(fpath, 'wt', encoding='utf-8', newline=newline) as f:
f.write(content)
else:
if 'quiet' not in d:
print(__('File %s already exists, skipping.') % fpath)
conf_path = os.path.join(templatedir, 'conf.py_t') if templatedir else None
if not conf_path or not path.isfile(conf_path):
conf_path = os.path.join(package_dir, 'templates', 'quickstart', 'conf.py_t')
with open(conf_path) as f:
conf_text = f.read()
write_file(path.join(srcdir, 'conf.py'), template.render_string(conf_text, d))
masterfile = path.join(srcdir, d['master'] + d['suffix'])
if template._has_custom_template('quickstart/master_doc.rst_t'):
msg = ('A custom template `master_doc.rst_t` found. It has been renamed to '
'`root_doc.rst_t`. Please rename it on your project too.')
print(colorize('red', msg)) # RemovedInSphinx60Warning
write_file(masterfile, template.render('quickstart/master_doc.rst_t', d))
else:
write_file(masterfile, template.render('quickstart/root_doc.rst_t', d))
if d.get('make_mode') is True:
makefile_template = 'quickstart/Makefile.new_t'
batchfile_template = 'quickstart/make.bat.new_t'
else:
makefile_template = 'quickstart/Makefile_t'
batchfile_template = 'quickstart/make.bat_t'
if d['makefile'] is True:
d['rsrcdir'] = 'source' if d['sep'] else '.'
d['rbuilddir'] = 'build' if d['sep'] else d['dot'] + 'build'
# use binary mode, to avoid writing \r\n on Windows
write_file(path.join(d['path'], 'Makefile'),
template.render(makefile_template, d), '\n')
if d['batchfile'] is True:
d['rsrcdir'] = 'source' if d['sep'] else '.'
d['rbuilddir'] = 'build' if d['sep'] else d['dot'] + 'build'
write_file(path.join(d['path'], 'make.bat'),
template.render(batchfile_template, d), '\r\n')
if silent:
return
print()
print(bold(__('Finished: An initial directory structure has been created.')))
print()
print(__('You should now populate your master file %s and create other documentation\n'
'source files. ') % masterfile, end='')
if d['makefile'] or d['batchfile']:
print(__('Use the Makefile to build the docs, like so:\n'
' make builder'))
else:
print(__('Use the sphinx-build command to build the docs, like so:\n'
' sphinx-build -b builder %s %s') % (srcdir, builddir))
print(__('where "builder" is one of the supported builders, '
'e.g. html, latex or linkcheck.'))
print()
def valid_dir(d: Dict) -> bool:
dir = d['path']
if not path.exists(dir):
return True
if not path.isdir(dir):
return False
if {'Makefile', 'make.bat'} & set(os.listdir(dir)):
return False
if d['sep']:
dir = os.path.join('source', dir)
if not path.exists(dir):
return True
if not path.isdir(dir):
return False
reserved_names = [
'conf.py',
d['dot'] + 'static',
d['dot'] + 'templates',
d['master'] + d['suffix'],
]
if set(reserved_names) & set(os.listdir(dir)):
return False
return True
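# Illustrative non-interactive invocation (quiet mode requires --project and --author,
# as enforced in main() below); the project name, author and target directory are
# placeholders:
#   sphinx-quickstart --quiet --project Demo --author Me --sep --ext-autodoc docs
# creates docs/source/conf.py, docs/source/index.rst, docs/Makefile and docs/make.bat.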
def get_parser() -> argparse.ArgumentParser:
description = __(
"\n"
"Generate required files for a Sphinx project.\n"
"\n"
"sphinx-quickstart is an interactive tool that asks some questions about your\n"
"project and then generates a complete documentation directory and sample\n"
"Makefile to be used with sphinx-build.\n"
)
parser = argparse.ArgumentParser(
usage='%(prog)s [OPTIONS] <PROJECT_DIR>',
epilog=__("For more information, visit <https://www.sphinx-doc.org/>."),
description=description)
parser.add_argument('-q', '--quiet', action='store_true', dest='quiet',
default=None,
help=__('quiet mode'))
parser.add_argument('--version', action='version', dest='show_version',
version='%%(prog)s %s' % __display_version__)
parser.add_argument('path', metavar='PROJECT_DIR', default='.', nargs='?',
help=__('project root'))
group = parser.add_argument_group(__('Structure options'))
group.add_argument('--sep', action='store_true', dest='sep', default=None,
help=__('if specified, separate source and build dirs'))
group.add_argument('--no-sep', action='store_false', dest='sep',
help=__('if specified, create build dir under source dir'))
group.add_argument('--dot', metavar='DOT', default='_',
help=__('replacement for dot in _templates etc.'))
group = parser.add_argument_group(__('Project basic options'))
group.add_argument('-p', '--project', metavar='PROJECT', dest='project',
help=__('project name'))
group.add_argument('-a', '--author', metavar='AUTHOR', dest='author',
help=__('author names'))
group.add_argument('-v', metavar='VERSION', dest='version', default='',
help=__('version of project'))
group.add_argument('-r', '--release', metavar='RELEASE', dest='release',
help=__('release of project'))
group.add_argument('-l', '--language', metavar='LANGUAGE', dest='language',
help=__('document language'))
group.add_argument('--suffix', metavar='SUFFIX', default='.rst',
help=__('source file suffix'))
group.add_argument('--master', metavar='MASTER', default='index',
help=__('master document name'))
group.add_argument('--epub', action='store_true', default=False,
help=__('use epub'))
group = parser.add_argument_group(__('Extension options'))
for ext in EXTENSIONS:
group.add_argument('--ext-%s' % ext, action='append_const',
const='sphinx.ext.%s' % ext, dest='extensions',
help=__('enable %s extension') % ext)
group.add_argument('--extensions', metavar='EXTENSIONS', dest='extensions',
action='append', help=__('enable arbitrary extensions'))
group = parser.add_argument_group(__('Makefile and Batchfile creation'))
group.add_argument('--makefile', action='store_true', dest='makefile', default=True,
help=__('create makefile'))
group.add_argument('--no-makefile', action='store_false', dest='makefile',
help=__('do not create makefile'))
group.add_argument('--batchfile', action='store_true', dest='batchfile', default=True,
help=__('create batchfile'))
group.add_argument('--no-batchfile', action='store_false',
dest='batchfile',
help=__('do not create batchfile'))
group.add_argument('-m', '--use-make-mode', action='store_true',
dest='make_mode', default=True,
help=__('use make-mode for Makefile/make.bat'))
group.add_argument('-M', '--no-use-make-mode', action='store_false',
dest='make_mode',
help=__('do not use make-mode for Makefile/make.bat'))
group = parser.add_argument_group(__('Project templating'))
group.add_argument('-t', '--templatedir', metavar='TEMPLATEDIR',
dest='templatedir',
help=__('template directory for template files'))
group.add_argument('-d', metavar='NAME=VALUE', action='append',
dest='variables',
help=__('define a template variable'))
return parser
def main(argv: List[str] = sys.argv[1:]) -> int:
sphinx.locale.setlocale(locale.LC_ALL, '')
sphinx.locale.init_console(os.path.join(package_dir, 'locale'), 'sphinx')
if not color_terminal():
nocolor()
# parse options
parser = get_parser()
try:
args = parser.parse_args(argv)
except SystemExit as err:
return err.code
d = vars(args)
# delete None or False value
d = {k: v for k, v in d.items() if v is not None}
# handle use of CSV-style extension values
d.setdefault('extensions', [])
for ext in d['extensions'][:]:
if ',' in ext:
d['extensions'].remove(ext)
d['extensions'].extend(ext.split(','))
try:
if 'quiet' in d:
if not {'project', 'author'}.issubset(d):
print(__('"quiet" is specified, but any of "project" or '
'"author" is not specified.'))
return 1
if {'quiet', 'project', 'author'}.issubset(d):
# quiet mode with all required params satisfied, use default
d.setdefault('version', '')
d.setdefault('release', d['version'])
d2 = DEFAULTS.copy()
d2.update(d)
d = d2
if not valid_dir(d):
print()
print(bold(__('Error: specified path is not a directory, or sphinx'
' files already exist.')))
print(__('sphinx-quickstart only generate into a empty directory.'
' Please specify a new root path.'))
return 1
else:
ask_user(d)
except (KeyboardInterrupt, EOFError):
print()
print('[Interrupted.]')
return 130 # 128 + SIGINT
for variable in d.get('variables', []):
try:
name, value = variable.split('=')
d[name] = value
except ValueError:
print(__('Invalid template variable: %s') % variable)
generate(d, overwrite=False, templatedir=args.templatedir)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 39.199673 | 118 | 0.58866 |
662ff00e75076ee6152f0abd5c703559d2f9b2bf | 5,897 | py | Python | queue_services/entity-filer/tests/unit/filing_processors/test_alteration.py | vysakh-menon-aot/lear | 7bae45efa2f9f89a7e826567c85de55fde68e09e | ["Apache-2.0"] | 8 | 2019-06-19T16:16:15.000Z | 2021-08-28T23:56:40.000Z | queue_services/entity-filer/tests/unit/filing_processors/test_alteration.py | vysakh-menon-aot/lear | 7bae45efa2f9f89a7e826567c85de55fde68e09e | ["Apache-2.0"] | 796 | 2019-03-07T19:25:50.000Z | 2022-03-31T20:32:57.000Z | queue_services/entity-filer/tests/unit/filing_processors/test_alteration.py | vysakh-menon-aot/lear | 7bae45efa2f9f89a7e826567c85de55fde68e09e | ["Apache-2.0"] | 82 | 2019-01-30T20:06:14.000Z | 2022-03-29T20:38:31.000Z |
# Copyright © 2020 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Unit Tests for the Incorporation filing."""
import copy
import random
from datetime import datetime
from typing import Final
import pytest
from legal_api.models import Business, Filing
from registry_schemas.example_data import (
ALTERATION,
ALTERATION_FILING_TEMPLATE,
BUSINESS,
COURT_ORDER,
FILING_HEADER,
)
from entity_filer.filing_meta import FilingMeta
from entity_filer.filing_processors import alteration
from entity_filer.worker import process_filing
from tests.unit import create_business, create_filing
CONTACT_POINT = {
'email': 'no_one@never.get',
'phone': '123-456-7890'
}
@pytest.mark.parametrize(
'orig_legal_type, new_legal_type',
[
(Business.LegalTypes.COMP.value, Business.LegalTypes.BCOMP.value),
(Business.LegalTypes.BCOMP.value, Business.LegalTypes.COMP.value)
]
)
def test_alteration_process(app, session, orig_legal_type, new_legal_type):
"""Assert that the business legal type is altered."""
# setup
identifier = 'BC1234567'
business = create_business(identifier)
business.legal_type = orig_legal_type
alteration_filing = copy.deepcopy(FILING_HEADER)
alteration_filing['filing']['business']['legalType'] = orig_legal_type
alteration_filing['filing']['alteration'] = copy.deepcopy(ALTERATION)
alteration_filing['filing']['alteration']['business']['legalType'] = new_legal_type
payment_id = str(random.SystemRandom().getrandbits(0x58))
filing_submission = create_filing(payment_id, alteration_filing, business_id=business.id)
filing_meta = FilingMeta()
# test
alteration.process(business=business,
filing_submission=filing_submission,
filing=alteration_filing['filing'],
filing_meta=filing_meta)
# validate
assert business.legal_type == new_legal_type
@pytest.mark.parametrize(
'orig_legal_type, new_legal_type',
[
(Business.LegalTypes.COMP.value, Business.LegalTypes.BCOMP.value),
(Business.LegalTypes.BCOMP.value, Business.LegalTypes.COMP.value)
]
)
async def test_worker_alteration(app, session, mocker, orig_legal_type, new_legal_type):
"""Assert the worker process calls the alteration correctly."""
identifier = 'BC1234567'
business = create_business(identifier, legal_type=orig_legal_type)
filing = copy.deepcopy(ALTERATION_FILING_TEMPLATE)
filing['filing']['business']['legalType'] = orig_legal_type
filing['filing']['alteration']['business']['legalType'] = new_legal_type
payment_id = str(random.SystemRandom().getrandbits(0x58))
filing_id = (create_filing(payment_id, filing, business_id=business.id)).id
filing_msg = {'filing': {'id': filing_id}}
# mock out the email sender and event publishing
mocker.patch('entity_filer.worker.publish_email_message', return_value=None)
mocker.patch('entity_filer.worker.publish_event', return_value=None)
mocker.patch('entity_filer.filing_processors.filing_components.name_request.consume_nr', return_value=None)
mocker.patch('entity_filer.filing_processors.filing_components.business_profile.update_business_profile',
return_value=None)
mocker.patch('legal_api.services.bootstrap.AccountService.update_entity', return_value=None)
# Test
await process_filing(filing_msg, app)
# Check outcome
business = Business.find_by_internal_id(business.id)
assert business.legal_type == new_legal_type
async def test_worker_alteration_court_order(app, session, mocker):
"""Assert the worker process calls the alteration correctly."""
identifier = 'BC1234567'
business = create_business(identifier, legal_type='BC')
file_number: Final = '#1234-5678/90'
order_date: Final = '2021-01-30T09:56:01+08:00'
effect_of_order: Final = 'hasPlan'
filing = copy.deepcopy(FILING_HEADER)
filing['filing']['alteration'] = {}
filing['filing']['alteration']['business'] = BUSINESS
filing['filing']['alteration']['contactPoint'] = CONTACT_POINT
filing['filing']['alteration']['courtOrder'] = COURT_ORDER
filing['filing']['alteration']['courtOrder']['effectOfOrder'] = effect_of_order
payment_id = str(random.SystemRandom().getrandbits(0x58))
filing_id = (create_filing(payment_id, filing, business_id=business.id)).id
filing_msg = {'filing': {'id': filing_id}}
# mock out the email sender and event publishing
mocker.patch('entity_filer.worker.publish_email_message', return_value=None)
mocker.patch('entity_filer.worker.publish_event', return_value=None)
mocker.patch('entity_filer.filing_processors.filing_components.name_request.consume_nr', return_value=None)
mocker.patch('entity_filer.filing_processors.filing_components.business_profile.update_business_profile',
return_value=None)
mocker.patch('legal_api.services.bootstrap.AccountService.update_entity', return_value=None)
# Test
await process_filing(filing_msg, app)
# Check outcome
final_filing = Filing.find_by_id(filing_id)
assert file_number == final_filing.court_order_file_number
assert datetime.fromisoformat(order_date) == final_filing.court_order_date
assert effect_of_order == final_filing.court_order_effect_of_order
| 39.577181 | 111 | 0.744446 |
efec5c9369ffee7504a3f25914739553d99058e0 | 4,611 | py | Python | wlanpi_core/api/api_v1/endpoints/network_api.py | WLAN-Pi/wlanpi-core | 7c626dac990c8240b45813ab5878041d1f3569c4 | ["BSD-3-Clause"] | 1 | 2021-09-07T05:30:02.000Z | 2021-09-07T05:30:02.000Z | wlanpi_core/api/api_v1/endpoints/network_api.py | WLAN-Pi/wlanpi-core | 7c626dac990c8240b45813ab5878041d1f3569c4 | ["BSD-3-Clause"] | 13 | 2021-09-12T15:42:03.000Z | 2022-02-21T22:20:54.000Z | wlanpi_core/api/api_v1/endpoints/network_api.py | WLAN-Pi/wlanpi-core | 7c626dac990c8240b45813ab5878041d1f3569c4 | ["BSD-3-Clause"] | null | null | null |
import json
import logging
from fastapi import APIRouter, Response
from starlette.responses import JSONResponse
from wlanpi_core.models.validation_error import ValidationError
from wlanpi_core.schemas import network
from wlanpi_core.services import network_service
router = APIRouter()
log = logging.getLogger("uvicorn")
@router.get("/neighbors") # , response_model=network.Neighbors)
async def show_neighbors():
"""
Run `lldpcli show neighbors -f json` and relay results to consumer.
TODO: remove test psuedo code, this is what Swagger UI is for:
```
import urllib.request,json,pprint
with urllib.request.urlopen('http://[WLANPI]/api/v1/network/neighbors') as resp:
data = json.loads(resp.read().decode())
pprint.pprint(data)
```
"""
try:
resp = await network_service.get_neighbor_results()
return json.loads(resp)
except ValidationError as ve:
return Response(content=ve.error_msg, status_code=ve.status_code)
except Exception as ex:
return Response(content=str(ex), status_code=500)
@router.get("/publicipv4", response_model=network.PublicIP)
async def retrieve_public_ip_information():
"""
publicip leverages the `ifconfig.co/json` service to retrieve public IP information.
"""
try:
return await network_service.get_public_ipv4()
except ValidationError as ve:
return Response(content=ve.error_msg, status_code=ve.status_code)
except Exception as ex:
return Response(content=str(ex), status_code=500)
@router.get("/publicipv6", response_model=network.PublicIP)
async def retrieve_public_ipv6_information():
"""
publicip leverages the `ifconfig.co/json` service to retrieve public IP information.
"""
try:
return await network_service.get_public_ipv6()
except ValidationError as ve:
return Response(content=ve.error_msg, status_code=ve.status_code)
except Exception as ex:
return Response(content=str(ex), status_code=500)
@router.get("/localipv4")
async def get_local_ipv4():
"""
Return the determined primary local IPv4 address without a given interface.
TODO: Test get_local_ipv4() when Pi has no connectivity. Abstract out to a service.
"""
try:
return await network_service.get_local_ipv4()
except ValidationError as ve:
return Response(content=ve.error_msg, status_code=ve.status_code)
except Exception as ex:
return Response(content=str(ex), status_code=500)
@router.get("/localipv6")
async def get_local_ipv6():
"""
Return the determined primary local IPv6 address without a given interface.
TODO: Test get_local_ipv6() when Pi has no connectivity. Abstract out to a service.
"""
try:
return await network_service.get_local_ipv6()
except ValidationError as ve:
return Response(content=ve.error_msg, status_code=ve.status_code)
except Exception as ex:
return Response(content=str(ex), status_code=500)
@router.get("/ipv4_reachability")
async def get_ipv4_internet_reachability(host="8.8.8.8", port=53, timeout=3):
"""
Get IPv4 reachability to Internet from the Pi.
"""
try:
if network_service.get_ipv4_internet_reachability(host, port, timeout):
return JSONResponse(
content={"reachability": True, "host": host, "port": port},
status_code=200,
)
else:
return JSONResponse(
content={"reachability": False, "host": host, "port": port},
status_code=404,
)
except ValidationError as ve:
return Response(content=ve.error_msg, status_code=ve.status_code)
except Exception as ex:
return Response(content=str(ex), status_code=500)
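# Illustrative request against the endpoint above (host and port shown are just the
# defaults; the /api/v1/network prefix follows the example given in show_neighbors):
#   curl "http://<wlanpi>/api/v1/network/ipv4_reachability?host=8.8.8.8&port=53"
# returns {"reachability": true, "host": "8.8.8.8", "port": 53} with status 200,
# or the same payload with "reachability": false and status 404 when unreachable.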
@router.get("/ipv6_reachability")
async def get_ipv6_internet_reachability(host="2001:4860:4860::8888", port=53, timeout=3):
"""
Get IPv6 reachability to Internet from the Pi.
"""
try:
if network_service.get_ipv4_internet_reachability(host, port, timeout):
return JSONResponse(
content={"reachability": True, "host": host, "port": port},
status_code=200,
)
else:
return JSONResponse(
content={"reachability": False, "host": host, "port": port},
status_code=404,
)
except ValidationError as ve:
return Response(content=ve.error_msg, status_code=ve.status_code)
except Exception as ex:
return Response(content=str(ex), status_code=500) | 33.904412 | 90 | 0.679896 |
0bcacbe7f47c821cb8c1a705b44ae138a8b3a3bd | 386 | py | Python | Food/migrations/0011_menuitem_position.py | PopaGabriel/Food-Ecommerce-Site | 7f7cde94939f02f13df5afa865cddc72981481e2 | ["MIT"] | 1 | 2021-08-12T08:46:56.000Z | 2021-08-12T08:46:56.000Z | Food/migrations/0011_menuitem_position.py | PopaGabriel/Food-Ecommerce-Site | 7f7cde94939f02f13df5afa865cddc72981481e2 | ["MIT"] | null | null | null | Food/migrations/0011_menuitem_position.py | PopaGabriel/Food-Ecommerce-Site | 7f7cde94939f02f13df5afa865cddc72981481e2 | ["MIT"] | null | null | null |
# Generated by Django 3.2.5 on 2021-10-10 16:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Food', '0010_alter_menuitem_image'),
]
operations = [
migrations.AddField(
model_name='menuitem',
name='position',
field=models.IntegerField(default=0),
),
]
| 20.315789 | 49 | 0.598446 |
cfc9a45a1d22745dde072ab72a62a4aea85626f6 | 1,847 | py | Python | test/gst-msdk/vpp/brightness.py | ChipsnMedia/vaapi-fits | c7671a2c4d331c1031b89b799d196fbca85fe2d0 | ["BSD-3-Clause"] | null | null | null | test/gst-msdk/vpp/brightness.py | ChipsnMedia/vaapi-fits | c7671a2c4d331c1031b89b799d196fbca85fe2d0 | ["BSD-3-Clause"] | null | null | null | test/gst-msdk/vpp/brightness.py | ChipsnMedia/vaapi-fits | c7671a2c4d331c1031b89b799d196fbca85fe2d0 | ["BSD-3-Clause"] | 1 | 2021-07-15T02:07:26.000Z | 2021-07-15T02:07:26.000Z |
###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ....lib.gstreamer.msdk.util import *
from ....lib.gstreamer.msdk.vpp import VppTest
spec = load_test_spec("vpp", "brightness")
spec_r2r = load_test_spec("vpp", "brightness", "r2r")
@slash.requires(*platform.have_caps("vpp", "brightness"))
class default(VppTest):
def before(self):
vars(self).update(
caps = platform.get_caps("vpp", "brightness"),
vpp_op = "brightness",
NOOP = 50 # i.e. 0.0 in msdkvpp range should result in no-op result
)
super(default, self).before()
def init(self, tspec, case, level):
vars(self).update(tspec[case].copy())
vars(self).update(
case = case,
level = level,
)
@slash.parametrize(*gen_vpp_brightness_parameters(spec))
def test(self, case, level):
self.init(spec, case, level)
self.vpp()
@slash.parametrize(*gen_vpp_brightness_parameters(spec_r2r))
def test_r2r(self, case, level):
self.init(spec_r2r, case, level)
vars(self).setdefault("r2r", 5)
self.vpp()
def check_metrics(self):
psnr = calculate_psnr(
self.source, self.ofile,
self.width, self.height,
self.frames, self.format)
def compare(k, ref, actual):
assert psnr[-2] == 100, "Cb(U) should not be affected by BRIGHTNESS filter"
assert psnr[-1] == 100, "Cr(V) should not be affected by BRIGHTNESS filter"
if self.level == self.NOOP:
assert psnr[-3] == 100, "Luma (Y) should not be affected at NOOP level"
else:
assert ref is not None, "Invalid reference value"
assert abs(ref[-3] - actual[-3]) < 0.2, "Luma (Y) out of baseline range"
get_media().baseline.check_result(
compare = compare, context = self.refctx, psnr = psnr)
| 31.305085 | 81 | 0.639415 |
277b4acbc1def45961351e8326b49eea71cb7342 | 1,093 | py | Python | lists_dictionary/Bread Factory.py | vasetousa/Python-fundamentals | 3180c03de28b4f4d36d966221719069a7e18e521 | ["MIT"] | null | null | null | lists_dictionary/Bread Factory.py | vasetousa/Python-fundamentals | 3180c03de28b4f4d36d966221719069a7e18e521 | ["MIT"] | null | null | null | lists_dictionary/Bread Factory.py | vasetousa/Python-fundamentals | 3180c03de28b4f4d36d966221719069a7e18e521 | ["MIT"] | null | null | null |
events = input().split("|")
energy = 100
coins = 100
for el in range(len(events)):
string = events[el]
order, value = string.split("-")
value = int(value)
if order == "rest":
energy += value
if energy > 100:
      difference = value - (energy - 100)
      energy = 100
print(f"You gained {difference} energy.")
print(f"Current energy: {energy}.")
else:
print(f"You gained {value} energy.")
print(f"Current energy: {energy}.")
elif order == "order":
if energy >= 30:
energy -= 30
coins += value
print(f"You earned {value} coins.")
else:
energy += 50
print(f"You had to rest!")
else:
if coins > 0:
coins -= value
if coins > 0:
print(f"You bought {order}.")
else:
print(f"Closed! Cannot afford {order}.")
break
if coins > 0:
print(f"""Day completed!
Coins: {coins}
Energy: {energy}""")
# rest-2|order-10|eggs-100|rest-10 | 25.418605 | 56 | 0.486734 |
bcecbad62a1b03d29a9ffb2b0663d02dd5fe10d2 | 779 | py | Python | message/backends/email.py | gucciwu/cycling-cap-pyrenees | 66f62dc5c074029801cddde3319a507d8c182374 | [
"MIT"
] | null | null | null | message/backends/email.py | gucciwu/cycling-cap-pyrenees | 66f62dc5c074029801cddde3319a507d8c182374 | [
"MIT"
] | null | null | null | message/backends/email.py | gucciwu/cycling-cap-pyrenees | 66f62dc5c074029801cddde3319a507d8c182374 | [
"MIT"
] | null | null | null | import logging
from django.core.mail import send_mail
from message.backends.Base import BaseEmailBackend
from entry import settings
from message.exceptions import EmailException
from message.models import Message
logger = logging.getLogger(__name__)
class DjangoEmailBackend(BaseEmailBackend):
@staticmethod
def send(message: Message):
try:
return send_mail(subject=message.title,
from_email=settings.SERVER_EMAIL,
recipient_list=[message.receiver.email],
message=message.content,
html_message=message.content,
fail_silently=False)
except EmailException as err:
logger.warning(err)
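# Example usage (sketch; assumes a saved Message whose receiver has an email address):
#   delivered = DjangoEmailBackend.send(message)
# Django's send_mail returns the number of successfully delivered messages (0 or 1 here).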
| 32.458333 | 69 | 0.634146 |
c3d44a9bd633d0454326fb97bda3ea9e07cf1fc3 | 12,485 | py | Python | sdk/python/pulumi_azure_native/compute/v20150615/virtual_machine_extension.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/compute/v20150615/virtual_machine_extension.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/compute/v20150615/virtual_machine_extension.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['VirtualMachineExtension']
class VirtualMachineExtension(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_upgrade_minor_version: Optional[pulumi.Input[bool]] = None,
force_update_tag: Optional[pulumi.Input[str]] = None,
instance_view: Optional[pulumi.Input[pulumi.InputType['VirtualMachineExtensionInstanceViewArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
protected_settings: Optional[Any] = None,
publisher: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
settings: Optional[Any] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
type_handler_version: Optional[pulumi.Input[str]] = None,
vm_extension_name: Optional[pulumi.Input[str]] = None,
vm_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Describes a Virtual Machine Extension.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_upgrade_minor_version: Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
:param pulumi.Input[str] force_update_tag: How the extension handler should be forced to update even if the extension configuration has not changed.
:param pulumi.Input[pulumi.InputType['VirtualMachineExtensionInstanceViewArgs']] instance_view: The virtual machine extension instance view.
:param pulumi.Input[str] location: Resource location
:param Any protected_settings: The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
:param pulumi.Input[str] publisher: The name of the extension handler publisher.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param Any settings: Json formatted public settings for the extension.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
:param pulumi.Input[str] type: Specifies the type of the extension; an example is "CustomScriptExtension".
:param pulumi.Input[str] type_handler_version: Specifies the version of the script handler.
:param pulumi.Input[str] vm_extension_name: The name of the virtual machine extension.
:param pulumi.Input[str] vm_name: The name of the virtual machine where the extension should be created or updated.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['auto_upgrade_minor_version'] = auto_upgrade_minor_version
__props__['force_update_tag'] = force_update_tag
__props__['instance_view'] = instance_view
__props__['location'] = location
__props__['protected_settings'] = protected_settings
__props__['publisher'] = publisher
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['settings'] = settings
__props__['tags'] = tags
__props__['type'] = type
__props__['type_handler_version'] = type_handler_version
__props__['vm_extension_name'] = vm_extension_name
if vm_name is None and not opts.urn:
raise TypeError("Missing required property 'vm_name'")
__props__['vm_name'] = vm_name
__props__['name'] = None
__props__['provisioning_state'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute/v20150615:VirtualMachineExtension"), pulumi.Alias(type_="azure-native:compute:VirtualMachineExtension"), pulumi.Alias(type_="azure-nextgen:compute:VirtualMachineExtension"), pulumi.Alias(type_="azure-native:compute/latest:VirtualMachineExtension"), pulumi.Alias(type_="azure-nextgen:compute/latest:VirtualMachineExtension"), pulumi.Alias(type_="azure-native:compute/v20160330:VirtualMachineExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20160330:VirtualMachineExtension"), pulumi.Alias(type_="azure-native:compute/v20160430preview:VirtualMachineExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20160430preview:VirtualMachineExtension"), pulumi.Alias(type_="azure-native:compute/v20170330:VirtualMachineExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20170330:VirtualMachineExtension"), pulumi.Alias(type_="azure-native:compute/v20171201:VirtualMachineExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20171201:VirtualMachineExtension"), pulumi.Alias(type_="azure-native:compute/v20180401:VirtualMachineExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20180401:VirtualMachineExtension"), pulumi.Alias(type_="azure-native:compute/v20180601:VirtualMachineExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20180601:VirtualMachineExtension"), pulumi.Alias(type_="azure-native:compute/v20181001:VirtualMachineExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20181001:VirtualMachineExtension"), pulumi.Alias(type_="azure-native:compute/v20190301:VirtualMachineExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:VirtualMachineExtension"), pulumi.Alias(type_="azure-native:compute/v20190701:VirtualMachineExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20190701:VirtualMachineExtension"), pulumi.Alias(type_="azure-native:compute/v20191201:VirtualMachineExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20191201:VirtualMachineExtension"), pulumi.Alias(type_="azure-native:compute/v20200601:VirtualMachineExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20200601:VirtualMachineExtension"), pulumi.Alias(type_="azure-native:compute/v20201201:VirtualMachineExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20201201:VirtualMachineExtension")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualMachineExtension, __self__).__init__(
'azure-native:compute/v20150615:VirtualMachineExtension',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualMachineExtension':
"""
Get an existing VirtualMachineExtension resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["auto_upgrade_minor_version"] = None
__props__["force_update_tag"] = None
__props__["instance_view"] = None
__props__["location"] = None
__props__["name"] = None
__props__["protected_settings"] = None
__props__["provisioning_state"] = None
__props__["publisher"] = None
__props__["settings"] = None
__props__["tags"] = None
__props__["type"] = None
__props__["type_handler_version"] = None
return VirtualMachineExtension(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="autoUpgradeMinorVersion")
def auto_upgrade_minor_version(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
"""
return pulumi.get(self, "auto_upgrade_minor_version")
@property
@pulumi.getter(name="forceUpdateTag")
def force_update_tag(self) -> pulumi.Output[Optional[str]]:
"""
How the extension handler should be forced to update even if the extension configuration has not changed.
"""
return pulumi.get(self, "force_update_tag")
@property
@pulumi.getter(name="instanceView")
def instance_view(self) -> pulumi.Output[Optional['outputs.VirtualMachineExtensionInstanceViewResponse']]:
"""
The virtual machine extension instance view.
"""
return pulumi.get(self, "instance_view")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="protectedSettings")
def protected_settings(self) -> pulumi.Output[Optional[Any]]:
"""
The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
"""
return pulumi.get(self, "protected_settings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def publisher(self) -> pulumi.Output[Optional[str]]:
"""
The name of the extension handler publisher.
"""
return pulumi.get(self, "publisher")
@property
@pulumi.getter
def settings(self) -> pulumi.Output[Optional[Any]]:
"""
Json formatted public settings for the extension.
"""
return pulumi.get(self, "settings")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="typeHandlerVersion")
def type_handler_version(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the version of the script handler.
"""
return pulumi.get(self, "type_handler_version")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 53.814655 | 2,318 | 0.69139 |
b012e32788dc3ca91765ed0b393c572b5b6ae4b0 | 16,475 | py | Python | makibot/modules/misc.py | ThinkinCoin/custom-tg-bot | 3a7780941faa2e8c2723ac137ff65f8b9660dca7 | [
"MIT"
] | null | null | null | makibot/modules/misc.py | ThinkinCoin/custom-tg-bot | 3a7780941faa2e8c2723ac137ff65f8b9660dca7 | [
"MIT"
] | null | null | null | makibot/modules/misc.py | ThinkinCoin/custom-tg-bot | 3a7780941faa2e8c2723ac137ff65f8b9660dca7 | [
"MIT"
] | null | null | null | import html
import json
import random
from datetime import datetime
from typing import Optional, List
import requests
from telegram import Message, Chat, Update, Bot, MessageEntity
from telegram import ParseMode
from telegram.ext import CommandHandler, run_async, Filters
from telegram.utils.helpers import escape_markdown, mention_html
from makibot import dispatcher, OWNER_ID, SUDO_USERS, SUPPORT_USERS, WHITELIST_USERS, BAN_STICKER
from makibot.__main__ import GDPR
from makibot.__main__ import STATS, USER_INFO
from makibot.modules.disable import DisableAbleCommandHandler
from makibot.modules.helper_funcs.extraction import extract_user
from makibot.modules.helper_funcs.filters import CustomFilters
RUN_STRINGS = (
"Where do you think you're going?",
"Huh? what? did they get away?",
"ZZzzZZzz... Huh? what? oh, just them again, nevermind.",
"Get back here!",
"Not so fast...",
"Look out for the wall!",
"Don't leave me alone with them!!",
"You run, you die.",
"Jokes on you, I'm everywhere",
"You're gonna regret that...",
"You could also try /kickme, I hear that's fun.",
"Go bother someone else, no-one here cares.",
"You can run, but you can't hide.",
"Is that all you've got?",
"I'm behind you...",
"You've got company!",
"We can do this the easy way, or the hard way.",
"You just don't get it, do you?",
"Yeah, you better run!",
"Please, remind me how much I care?",
"I'd run faster if I were you.",
"That's definitely the droid we're looking for.",
"May the odds be ever in your favour.",
"Famous last words.",
"And they disappeared forever, never to be seen again.",
"\"Oh, look at me! I'm so cool, I can run from a bot!\" - this person",
"Yeah yeah, just tap /kickme already.",
"Here, take this ring and head to Mordor while you're at it.",
"Legend has it, they're still running...",
"Unlike Harry Potter, your parents can't protect you from me.",
"Fear leads to anger. Anger leads to hate. Hate leads to suffering. If you keep running in fear, you might "
"be the next Vader.",
"Multiple calculations later, I have decided my interest in your shenanigans is exactly 0.",
"Legend has it, they're still running.",
"Keep it up, not sure we want you here anyway.",
"You're a wiza- Oh. Wait. You're not Harry, keep moving.",
"NO RUNNING IN THE HALLWAYS!",
"Hasta la vista, baby.",
"Who let the dogs out?",
"It's funny, because no one cares.",
"Ah, what a waste. I liked that one.",
"Frankly, my dear, I don't give a damn.",
"My milkshake brings all the boys to yard... So run faster!",
"You can't HANDLE the truth!",
"A long time ago, in a galaxy far far away... Someone would've cared about that. Not anymore though.",
"Hey, look at them! They're running from the inevitable banhammer... Cute.",
"Han shot first. So will I.",
"What are you running after, a white rabbit?",
"As The Doctor would say... RUN!",
)
SLAP_TEMPLATES = (
"{user1} {hits} {user2} with a {item}.",
"{user1} {hits} {user2} in the face with a {item}.",
"{user1} {hits} {user2} around a bit with a {item}.",
"{user1} {throws} a {item} at {user2}.",
"{user1} grabs a {item} and {throws} it at {user2}'s face.",
"{user1} launches a {item} in {user2}'s general direction.",
"{user1} starts slapping {user2} silly with a {item}.",
"{user1} pins {user2} down and repeatedly {hits} them with a {item}.",
"{user1} grabs up a {item} and {hits} {user2} with it.",
"{user1} ties {user2} to a chair and {throws} a {item} at them.",
"{user1} gave a friendly push to help {user2} learn to swim in lava."
)
ITEMS = (
"cast iron skillet",
"large trout",
"baseball bat",
"cricket bat",
"wooden cane",
"nail",
"printer",
"shovel",
"CRT monitor",
"physics textbook",
"toaster",
"portrait of Richard Stallman",
"television",
"five ton truck",
"roll of duct tape",
"book",
"laptop",
"old television",
"sack of rocks",
"rainbow trout",
"rubber chicken",
"spiked bat",
"fire extinguisher",
"heavy rock",
"chunk of dirt",
"beehive",
"piece of rotten meat",
"bear",
"ton of bricks",
)
THROW = (
"throws",
"flings",
"chucks",
"hurls",
)
HIT = (
"hits",
"whacks",
"slaps",
"smacks",
"bashes",
)
GMAPS_LOC = "https://maps.googleapis.com/maps/api/geocode/json"
GMAPS_TIME = "https://maps.googleapis.com/maps/api/timezone/json"
@run_async
def runs(bot: Bot, update: Update):
update.effective_message.reply_text(random.choice(RUN_STRINGS))
@run_async
def slap(bot: Bot, update: Update, args: List[str]):
msg = update.effective_message # type: Optional[Message]
# reply to correct message
reply_text = msg.reply_to_message.reply_text if msg.reply_to_message else msg.reply_text
# get user who sent message
if msg.from_user.username:
curr_user = "@" + escape_markdown(msg.from_user.username)
else:
curr_user = "[{}](tg://user?id={})".format(msg.from_user.first_name, msg.from_user.id)
user_id = extract_user(update.effective_message, args)
if user_id:
slapped_user = bot.get_chat(user_id)
user1 = curr_user
if slapped_user.username:
user2 = "@" + escape_markdown(slapped_user.username)
else:
user2 = "[{}](tg://user?id={})".format(slapped_user.first_name,
slapped_user.id)
# if no target found, bot targets the sender
else:
user1 = "[{}](tg://user?id={})".format(bot.first_name, bot.id)
user2 = curr_user
temp = random.choice(SLAP_TEMPLATES)
item = random.choice(ITEMS)
hit = random.choice(HIT)
throw = random.choice(THROW)
repl = temp.format(user1=user1, user2=user2, item=item, hits=hit, throws=throw)
reply_text(repl, parse_mode=ParseMode.MARKDOWN)
@run_async
def get_bot_ip(bot: Bot, update: Update):
""" Sends the bot's IP address, so as to be able to ssh in if necessary.
OWNER ONLY.
"""
res = requests.get("http://ipinfo.io/ip")
update.message.reply_text(res.text)
@run_async
def get_id(bot: Bot, update: Update, args: List[str]):
user_id = extract_user(update.effective_message, args)
if user_id:
if update.effective_message.reply_to_message and update.effective_message.reply_to_message.forward_from:
user1 = update.effective_message.reply_to_message.from_user
user2 = update.effective_message.reply_to_message.forward_from
update.effective_message.reply_text(
"The original sender, {}, has an ID of `{}`.\nThe forwarder, {}, has an ID of `{}`.".format(
escape_markdown(user2.first_name),
user2.id,
escape_markdown(user1.first_name),
user1.id),
parse_mode=ParseMode.MARKDOWN)
else:
user = bot.get_chat(user_id)
update.effective_message.reply_text("{}'s id is `{}`.".format(escape_markdown(user.first_name), user.id),
parse_mode=ParseMode.MARKDOWN)
else:
chat = update.effective_chat # type: Optional[Chat]
if chat.type == "private":
update.effective_message.reply_text("Your id is `{}`.".format(chat.id),
parse_mode=ParseMode.MARKDOWN)
else:
update.effective_message.reply_text("This group's id is `{}`.".format(chat.id),
parse_mode=ParseMode.MARKDOWN)
@run_async
def info(bot: Bot, update: Update, args: List[str]):
msg = update.effective_message # type: Optional[Message]
user_id = extract_user(update.effective_message, args)
if user_id:
user = bot.get_chat(user_id)
elif not msg.reply_to_message and not args:
user = msg.from_user
elif not msg.reply_to_message and (not args or (
len(args) >= 1 and not args[0].startswith("@") and not args[0].isdigit() and not msg.parse_entities(
[MessageEntity.TEXT_MENTION]))):
msg.reply_text("I can't extract a user from this.")
return
else:
return
text = "<b>User info</b>:" \
"\nID: <code>{}</code>" \
"\nFirst Name: {}".format(user.id, html.escape(user.first_name))
if user.last_name:
text += "\nLast Name: {}".format(html.escape(user.last_name))
if user.username:
text += "\nUsername: @{}".format(html.escape(user.username))
text += "\nPermanent user link: {}".format(mention_html(user.id, "link"))
if user.id == OWNER_ID:
text += "\n\nThis person is my owner - I would never do anything against them!"
else:
if user.id in SUDO_USERS:
text += "\nThis person is one of my sudo users! " \
"Nearly as powerful as my owner - so watch it."
else:
if user.id in SUPPORT_USERS:
text += "\nThis person is one of my support users! " \
"Not quite a sudo user, but can still gban you off the map."
if user.id in WHITELIST_USERS:
text += "\nThis person has been whitelisted! " \
"That means I'm not allowed to ban/kick them."
for mod in USER_INFO:
mod_info = mod.__user_info__(user.id).strip()
if mod_info:
text += "\n\n" + mod_info
update.effective_message.reply_text(text, parse_mode=ParseMode.HTML)
@run_async
def get_time(bot: Bot, update: Update, args: List[str]):
location = " ".join(args)
if location.lower() == bot.first_name.lower():
update.effective_message.reply_text("Its always banhammer time for me!")
bot.send_sticker(update.effective_chat.id, BAN_STICKER)
return
res = requests.get(GMAPS_LOC, params=dict(address=location))
if res.status_code == 200:
loc = json.loads(res.text)
if loc.get('status') == 'OK':
lat = loc['results'][0]['geometry']['location']['lat']
long = loc['results'][0]['geometry']['location']['lng']
country = None
city = None
address_parts = loc['results'][0]['address_components']
for part in address_parts:
if 'country' in part['types']:
country = part.get('long_name')
if 'administrative_area_level_1' in part['types'] and not city:
city = part.get('long_name')
if 'locality' in part['types']:
city = part.get('long_name')
if city and country:
location = "{}, {}".format(city, country)
elif country:
location = country
timenow = int(datetime.utcnow().timestamp())
res = requests.get(GMAPS_TIME, params=dict(location="{},{}".format(lat, long), timestamp=timenow))
if res.status_code == 200:
offset = json.loads(res.text)['dstOffset']
timestamp = json.loads(res.text)['rawOffset']
time_there = datetime.fromtimestamp(timenow + timestamp + offset).strftime("%H:%M:%S on %A %d %B")
update.message.reply_text("It's {} in {}".format(time_there, location))
@run_async
def echo(bot: Bot, update: Update):
args = update.effective_message.text.split(None, 1)
message = update.effective_message
if message.reply_to_message:
message.reply_to_message.reply_text(args[1])
else:
message.reply_text(args[1], quote=False)
message.delete()
@run_async
def gdpr(bot: Bot, update: Update):
update.effective_message.reply_text("Deleting identifiable data...")
for mod in GDPR:
mod.__gdpr__(update.effective_user.id)
update.effective_message.reply_text("Your personal data has been deleted.\n\nNote that this will not unban "
"you from any chats, as that is telegram data, not MakiBot data. "
"Flooding, warns, and gbans are also preserved, as of "
"[this](https://ico.org.uk/for-organisations/guide-to-the-general-data-protection-regulation-gdpr/individual-rights/right-to-erasure/), "
"which clearly states that the right to erasure does not apply "
"\"for the performance of a task carried out in the public interest\", as is "
"the case for the aforementioned pieces of data.",
parse_mode=ParseMode.MARKDOWN)
MARKDOWN_HELP = """
Markdown is a very powerful formatting tool supported by telegram. {} has some enhancements, to make sure that \
saved messages are correctly parsed, and to allow you to create buttons.
- <code>_italic_</code>: wrapping text with '_' will produce italic text
- <code>*bold*</code>: wrapping text with '*' will produce bold text
- <code>`code`</code>: wrapping text with '`' will produce monospaced text, also known as 'code'
- <code>[sometext](someURL)</code>: this will create a link - the message will just show <code>sometext</code>, \
and tapping on it will open the page at <code>someURL</code>.
EG: <code>[test](example.com)</code>
- <code>[buttontext](buttonurl:someURL)</code>: this is a special enhancement to allow users to have telegram \
buttons in their markdown. <code>buttontext</code> will be what is displayed on the button, and <code>someurl</code> \
will be the url which is opened.
EG: <code>[This is a button](buttonurl:example.com)</code>
If you want multiple buttons on the same line, use :same, as such:
<code>[one](buttonurl://example.com)
[two](buttonurl://google.com:same)</code>
This will create two buttons on a single line, instead of one button per line.
Keep in mind that your message <b>MUST</b> contain some text other than just a button!
""".format(dispatcher.bot.first_name)
@run_async
def markdown_help(bot: Bot, update: Update):
update.effective_message.reply_text(MARKDOWN_HELP, parse_mode=ParseMode.HTML)
update.effective_message.reply_text("Try forwarding the following message to me, and you'll see!")
update.effective_message.reply_text("/save test This is a markdown test. _italics_, *bold*, `code`, "
"[URL](example.com) [button](buttonurl:github.com) "
"[button2](buttonurl://google.com:same)")
@run_async
def stats(bot: Bot, update: Update):
update.effective_message.reply_text("Current stats:\n" + "\n".join([mod.__stats__() for mod in STATS]))
# /ip is for private use
__help__ = """
- /id: get the current group id. If used by replying to a message, gets that user's id.
- /runs: reply a random string from an array of replies.
- /slap: slap a user, or get slapped if not a reply.
- /info: get information about a user.
- /gdpr: deletes your information from the bot's database. Private chats only.
- /markdownhelp: quick summary of how markdown works in telegram - can only be called in private chats.
"""
__mod_name__ = "Misc"
ID_HANDLER = DisableAbleCommandHandler("id", get_id, pass_args=True)
IP_HANDLER = CommandHandler("ip", get_bot_ip, filters=Filters.chat(OWNER_ID))
TIME_HANDLER = CommandHandler("time", get_time, pass_args=True)
RUNS_HANDLER = DisableAbleCommandHandler("runs", runs)
SLAP_HANDLER = DisableAbleCommandHandler("slap", slap, pass_args=True)
INFO_HANDLER = DisableAbleCommandHandler("info", info, pass_args=True)
ECHO_HANDLER = CommandHandler("echo", echo, filters=Filters.user(OWNER_ID))
MD_HELP_HANDLER = CommandHandler("markdownhelp", markdown_help, filters=Filters.private)
STATS_HANDLER = CommandHandler("stats", stats, filters=CustomFilters.sudo_filter)
GDPR_HANDLER = CommandHandler("gdpr", gdpr, filters=Filters.private)
dispatcher.add_handler(ID_HANDLER)
dispatcher.add_handler(IP_HANDLER)
# dispatcher.add_handler(TIME_HANDLER)
dispatcher.add_handler(RUNS_HANDLER)
dispatcher.add_handler(SLAP_HANDLER)
dispatcher.add_handler(INFO_HANDLER)
dispatcher.add_handler(ECHO_HANDLER)
dispatcher.add_handler(MD_HELP_HANDLER)
dispatcher.add_handler(STATS_HANDLER)
dispatcher.add_handler(GDPR_HANDLER)
| 39.319809 | 177 | 0.639697 |
1999ad074916e8ac747fd7ca4a69e659366c28c0 | 1,890 | py | Python | results-processor/gsutil.py | CYBAI/wpt.fyi | cbe016a911e104a7fd58ba1a7db22ea8bbe2da16 | [
"BSD-3-Clause"
] | 1 | 2021-01-07T18:49:47.000Z | 2021-01-07T18:49:47.000Z | results-processor/gsutil.py | CYBAI/wpt.fyi | cbe016a911e104a7fd58ba1a7db22ea8bbe2da16 | [
"BSD-3-Clause"
] | null | null | null | results-processor/gsutil.py | CYBAI/wpt.fyi | cbe016a911e104a7fd58ba1a7db22ea8bbe2da16 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2018 The WPT Dashboard Project. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import subprocess
_log = logging.getLogger(__name__)
def _call(command, quiet=False):
_log.info('EXEC%s: %s',
'(quiet)' if quiet else '',
' '.join(command))
if quiet:
subprocess.check_call(command,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
else:
subprocess.check_call(command)
def gs_to_public_url(gcs_path):
assert gcs_path.startswith('gs://')
return gcs_path.replace('gs://', 'https://storage.googleapis.com/', 1)
def rsync_gzip(path1, path2, quiet=False):
"""Syncs path1 to path2 with gsutil rsync.
All files in path1 are considered gzipped, and the 'Content-Encoding:gzip'
header will be set for all files.
Args:
path1, path2: The source and destination paths (must be directories).
"""
# Use parallel processes and no multithreading to avoid Python GIL.
# https://cloud.google.com/storage/docs/gsutil/commands/rsync#options
command = [
'gsutil', '-o', 'GSUtil:parallel_process_count=10',
'-o', 'GSUtil:parallel_thread_count=1',
'-m', '-h', 'Content-Encoding:gzip', 'rsync', '-r',
path1, path2
]
_call(command, quiet)
def copy(path1, path2, gzipped=False, quiet=False):
"""Copies path1 to path2 with gsutil cp.
Args:
path1, path2: The source and destination paths.
gzipped: Whether path1 is gzipped (if True, 'Content-Encoding:gzip'
will be added to the headers).
"""
command = ['gsutil', '-m']
if gzipped:
command += ['-h', 'Content-Encoding:gzip']
command += ['cp', '-r', path1, path2]
_call(command, quiet)
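# Example usage (sketch; hypothetical bucket and local paths, requires the gsutil CLI on PATH):
#   rsync_gzip("/tmp/results", "gs://example-bucket/results", quiet=True)
#   copy("/tmp/report.json.gz", "gs://example-bucket/report.json.gz", gzipped=True)
#   print(gs_to_public_url("gs://example-bucket/report.json.gz"))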
| 30.483871 | 78 | 0.628042 |
af81602dab309a46e3da4f2622a4fb223b9662d6 | 1,273 | py | Python | git_report.py | JMast3rs/netcfgbu-plugin-teams | e48da57602dac4df160c546175447614ba7772a2 | [
"MIT"
] | null | null | null | git_report.py | JMast3rs/netcfgbu-plugin-teams | e48da57602dac4df160c546175447614ba7772a2 | [
"MIT"
] | null | null | null | git_report.py | JMast3rs/netcfgbu-plugin-teams | e48da57602dac4df160c546175447614ba7772a2 | [
"MIT"
] | null | null | null |
import pymsteams, requests
from netcfgbu.plugins import Plugin  # assumed: netcfgbu's plugin base class (not defined in this file)
teams_webhook_url = "<< Insert Teams Webhook URL>>"
teams_git_repository_url = "<< Insert GitLab Repo URL (without .git) >>"
teams_git_repository_id = "<< Insert GitLab Repo ID >>"
teams_git_token = "<< Insert GitLab Access Token >>"
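# Hook sketch (hypothetical): netcfgbu's plugin runner presumably calls this hook after a VCS
# save, roughly as Teams_Git.git_report(True, "20211015_153000"), posting a Teams card that
# links to the tagged configuration in GitLab.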
class Teams_Git(Plugin):
name = "Teams_Git"
def git_report(success, tag_name):
    message = pymsteams.connectorcard(teams_webhook_url)
if success:
res = requests.get(f"https://gitlabs.com/api/v4/projects/{teams_git_repository_id}/repository/tags?private_token={teams_git_token}").json()
previous_repo = res[1]["name"]
message.title("Configuration Change Detected")
message.text("Successfully pushed to git.")
message.addLinkButton("View Config", f"{teams_git_repository_url}/-/tree/{tag_name}")
message.addLinkButton("View Changes", f"{teams_git_repository_url}/-/compare/{previous_repo}...{tag_name}")
message.color("#4EE73C")
message.send()
print(f"Previous Config Tag: {previous_repo}, New Config Tag: {tag_name}")
else:
message.title("No Configureation Change Detected")
message.text("Skipping git push.")
message.send() | 38.575758 | 151 | 0.658288 |
be1c8cf1de2fa6ec7a423cd2a4279667c11ff31e | 8,434 | py | Python | poetry/puzzle/solver.py | tadeoos/poetry | de73fa07386be26d32bf15044fd81bf979787b9f | [
"MIT"
] | null | null | null | poetry/puzzle/solver.py | tadeoos/poetry | de73fa07386be26d32bf15044fd81bf979787b9f | [
"MIT"
] | null | null | null | poetry/puzzle/solver.py | tadeoos/poetry | de73fa07386be26d32bf15044fd81bf979787b9f | [
"MIT"
] | null | null | null | from typing import List
from poetry.mixology import resolve_version
from poetry.mixology.failure import SolveFailure
from poetry.packages.constraints.generic_constraint import GenericConstraint
from poetry.semver import parse_constraint
from .exceptions import SolverProblemError
from .operations import Install
from .operations import Uninstall
from .operations import Update
from .operations.operation import Operation
from .provider import Provider
class Solver:
def __init__(self, package, pool, installed, locked, io):
self._package = package
self._pool = pool
self._installed = installed
self._locked = locked
self._io = io
def solve(self, use_latest=None): # type: (...) -> List[Operation]
provider = Provider(self._package, self._pool, self._io)
locked = {}
for package in self._locked.packages:
locked[package.name] = package
try:
result = resolve_version(
self._package, provider, locked=locked, use_latest=use_latest
)
except SolveFailure as e:
raise SolverProblemError(e)
packages = result.packages
requested = self._package.all_requires
graph = self._build_graph(self._package, packages)
for package in packages:
category, optional, python, platform = self._get_tags_for_package(
package, graph
)
package.category = category
package.optional = optional
# If requirements are empty, drop them
requirements = {}
if python is not None and python != "*":
requirements["python"] = python
if platform is not None and platform != "*":
requirements["platform"] = platform
package.requirements = requirements
operations = []
for package in packages:
installed = False
for pkg in self._installed.packages:
if package.name == pkg.name:
installed = True
# Checking version
if package.version != pkg.version:
operations.append(Update(pkg, package))
else:
operations.append(Install(package).skip("Already installed"))
break
if not installed:
operations.append(Install(package))
# Checking for removals
for pkg in self._locked.packages:
remove = True
for package in packages:
if pkg.name == package.name:
remove = False
break
if remove:
skip = True
for installed in self._installed.packages:
if installed.name == pkg.name:
skip = False
break
op = Uninstall(pkg)
if skip:
op.skip("Not currently installed")
operations.append(op)
requested_names = [r.name for r in self._package.all_requires]
return sorted(
operations,
key=lambda o: (
1 if o.package.name in requested_names else 0,
o.package.name,
),
)
def _build_graph(self, package, packages, previous=None, dep=None):
if not previous:
category = "dev"
optional = True
python_version = None
platform = None
else:
category = dep.category
optional = dep.is_optional() and not dep.is_activated()
python_version = (
dep.python_versions
if previous.python_constraint.allows_all(dep.python_constraint)
else previous.python_versions
)
platform = (
dep.platform
if previous.platform_constraint.matches(dep.platform_constraint)
and dep.platform != "*"
else previous.platform
)
graph = {
"name": package.name,
"category": category,
"optional": optional,
"python_version": python_version,
"platform": platform,
"children": [],
}
if previous and previous is not dep and previous.name == dep.name:
return graph
for dependency in package.all_requires:
if dependency.is_optional():
if not package.is_root() and (not dep or not dep.extras):
continue
is_activated = False
for group, extras in package.extras.items():
if dep:
extras = dep.extras
elif package.is_root():
extras = package.extras
else:
extras = []
if group in extras:
is_activated = True
break
if not is_activated:
continue
for pkg in packages:
if pkg.name == dependency.name:
# If there is already a child with this name
# we merge the requirements
existing = None
for child in graph["children"]:
if child["name"] == pkg.name:
existing = child
continue
child_graph = self._build_graph(
pkg, packages, dependency, dep or dependency
)
if existing:
existing["python_version"] = str(
parse_constraint(existing["python_version"]).union(
parse_constraint(child_graph["python_version"])
)
)
continue
graph["children"].append(child_graph)
return graph
def _get_tags_for_package(self, package, graph):
categories = ["dev"]
optionals = [True]
python_versions = []
platforms = []
children = graph["children"]
for child in children:
if child["name"] == package.name:
category = child["category"]
optional = child["optional"]
python_version = child["python_version"]
platform = child["platform"]
else:
(
category,
optional,
python_version,
platform,
) = self._get_tags_for_package(package, child)
categories.append(category)
optionals.append(optional)
if python_version is not None:
python_versions.append(python_version)
if platform is not None:
platforms.append(platform)
if "main" in categories:
category = "main"
else:
category = "dev"
optional = all(optionals)
if not python_versions:
python_version = None
else:
# Find the least restrictive constraint
python_version = python_versions[0]
for constraint in python_versions[1:]:
previous = parse_constraint(python_version)
current = parse_constraint(constraint)
if python_version == "*":
continue
elif constraint == "*":
python_version = constraint
elif current.allows_all(previous):
python_version = constraint
if not platforms:
platform = None
else:
platform = platforms[0]
for constraint in platforms[1:]:
previous = GenericConstraint.parse(platform)
current = GenericConstraint.parse(constraint)
if platform == "*":
continue
elif constraint == "*":
platform = constraint
elif current.matches(previous):
platform = constraint
return category, optional, python_version, platform
| 32.689922 | 85 | 0.50996 |
49c916a43d78137ea22a94b7071b91fb7165c321 | 6,814 | py | Python | test/test_fold.py | f-dangel/unfoldNd | 63e9abc4867d8678c2ac00da567dc106e9f6f2c7 | [
"MIT"
] | 21 | 2021-03-04T04:56:20.000Z | 2022-03-31T11:15:28.000Z | test/test_fold.py | f-dangel/unfoldNd | 63e9abc4867d8678c2ac00da567dc106e9f6f2c7 | [
"MIT"
] | 12 | 2021-02-16T16:16:23.000Z | 2021-05-28T06:00:41.000Z | test/test_fold.py | f-dangel/unfoldNd | 63e9abc4867d8678c2ac00da567dc106e9f6f2c7 | [
"MIT"
] | 1 | 2021-11-04T12:52:19.000Z | 2021-11-04T12:52:19.000Z | """Tests for ``unfoldNd/fold.py.`` (fold functionality)."""
from test.fold_settings import (
DEVICES,
DEVICES_ID,
PRECISION_PROBLEMS_2D,
PRECISION_PROBLEMS_2D_IDS,
PROBLEMS_2D,
PROBLEMS_2D_IDS,
PROBLEMS_INVERSE,
PROBLEMS_INVERSE_IDS,
UNSUPPORTED_ARGS_PROBLEMS,
UNSUPPORTED_ARGS_PROBLEMS_IDS,
)
from test.unfold_settings import PROBLEMS_1D as UNFOLD_PROBLEMS_1D
from test.unfold_settings import PROBLEMS_1D_IDS as UNFOLD_PROBLEMS_1D_IDS
from test.unfold_settings import PROBLEMS_2D as UNFOLD_PROBLEMS_2D
from test.unfold_settings import PROBLEMS_2D_IDS as UNFOLD_PROBLEMS_2D_IDS
from test.unfold_settings import PROBLEMS_3D as UNFOLD_PROBLEMS_3D
from test.unfold_settings import PROBLEMS_3D_IDS as UNFOLD_PROBLEMS_3D_IDS
from test.utils import _add_dummy_dim
import pytest
import torch
import unfoldNd
@pytest.mark.parametrize("device", DEVICES, ids=DEVICES_ID)
@pytest.mark.parametrize(
"problem", UNSUPPORTED_ARGS_PROBLEMS, ids=UNSUPPORTED_ARGS_PROBLEMS_IDS
)
def test_FoldNd_unsupported_args(problem, device):
"""Check unsupported arguments of ``FoldNd``."""
seed = problem["seed"]
input_fn = problem["input_fn"]
fold_kwargs = problem["fold_kwargs"]
torch.manual_seed(seed)
inputs = input_fn().to(device)
with pytest.raises(ValueError):
_ = unfoldNd.FoldNd(**fold_kwargs).to(device)(inputs)
@pytest.mark.parametrize("device", DEVICES, ids=DEVICES_ID)
@pytest.mark.parametrize("problem", PROBLEMS_2D, ids=PROBLEMS_2D_IDS)
def test_Fold2d_vs_Fold(problem, device):
"""Compare with ``torch.nn.Fold`` for a 4d input."""
seed = problem["seed"]
input_fn = problem["input_fn"]
fold_kwargs = problem["fold_kwargs"]
torch.manual_seed(seed)
inputs = input_fn().to(device)
result_torch = torch.nn.Fold(**fold_kwargs).to(device)(inputs)
result_lib = unfoldNd.FoldNd(**fold_kwargs).to(device)(inputs)
assert torch.allclose(result_lib, result_torch)
@pytest.mark.parametrize("device", DEVICES, ids=DEVICES_ID)
@pytest.mark.parametrize(
"problem", PRECISION_PROBLEMS_2D, ids=PRECISION_PROBLEMS_2D_IDS
)
def test_Fold2d_vs_Fold_precision(problem, device):
"""Catch expected shortcomings of ``FoldNd`` caused by unfolding float indices."""
seed = problem["seed"]
input_fn = problem["input_fn"]
fold_kwargs = problem["fold_kwargs"]
torch.manual_seed(seed)
inputs = input_fn().to(device)
_ = torch.nn.Fold(**fold_kwargs).to(device)(inputs)
with pytest.raises(RuntimeError):
_ = unfoldNd.FoldNd(**fold_kwargs).to(device)(inputs)
@pytest.mark.parametrize("device", DEVICES, ids=DEVICES_ID)
@pytest.mark.parametrize("problem", UNFOLD_PROBLEMS_2D, ids=UNFOLD_PROBLEMS_2D_IDS)
def test_Fold2d_vs_Fold_after_Unfold(problem, device):
"""Compare with ``torch.nn.Fold`` for a 4d input.
Generate settings from unfold tests.
"""
seed = problem["seed"]
input_fn = problem["input_fn"]
unfold_kwargs = problem["unfold_kwargs"]
torch.manual_seed(seed)
unfold_input = input_fn().to(device)
inputs = torch.nn.functional.unfold(unfold_input, **unfold_kwargs)
fold_kwargs = problem["unfold_kwargs"]
output_size = unfold_input.shape[2:]
result_torch = torch.nn.Fold(output_size, **fold_kwargs).to(device)(inputs)
result_lib = unfoldNd.FoldNd(output_size, **fold_kwargs).to(device)(inputs)
assert torch.allclose(result_lib, result_torch)
@pytest.mark.parametrize("device", DEVICES, ids=DEVICES_ID)
@pytest.mark.parametrize("problem", UNFOLD_PROBLEMS_1D, ids=UNFOLD_PROBLEMS_1D_IDS)
def test_Fold1d_vs_Fold_after_dummy_dim_Unfold(problem, device):
"""Compare with ``torch.nn.Fold`` for a 3d input.
Generate settings from unfold tests and by adding a dummy dimension to achieve
compatibility with ``torch.nn.Unfold``.
"""
seed = problem["seed"]
input_fn = problem["input_fn"]
unfold_kwargs = problem["unfold_kwargs"]
torch.manual_seed(seed)
unfold_inputs = input_fn().to(device)
unfold_kwargs_dummy_dim, inputs_dummy_dim = _add_dummy_dim(
unfold_kwargs, unfold_inputs
)
inputs = torch.nn.Unfold(**unfold_kwargs_dummy_dim).to(device)(inputs_dummy_dim)
output_size_dummy_dim = tuple(inputs_dummy_dim.shape[2:])
result_torch = (
torch.nn.Fold(output_size_dummy_dim, **unfold_kwargs_dummy_dim)
.to(device)(inputs)
.squeeze(-1)
)
fold_kwargs = problem["unfold_kwargs"]
output_size = unfold_inputs.shape[2:]
result_lib = unfoldNd.FoldNd(output_size, **fold_kwargs).to(device)(inputs)
assert torch.allclose(result_lib, result_torch)
@pytest.mark.parametrize("device", DEVICES, ids=DEVICES_ID)
@pytest.mark.parametrize("problem", PROBLEMS_INVERSE, ids=PROBLEMS_INVERSE_IDS)
def test_Fold_inverse_of_Unfold(problem, device):
"""Compare that folding is the inverse of unfolding on 3d/4d/5d inputs.
This relation only holds if every pixel/voxel is used exactly once, i.e.
patches don't overlap and cover the entire image/volume.
"""
seed = problem["seed"]
input_fn = problem["input_fn"]
unfold_kwargs = problem["unfold_kwargs"]
torch.manual_seed(seed)
inputs = input_fn().to(device)
unfolded = unfoldNd.unfoldNd(inputs, **unfold_kwargs)
fold_kwargs = problem["unfold_kwargs"]
output_size = inputs.shape[2:]
folded = unfoldNd.FoldNd(output_size, **fold_kwargs).to(device)(unfolded)
assert torch.allclose(inputs, folded)
@pytest.mark.parametrize("device", DEVICES, ids=DEVICES_ID)
@pytest.mark.parametrize(
"problem",
UNFOLD_PROBLEMS_1D + UNFOLD_PROBLEMS_2D + UNFOLD_PROBLEMS_3D,
ids=UNFOLD_PROBLEMS_1D_IDS + UNFOLD_PROBLEMS_2D_IDS + UNFOLD_PROBLEMS_3D_IDS,
)
def test_FoldNd_divisor(problem, device):
"""Test divisor tensor from ``fold-unfold`` composition.
According to https://pytorch.org/docs/stable/generated/torch.nn.Fold.html the
divisor between an input tensor and the result of an unfold-fold composition
is satisfies ``fold(unfold(input)) == divisor * input`` with
``input_ones = torch.ones(input.shape, dtype=input.dtype)`` and
``divisor = fold(unfold(input_ones))``
"""
seed = problem["seed"]
input_fn = problem["input_fn"]
unfold_kwargs = problem["unfold_kwargs"]
torch.manual_seed(seed)
inputs = input_fn().to(device)
inputs_ones = torch.ones(inputs.shape, dtype=inputs.dtype).to(device)
unfold_module = unfoldNd.UnfoldNd(**unfold_kwargs).to(device)
fold_kwargs = problem["unfold_kwargs"]
output_size = inputs.shape[2:]
fold_module = unfoldNd.FoldNd(output_size, **fold_kwargs).to(device)
divisor = fold_module(unfold_module(inputs_ones))
outputs = fold_module(unfold_module(inputs))
assert torch.allclose(outputs, divisor * inputs)
| 34.414141 | 86 | 0.736131 |
aba89e44ac77b292be4a075f307dfaefdc6d4ef6 | 8,567 | py | Python | tests/importer/test_importer.py | peeley/hy | 03404e45bda2aca9aa6040e8c9fa5da5bfa598ef | [
"MIT"
] | null | null | null | tests/importer/test_importer.py | peeley/hy | 03404e45bda2aca9aa6040e8c9fa5da5bfa598ef | [
"MIT"
] | null | null | null | tests/importer/test_importer.py | peeley/hy | 03404e45bda2aca9aa6040e8c9fa5da5bfa598ef | [
"MIT"
] | null | null | null | # Copyright 2019 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
import os
import sys
import ast
import tempfile
import runpy
import importlib
from fractions import Fraction
from importlib import reload
import pytest
import hy
from hy.lex import hy_parse
from hy.errors import HyLanguageError
from hy.lex.exceptions import PrematureEndOfInput
from hy.compiler import hy_eval, hy_compile
from hy.importer import HyLoader
def test_basics():
"Make sure the basics of the importer work"
assert os.path.isfile('tests/resources/__init__.py')
resources_mod = importlib.import_module('tests.resources')
assert hasattr(resources_mod, 'kwtest')
assert os.path.isfile('tests/resources/bin/__init__.hy')
bin_mod = importlib.import_module('tests.resources.bin')
assert hasattr(bin_mod, '_null_fn_for_import_test')
def test_runpy():
# XXX: `runpy` won't update cached bytecode! Don't know if that's
# intentional or not.
basic_ns = runpy.run_path('tests/resources/importer/basic.hy')
assert 'square' in basic_ns
main_ns = runpy.run_path('tests/resources/bin')
assert main_ns['visited_main'] == 1
del main_ns
main_ns = runpy.run_module('tests.resources.bin')
assert main_ns['visited_main'] == 1
with pytest.raises(IOError):
runpy.run_path('tests/resources/foobarbaz.py')
def test_stringer():
_ast = hy_compile(hy_parse("(defn square [x] (* x x))"), __name__)
assert type(_ast.body[0]) == ast.FunctionDef
def test_imports():
path = os.getcwd() + "/tests/resources/importer/a.hy"
testLoader = HyLoader("tests.resources.importer.a", path)
def _import_test():
try:
return testLoader.load_module()
except:
return "Error"
assert _import_test() == "Error"
assert _import_test() is not None
def test_import_error_reporting():
"Make sure that (import) reports errors correctly."
with pytest.raises(HyLanguageError):
hy_compile(hy_parse("(import \"sys\")"), __name__)
def test_import_error_cleanup():
"Failed initial imports should not leave dead modules in `sys.modules`."
with pytest.raises(hy.errors.HyMacroExpansionError):
importlib.import_module('tests.resources.fails')
assert 'tests.resources.fails' not in sys.modules
@pytest.mark.skipif(sys.dont_write_bytecode,
reason="Bytecode generation is suppressed")
def test_import_autocompiles():
"Test that (import) byte-compiles the module."
with tempfile.NamedTemporaryFile(suffix='.hy', delete=True) as f:
f.write(b'(defn pyctest [s] (+ "X" s "Y"))')
f.flush()
pyc_path = importlib.util.cache_from_source(f.name)
try:
os.remove(pyc_path)
except (IOError, OSError):
pass
test_loader = HyLoader("mymodule", f.name).load_module()
assert hasattr(test_loader, 'pyctest')
assert os.path.exists(pyc_path)
os.remove(pyc_path)
def test_eval():
def eval_str(s):
return hy_eval(hy.read_str(s), filename='<string>', source=s)
assert eval_str('[1 2 3]') == [1, 2, 3]
assert eval_str('{"dog" "bark" "cat" "meow"}') == {
'dog': 'bark', 'cat': 'meow'}
assert eval_str('(, 1 2 3)') == (1, 2, 3)
assert eval_str('#{3 1 2}') == {1, 2, 3}
assert eval_str('1/2') == Fraction(1, 2)
assert eval_str('(.strip " fooooo ")') == 'fooooo'
assert eval_str(
'(if True "this is if true" "this is if false")') == "this is if true"
assert eval_str('(lfor num (range 100) :if (= (% num 2) 1) (pow num 2))') == [
pow(num, 2) for num in range(100) if num % 2 == 1]
def test_reload():
"""Generate a test module, confirm that it imports properly (and puts the
module in `sys.modules`), then modify the module so that it produces an
error when reloaded. Next, fix the error, reload, and check that the
module is updated and working fine. Rinse, repeat.
This test is adapted from CPython's `test_import.py`.
"""
def unlink(filename):
os.unlink(source)
bytecode = importlib.util.cache_from_source(source)
if os.path.isfile(bytecode):
os.unlink(bytecode)
TESTFN = 'testfn'
source = TESTFN + os.extsep + "hy"
with open(source, "w") as f:
f.write("(setv a 1)")
f.write("(setv b 2)")
sys.path.insert(0, os.curdir)
try:
mod = importlib.import_module(TESTFN)
assert TESTFN in sys.modules
assert mod.a == 1
assert mod.b == 2
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
unlink(source)
# Now damage the module.
with open(source, "w") as f:
f.write("(setv a 10)")
f.write("(setv b (// 20 0))")
with pytest.raises(ZeroDivisionError):
reload(mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
assert mod is not None
# We should have replaced a w/ 10, but the old b value should
# stick.
assert mod.a == 10
assert mod.b == 2
# Now fix the issue and reload the module.
unlink(source)
with open(source, "w") as f:
f.write("(setv a 11)")
f.write("(setv b (// 20 1))")
reload(mod)
mod = sys.modules.get(TESTFN)
assert mod is not None
assert mod.a == 11
assert mod.b == 20
# Now cause a syntax error
unlink(source)
with open(source, "w") as f:
# Missing paren...
f.write("(setv a 11")
f.write("(setv b (// 20 1))")
with pytest.raises(PrematureEndOfInput):
reload(mod)
mod = sys.modules.get(TESTFN)
assert mod is not None
assert mod.a == 11
assert mod.b == 20
# Fix it and retry
unlink(source)
with open(source, "w") as f:
f.write("(setv a 12)")
f.write("(setv b (// 10 1))")
reload(mod)
mod = sys.modules.get(TESTFN)
assert mod is not None
assert mod.a == 12
assert mod.b == 10
finally:
del sys.path[0]
if TESTFN in sys.modules:
del sys.modules[TESTFN]
unlink(source)
def test_reload_reexecute(capsys):
"""A module is re-executed when it's reloaded, even if it's
unchanged.
https://github.com/hylang/hy/issues/712"""
import tests.resources.hello_world
assert capsys.readouterr().out == 'hello world\n'
assert capsys.readouterr().out == ''
reload(tests.resources.hello_world)
assert capsys.readouterr().out == 'hello world\n'
def test_circular():
"""Test circular imports by creating a temporary file/module that calls a
function that imports itself."""
sys.path.insert(0, os.path.abspath('tests/resources/importer'))
try:
mod = runpy.run_module('circular')
assert mod['f']() == 1
finally:
sys.path.pop(0)
def test_shadowed_basename():
"""Make sure Hy loads `.hy` files instead of their `.py` counterparts (.e.g
`__init__.py` and `__init__.hy`).
"""
sys.path.insert(0, os.path.realpath('tests/resources/importer'))
try:
assert os.path.isfile('tests/resources/importer/foo/__init__.hy')
assert os.path.isfile('tests/resources/importer/foo/__init__.py')
assert os.path.isfile('tests/resources/importer/foo/some_mod.hy')
assert os.path.isfile('tests/resources/importer/foo/some_mod.py')
foo = importlib.import_module('foo')
assert foo.__file__.endswith('foo/__init__.hy')
assert foo.ext == 'hy'
some_mod = importlib.import_module('foo.some_mod')
assert some_mod.__file__.endswith('foo/some_mod.hy')
assert some_mod.ext == 'hy'
finally:
sys.path.pop(0)
def test_docstring():
"""Make sure a module's docstring is loaded."""
sys.path.insert(0, os.path.realpath('tests/resources/importer'))
try:
mod = importlib.import_module('docstring')
expected_doc = ("This module has a docstring.\n\n"
"It covers multiple lines, too!\n")
assert mod.__doc__ == expected_doc
assert mod.a == 1
finally:
sys.path.pop(0)
| 29.339041 | 82 | 0.621454 |
3fd38430637be48e1c877b3f52d6934e4fd918c1 | 942 | py | Python | st2common/st2common/exceptions/ssh.py | machao19902/st2 | 6768a529af1b3c12109cbfeae19d3cf7fdb71bb7 | [
"Apache-2.0"
] | 1 | 2020-11-09T21:05:33.000Z | 2020-11-09T21:05:33.000Z | st2common/st2common/exceptions/ssh.py | machao19902/st2 | 6768a529af1b3c12109cbfeae19d3cf7fdb71bb7 | [
"Apache-2.0"
] | 3 | 2021-03-25T23:57:10.000Z | 2021-03-26T00:01:05.000Z | st2common/st2common/exceptions/ssh.py | machao19902/st2 | 6768a529af1b3c12109cbfeae19d3cf7fdb71bb7 | [
"Apache-2.0"
] | null | null | null | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'InvalidCredentialsException'
]
class InvalidCredentialsException(Exception):
pass
class NoHostsConnectedToException(Exception):
pass
| 34.888889 | 74 | 0.771762 |
ccf8e6cec1c4edc72fe139ad14fb27bfc013d777 | 1,543 | py | Python | test_data.py | BlitzKraft/xkcd_random_test | 99e0a69999e2e220a60f19ea99d706f6187ba1ba | [
"Unlicense"
] | null | null | null | test_data.py | BlitzKraft/xkcd_random_test | 99e0a69999e2e220a60f19ea99d706f6187ba1ba | [
"Unlicense"
] | null | null | null | test_data.py | BlitzKraft/xkcd_random_test | 99e0a69999e2e220a60f19ea99d706f6187ba1ba | [
"Unlicense"
] | null | null | null | import random
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt # pylint: disable=C0413
# number of comics read per week
READ_COMICS = 400
# number of comics released per week
RELEASED_COMICS = 3
START = 1700
END = 2140
# number of additional iterations after latest comic
TAIL_ITERS = 1
def get_week(start=START, count_dict={}): # pylint: disable=W0102
sample = random.sample(range(1, start), READ_COMICS)
    # For testing with shuffle instead of choosing randomly
# sample = list(range(1, start))
# random.shuffle(sample)
# sample = sample[:READ_COMICS]
for index in sample:
count = count_dict.get(index, 0)
count_dict.update({index: count + 1})
if start < END:
start += RELEASED_COMICS
get_week(start, count_dict)
else:
for _ in range(0, TAIL_ITERS):
sample = random.sample(range(1, start), READ_COMICS)
for index in sample:
count = count_dict.get(index, 0)
count_dict.update({index: count + 1})
return count_dict
def draw_graph(count):
# for trimming the results to only after a certain comic index
cur = 0
keylist = list(count.keys())
keylist.sort()
plt.xkcd()
plt.figure(figsize=(12, 6), dpi=100)
plt.xlabel("Comic index")
plt.ylabel("Number of views")
run_len = range(cur, max(keylist))
plt.scatter(run_len, [count.get(i, 0) for i in run_len])
plt.savefig('graph.png')
COUNTS_PER_COMIC = get_week()
draw_graph(COUNTS_PER_COMIC)
| 28.574074 | 66 | 0.663642 |
363513efae35741465beb662f97998e35b9da19d | 2,917 | py | Python | examples/python/chemical_balance_sat.py | klorel/or-tools | f3fd201e68cf75b7720ff5c3cadc599a1d02b54b | [
"Apache-2.0"
] | 1 | 2019-07-15T14:30:18.000Z | 2019-07-15T14:30:18.000Z | examples/python/chemical_balance_sat.py | klorel/or-tools | f3fd201e68cf75b7720ff5c3cadc599a1d02b54b | [
"Apache-2.0"
] | 1 | 2021-02-23T10:22:55.000Z | 2021-02-23T13:57:14.000Z | examples/python/chemical_balance_sat.py | klorel/or-tools | f3fd201e68cf75b7720ff5c3cadc599a1d02b54b | [
"Apache-2.0"
] | 2 | 2020-02-26T18:11:33.000Z | 2020-12-02T07:44:34.000Z | # Copyright 2010-2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# We are trying to balance a chemical mix: each available chemical set supplies
# several products (N, P2O5, K2O, ...), and each product has a maximum allowed
# quantity. We choose how much of each set to use so that every product stays
# at or below its maximum while the largest shortfall from those maxima
# (epsilon) is minimized.
from __future__ import print_function
from __future__ import division
from ortools.sat.python import cp_model
import math
# Data
max_quantities = [["N_Total", 1944], ["P2O5", 1166.4], ["K2O", 1822.5],
["CaO", 1458], ["MgO", 486], ["Fe", 9.7], ["B", 2.4]]
chemical_set = [["A", 0, 0, 510, 540, 0, 0, 0],
                ["B", 110, 0, 0, 0, 160, 0, 0],
                ["C", 61, 149, 384, 0, 30, 1, 0.2],
                ["D", 148, 70, 245, 0, 15, 1, 0.2],
                ["E", 160, 158, 161, 0, 10, 1, 0.2]]
num_products = len(max_quantities)
all_products = range(num_products)
num_sets = len(chemical_set)
all_sets = range(num_sets)
# Model
model = cp_model.CpModel()
# Scale quantities by 100.
max_set = [
int(
math.ceil(
min(max_quantities[q][1] * 1000 / chemical_set[s][q + 1]
for q in all_products if chemical_set[s][q + 1] != 0)))
for s in all_sets
]
set_vars = [model.NewIntVar(0, max_set[s], "set_%i" % s) for s in all_sets]
epsilon = model.NewIntVar(0, 10000000, "epsilon")
for p in all_products:
model.Add(
sum(int(chemical_set[s][p + 1] * 10) * set_vars[s]
for s in all_sets) <= int(max_quantities[p][1] * 10000))
model.Add(
sum(int(chemical_set[s][p + 1] * 10) * set_vars[s]
for s in all_sets) >= int(max_quantities[p][1] * 10000) - epsilon)
model.Minimize(epsilon)
# Creates a solver and solves.
solver = cp_model.CpSolver()
status = solver.Solve(model)
print("Status = %s" % solver.StatusName(status))
# The objective value of the solution.
print("Optimal objective value = %f" % (solver.ObjectiveValue() / 10000.0))
for s in all_sets:
print(
" %s = %f" % (chemical_set[s][0], solver.Value(set_vars[s]) / 1000.0),
end=" ")
print()
for p in all_products:
name = max_quantities[p][0]
max_quantity = max_quantities[p][1]
quantity = sum(
solver.Value(set_vars[s]) / 1000.0 * chemical_set[s][p + 1]
for s in all_sets)
print("%s: %f out of %f" % (name, quantity, max_quantity))
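# Model recap (informal sketch): with x_s the scaled amount of chemical set s, c_{s,p}
# the content of product p per unit of set s, and Q_p the maximum quantity of product p,
# the solver picks integer x_s >= 0 to minimize epsilon subject to
#     Q_p - epsilon <= sum_s c_{s,p} * x_s <= Q_p   for every product p,
# so each product stays under its cap while the largest shortfall is made as small as
# possible.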
| 33.147727 | 80 | 0.63867 |
3d9d21deaed68cb4cba7ddcffc5675160229f3f8 | 2,973 | py | Python | colliflow/python/examples/shared_modules.py | YodaEmbedding/colliflow | 524a9397878ce2e7dde6a4526a91f866c03fc3e7 | [
"MIT"
] | 1 | 2021-02-08T22:20:34.000Z | 2021-02-08T22:20:34.000Z | colliflow/python/examples/shared_modules.py | YodaEmbedding/colliflow | 524a9397878ce2e7dde6a4526a91f866c03fc3e7 | [
"MIT"
] | null | null | null | colliflow/python/examples/shared_modules.py | YodaEmbedding/colliflow | 524a9397878ce2e7dde6a4526a91f866c03fc3e7 | [
"MIT"
] | null | null | null | from time import sleep, time
from typing import Tuple
import rx
import rx.operators as ops
from colliflow import InputAsyncModule, Model, Module, SymbolicTensor, Tensor
epoch = time()
def get_time():
return time() - epoch
class Preprocessor(Module):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def inner_config(self):
return {}
def forward(self, tensor: Tensor):
return tensor
def set_props_hook(self, tensor: SymbolicTensor):
self._shape = tensor.shape
self._dtype = tensor.dtype
class ClientInferenceModel(Module):
def __init__(self, func=None, shape=None, dtype=None, **kwargs):
super().__init__(shape, dtype, **kwargs)
self.func = func
def inner_config(self):
return {"shape": self.shape, "dtype": self.dtype}
def forward(self, tensor: Tensor):
sleep(0.7)
return self.func(tensor)
class ServerInferenceModel(Module):
def __init__(self, func=None, shape=None, dtype=None, **kwargs):
super().__init__(shape, dtype, **kwargs)
self.func = func
def inner_config(self):
return {"shape": self.shape, "dtype": self.dtype}
def forward(self, tensor: Tensor):
sleep(0.5)
return self.func(tensor)
class Postencoder(Module):
def __init__(self, **kwargs):
super().__init__((None,), "uint8", **kwargs)
def inner_config(self):
return {}
def forward(self, tensor: Tensor):
return tensor
class Predecoder(Module):
def __init__(self, shape, dtype, **kwargs):
super().__init__(shape, dtype, **kwargs)
def inner_config(self):
return {"shape": self.shape, "dtype": self.dtype}
def forward(self, tensor: Tensor):
return tensor
def FakeInput(shape: Tuple[int], dtype: str): # pylint: disable=invalid-name
return FakeInputLayer(shape, dtype)()
class FakeInputLayer(InputAsyncModule):
name = "FakeInput"
def __init__(self, shape: Tuple[int], dtype: str, **kwargs):
super().__init__(shape, dtype, **kwargs)
def inner_config(self):
return {"shape": self.shape, "dtype": self.dtype}
def produce(self):
return rx.interval(1).pipe(
ops.do_action(lambda x: print(f"\n{get_time():.1f} Frame {x}\n")),
ops.map(lambda _: Tensor((224, 224, 3), "uint8")),
ops.share(),
)
def model_from_config(model_config) -> Model:
client_func = lambda _: Tensor(shape=(14, 14, 512), dtype="uint8")
server_func = lambda _: Tensor(shape=(1000,), dtype="float32")
model = (
Model.deserialize(model_config)
if isinstance(model_config, str)
else Model.deserialize_dict(model_config)
)
x = next(x for x in model.modules if isinstance(x, ClientInferenceModel))
x.func = client_func
x = next(x for x in model.modules if isinstance(x, ServerInferenceModel))
x.func = server_func
return model
| 25.62931 | 79 | 0.638412 |
2b5f50ebe9ddb18064687bbe315410fec83b16fc | 3,217 | py | Python | tests/web/test_galaxy.py | jvazquez77/marvin | f7f2c498c1a2cfa19ab96c51eebbbd4bde980984 | [
"BSD-3-Clause"
] | 49 | 2016-11-04T19:20:50.000Z | 2022-03-13T08:43:05.000Z | tests/web/test_galaxy.py | jvazquez77/marvin | f7f2c498c1a2cfa19ab96c51eebbbd4bde980984 | [
"BSD-3-Clause"
] | 703 | 2016-11-02T01:25:14.000Z | 2022-03-31T19:20:03.000Z | tests/web/test_galaxy.py | jvazquez77/marvin | f7f2c498c1a2cfa19ab96c51eebbbd4bde980984 | [
"BSD-3-Clause"
] | 37 | 2016-11-09T08:51:48.000Z | 2022-02-22T22:49:45.000Z | # !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-02-22 10:38:28
# @Last modified by: Brian Cherinka
# @Last modified time: 2017-07-31 12:07:00
from __future__ import print_function, division, absolute_import
from marvin.web.controllers.galaxy import make_nsa_dict
from marvin.web.controllers.galaxy import getWebMap
from marvin.tools.cube import Cube
from tests.conftest import set_the_config
import pytest
@pytest.fixture()
def cube(galaxy, mode):
set_the_config(galaxy.release)
cube = Cube(plateifu=galaxy.plateifu, mode=mode, release=galaxy.release)
cube.exp_nsa_plotcols = galaxy.nsa_data
return cube
@pytest.fixture()
def params(galaxy):
return {'release': galaxy.release}
@pytest.mark.parametrize('page', [('galaxy_page', 'Galaxy:index')], ids=['galaxy'], indirect=True)
class TestGalaxyPage(object):
def test_assert_galaxy_template_used(self, page, get_templates):
page.load_page('get', page.url)
assert '' == page.data
template, context = get_templates[0]
assert 'galaxy.html' == template.name, 'Template used should be galaxy.html'
@pytest.mark.parametrize('page', [('galaxy_page', 'initnsaplot')], ids=['initnsa'], indirect=True)
class TestNSA(object):
#@marvin_test_if(mark='skip', cube=dict(nsa=[None]))
def test_nsadict_correct(self, cube, page):
nsa, cols = make_nsa_dict(cube.nsa)
for value in cube.exp_nsa_plotcols.values():
assert set(value.keys()).issubset(set(cols))
page.assert_dict_contains_subset(value, nsa)
page.assertListIn(value.keys(), cols)
@pytest.mark.skip('these magically worked when they should not have and now they actually do not')
def test_initnsa_method_not_allowed(self, page, params, get_templates):
page.load_page('get', page.url, params=params)
template, context = get_templates[0]
assert template.name == 'errors/method_not_allowed.html'
def test_initnsa_no_plateifu(self, page, get_templates):
errmsg = 'Field may not be null.'
page.load_page('post', page.url)
template, context = get_templates[0]
page.route_no_valid_webparams(template, context, 'plateifu', reqtype='post', errmsg=errmsg)
class TestWebMap(object):
@pytest.mark.parametrize('parameter, channel',
[('emline_gflux', 'ha_6564'),
('emline_gsigma', 'ha_6564'),
('stellar_sigma', None)],
ids=['gflux', 'gsigma', 'stellarsigma'])
def test_getmap(self, cube, parameter, channel):
webmap, mapmsg = getWebMap(cube, parameter=parameter, channel=channel)
assert isinstance(webmap, dict)
assert 'values' in webmap
assert isinstance(webmap['values'], list)
assert parameter in mapmsg
if 'sigma' in parameter and cube.release != 'MPL-6':
assert 'Corrected' in mapmsg
def test_getmap_failed(self, cube):
webmap, mapmsg = getWebMap(cube, parameter='crap')
assert webmap is None
assert 'Could not get map' in mapmsg
| 34.967391 | 102 | 0.666459 |
c93fa7f6947575813aa9486a85bca8c01e9aee90 | 450 | py | Python | test_multiple_boys.py | sorindragan/ChatBot_Th | 4f5ef81b598035d72ec5a2852a88847692bece58 | [
"MIT"
] | null | null | null | test_multiple_boys.py | sorindragan/ChatBot_Th | 4f5ef81b598035d72ec5a2852a88847692bece58 | [
"MIT"
] | null | null | null | test_multiple_boys.py | sorindragan/ChatBot_Th | 4f5ef81b598035d72ec5a2852a88847692bece58 | [
"MIT"
] | 1 | 2019-10-31T19:55:02.000Z | 2019-10-31T19:55:02.000Z | from sentence_processor import SentenceProcessor
def multiple_boys(phrase):
sp = SentenceProcessor(phrase, 0)
return sp.process()
def test_multiple_boys():
output = [('boy0', 'outraced', 'charlie'),
('boy0', 'property', 'tall'),
('boy1', 'outraced', 'charlie'),
('boy1', 'property', 'ugly')
]
assert multiple_boys("The tall boy and the ugly boy outraced Charlie.") == output
| 30 | 85 | 0.595556 |
f617a62e09415c0152de39f57a64acfd7d98b984 | 900 | py | Python | BOJ/1303.py | Jaesin22/TIL | d0aa137af79c22ea9eb0ff1c7a0264c086ebe1b2 | [
"MIT"
] | null | null | null | BOJ/1303.py | Jaesin22/TIL | d0aa137af79c22ea9eb0ff1c7a0264c086ebe1b2 | [
"MIT"
] | null | null | null | BOJ/1303.py | Jaesin22/TIL | d0aa137af79c22ea9eb0ff1c7a0264c086ebe1b2 | [
"MIT"
] | null | null | null | from collections import deque
N, M = map(int, input().split())
graph = [list(input().strip()) for _ in range(M)]
visited = [[0] * N for _ in range(M)]
white, blue = 0, 0
dx = [-1,1,0,0]
dy = [0,0,-1,1]
queue = deque()
def BFS(x, y):
cnt = 1
queue.append((x, y))
visited[x][y] = 1
while queue:
a, b = queue.popleft()
for i in range(4):
nx = a + dx[i]
ny = b + dy[i]
if 0 <= nx < M and 0 <= ny < N and visited[nx][ny] == 0 and graph[a][b] == graph[nx][ny]:
visited[nx][ny] = 1
queue.append((nx, ny))
cnt += 1
return cnt
for i in range(M):
for j in range(N):
if visited[i][j] == 0:
res = BFS(i, j)
if graph[i][j] == 'W':
white += res ** 2
else:
blue += res ** 2
print(white, blue)
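# Worked example (hypothetical input, 4-neighbour connectivity):
#   3 2
#   WBW
#   WWB
# White components have sizes 3 and 1 -> 3**2 + 1**2 = 10
# Blue components have sizes 1 and 1  -> 1**2 + 1**2 = 2
# Expected output: 10 2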
| 21.428571 | 101 | 0.431111 |
b50c8ab37ee178a3c0e8009450d113d6697e531a | 1,376 | py | Python | domain_manage/urls.py | wolfcheng/nano-cf-panel | 17235552e5cac79d05a0d1e924e0aac730678f4b | [
"MIT"
] | null | null | null | domain_manage/urls.py | wolfcheng/nano-cf-panel | 17235552e5cac79d05a0d1e924e0aac730678f4b | [
"MIT"
] | null | null | null | domain_manage/urls.py | wolfcheng/nano-cf-panel | 17235552e5cac79d05a0d1e924e0aac730678f4b | [
"MIT"
] | null | null | null | """domain_manage URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import *
from django.contrib import admin
from domain_manage.views import *
from django.contrib import admin
app_name='manage'
urlpatterns = [
url(r'^b',domain_get_first),
url(r'^add_cname',add_cname),
url(r'^dns_records',domain_records_list),
url(r'^proxied_set',record_set),
url(r'^update_record',record_set),
url(r'^new_record',record_set),
url(r'^delete_record',record_set),
url(r'^cname_detail',cname_detail),
url(r'^add',add_z_c_r),
url(r'^zone_delete',add_z_c_r),
url(r'^purge_cache',purge_cache),
url(r'^cache',cache),
url(r'^ssl',ssl),
url(r'^analytics',dashboard),
url(r'^is_monitor',is_monitor_on),
url(r'^monitors',monitors),
]
| 33.560976 | 79 | 0.694767 |
a2f5836fc834fc0558c267615bdd8d3f594d9ca3 | 45,923 | py | Python | pysimm/lmps.py | jrdcasa/pysimm | 9fc94fa658f42283b503d72886581c9acdcac5e2 | [
"MIT"
] | null | null | null | pysimm/lmps.py | jrdcasa/pysimm | 9fc94fa658f42283b503d72886581c9acdcac5e2 | [
"MIT"
] | null | null | null | pysimm/lmps.py | jrdcasa/pysimm | 9fc94fa658f42283b503d72886581c9acdcac5e2 | [
"MIT"
] | null | null | null | # ******************************************************************************
# pysimm.lmps module
# ******************************************************************************
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2016 Michael E. Fortunato, Coray M. Colina
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import shlex
import shutil
from subprocess import call, Popen, PIPE
from queue import Queue, Empty
from threading import Thread
import os
import sys
import json
from random import randint
from time import strftime
from io import StringIO
try:
import pandas as pd
except ImportError:
pd = None
from pysimm.system import read_lammps
from pysimm.system import System
from pysimm import error_print
from pysimm import warning_print
from pysimm import verbose_print
from pysimm import debug_print
from pysimm.utils import PysimmError, Item, ItemContainer
try:
from Rappture.tools import getCommandOutput as RapptureExec
except ImportError:
pass
LAMMPS_EXEC = os.environ.get('LAMMPS_EXEC')
verbose = False
templates = {}
FF_SETTINGS = {
'dreiding':
{
'pair_style': 'lj/cut',
'bond_style': 'harmonic',
'angle_style': 'harmonic',
'dihedral_style': 'harmonic',
'improper_style': 'umbrella',
'pair_modify': {
'mix': 'arithmetic'
},
'special_bonds': 'dreiding'
},
'amber':
{
'pair_style': 'lj/cut',
'bond_style': 'harmonic',
'angle_style': 'harmonic',
'dihedral_style': 'fourier',
'improper_style': 'cvff',
'pair_modify': {
'mix': 'arithmetic'
},
'special_bonds': 'amber'
},
'pcff':
{
'pair_style': 'lj/class2',
'bond_style': 'class2',
'angle_style': 'class2',
'dihedral_style': 'class2',
'improper_style': 'class2',
'pair_modify': {
'mix': 'sixthpower'
},
'special_bonds': 'lj/coul 0 0 1'
},
'opls':
{
'pair_style': 'lj/cut',
'bond_style': 'harmonic',
'angle_style': 'harmonic',
'dihedral_style': 'opls',
'improper_style': 'cvff',
'pair_modify': {
'mix': 'geometric'
},
'special_bonds': 'lj/coul 0 0 0.5'
},
'charmm':
{
'pair_style': 'lj/charmm',
'bond_style': 'harmonic',
'angle_style': 'charmm',
'dihedral_style': 'charmm',
'improper_style': 'harmonic',
'pair_modify': {
'mix': 'arithmetic'
},
'special_bonds': 'charmm'
},
'trappe/amber':
{
'pair_style': 'lj/cut',
'bond_style': 'harmonic',
'angle_style': 'harmonic',
'dihedral_style': 'fourier',
'improper_style': 'cvff',
'pair_modify': {
'mix': 'arithmetic'
},
'special_bonds': 'amber'
}
}
def check_lmps_exec():
if LAMMPS_EXEC is None:
print('you must set environment variable LAMMPS_EXEC')
return False
else:
try:
stdout, stderr = Popen([LAMMPS_EXEC, '-e', 'both', '-l', 'none'],
stdin=PIPE, stdout=PIPE,
stderr=PIPE).communicate()
if verbose:
print('using %s LAMMPS machine' % LAMMPS_EXEC)
return True
except OSError:
print('LAMMPS is not configured properly for one reason or another')
return False
class Init(object):
"""pysimm.lmps.Init
Template object to contain LAMMPS initialization settings
Attributes:
forcefield: name of a supported force field; simulation settings will be chosen based on the force field name
units: LAMMPS set of units to use during simulation; default=real
        atom_style: LAMMPS atom_style to use during simulation; default=full
charge: option to define if any particles in system a non-zero charge
kspace_style: LAMMPS kspace_style to use during simulation if system has charges; default=pppm 1e-4
cutoff: dictionary of cutoff distances for nonbonded interactions; default={'lj': 12.0, 'coul': 12.0, 'inner_lj': 10.0}
pair_style: LAMMPS pair_style to use during simulation
bond_style: LAMMPS bond_style to use during simulation
angle_style: LAMMPS angle_style to use during simulation
dihedral_style: LAMMPS dihedral_style to use during simulation
improper_style: LAMMPS improper_style to use during simulation
special_bonds: LAMMPS special_bonds to use during simulation
pair_modify: LAMMPS pair_modify to use during simulation
read_data: name of data file to read instead of using :class:`~pysimm.system.System` object
"""
def __init__(self, **kwargs):
self.forcefield = kwargs.get('forcefield')
self.units = kwargs.get('units', 'real')
self.atom_style = kwargs.get('atom_style', 'full')
self.charge = kwargs.get('charge')
self.kspace_style = kwargs.get('kspace_style', 'pppm 1e-4')
self.cutoff = kwargs.get('cutoff')
self.pair_style = kwargs.get('pair_style')
self.bond_style = kwargs.get('bond_style')
self.angle_style = kwargs.get('angle_style')
self.dihedral_style = kwargs.get('dihedral_style')
self.improper_style = kwargs.get('improper_style')
self.special_bonds = kwargs.get('special_bonds')
self.pair_modify = kwargs.get('pair_modify', {})
self.create_box = kwargs.get('create_box')
self.read_data = kwargs.get('read_data')
if self.forcefield and self.forcefield not in ['amber', 'trappe/amber', 'dreiding', 'pcff', 'opls', 'charmm']:
if self.forcefield.lower() in ['gaff', 'gaff2']:
self.forcefield = 'amber'
elif self.forcefield.lower() in ['cgenff']:
self.forcefield = 'charmm'
if isinstance(self.cutoff, int) or isinstance(self.cutoff, float):
self.cutoff = {'lj': self.cutoff, 'coul': self.cutoff, 'inner_lj': self.cutoff-2.0}
if self.cutoff is None:
self.cutoff = {'lj': 12.0, 'coul': 12.0, 'inner_lj': 10.0}
def write(self, sim=None):
"""pysimm.lmps.Init.write
Prepare LAMMPS input with initialization settings
Args:
sim: :class:`~pysimm.lmps.Simulation` object reference
Returns:
string of LAMMPS input
"""
if sim:
s = sim.system
else:
s = None
if self.forcefield is None and s and s.forcefield is not None:
if s.forcefield in ['gaff', 'gaff2']:
self.forcefield = 'amber'
elif s.forcefield in ['cgenff']:
self.forcefield = 'charmm'
else:
self.forcefield = s.forcefield
elif self.forcefield is None and sim and sim.forcefield is not None:
self.forcefield = sim.forcefield
if self.special_bonds is None and self.forcefield is not None:
self.special_bonds = FF_SETTINGS[self.forcefield]['special_bonds']
if self.forcefield is not None:
pair_modify = FF_SETTINGS[self.forcefield]['pair_modify']
if self.pair_modify:
pair_modify.update(self.pair_modify)
self.pair_modify = pair_modify
if self.charge is None and s is not None:
for p in s.particles:
if p.charge:
self.charge = True
break
if self.charge is None:
self.charge=False
lammps_input = ''
lammps_input += '\n' + '#'*80 + '\n'
lammps_input += '#'*34 + ' Init ' + '#'*34 + '\n'
lammps_input += '#'*80 + '\n'
lammps_input += '{:<15} {}\n'.format('units', self.units)
lammps_input += '{:<15} {}\n'.format('atom_style', self.atom_style)
if self.create_box and self.create_box.region and type(self.create_box.region) is Region:
lammps_input += self.create_box.region.write(None)
lammps_input += self.create_box.write(None)
if self.pair_style:
lammps_input += '{:<15} {}'.format('pair_style', self.pair_style)
elif self.forcefield:
self.pair_style = FF_SETTINGS[self.forcefield]['pair_style']
lammps_input += '{:<15} {}'.format('pair_style', self.pair_style)
if self.charge:
lammps_input += '/coul/long'
self.pair_style += '/coul/long'
else:
raise PysimmError('A pair_style must be defined during initialization')
if self.cutoff:
            if self.forcefield == 'charmm' and self.cutoff.get('inner_lj'):
lammps_input += ' {} '.format(self.cutoff['inner_lj'])
lammps_input += ' {} '.format(self.cutoff['lj'])
if self.charge and self.cutoff.get('coul'):
lammps_input += ' {} '.format(self.cutoff['coul'])
lammps_input += '\n'
if self.charge:
lammps_input += '{:<15} {}\n'.format('kspace_style', self.kspace_style)
if self.bond_style is None and s and s.bonds.count > 0:
if self.forcefield:
self.bond_style = FF_SETTINGS[self.forcefield]['bond_style']
if self.bond_style:
lammps_input += '{:<15} {}\n'.format('bond_style', self.bond_style)
if self.angle_style is None and s and s.angles.count > 0:
if self.forcefield:
self.angle_style = FF_SETTINGS[self.forcefield]['angle_style']
if self.angle_style:
lammps_input += '{:<15} {}\n'.format('angle_style', self.angle_style)
if self.dihedral_style is None and s and s.dihedrals.count > 0:
if self.forcefield:
self.dihedral_style = FF_SETTINGS[self.forcefield]['dihedral_style']
if self.dihedral_style:
lammps_input += '{:<15} {}\n'.format('dihedral_style', self.dihedral_style)
if self.improper_style is None and s and s.impropers.count > 0:
if self.forcefield:
self.improper_style = FF_SETTINGS[self.forcefield]['improper_style']
if self.improper_style:
lammps_input += '{:<15} {}\n'.format('improper_style', self.improper_style)
if self.special_bonds:
lammps_input += '{:<15} {}\n'.format('special_bonds', self.special_bonds)
if self.pair_modify:
lammps_input += '{:<15} '.format('pair_modify')
for k, v in self.pair_modify.items():
lammps_input += '{} {} '.format(k, v)
lammps_input += '\n'
if self.read_data:
lammps_input += '{:<15} {}\n'.format('read_data', self.read_data)
elif s:
s.write_lammps('temp.lmps')
lammps_input += '{:<15} temp.lmps\n'.format('read_data')
if self.pair_style and self.pair_style.startswith('buck'):
for pt1 in s.particle_types:
for pt2 in s.particle_types:
if pt1.tag <= pt2.tag:
a = pow(pt1.a*pt2.a, 0.5)
c = pow(pt1.c*pt2.c, 0.5)
rho = 0.5*(pt1.rho+pt2.rho)
lammps_input += '{:<15} {} {} {} {} {}\n'.format('pair_coeff', pt1.tag, pt2.tag, a, rho, c)
lammps_input += '#'*80 + '\n\n'
return lammps_input
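# Illustrative sketch: generating an Init block on its own, without an attached System,
# using only force-field defaults and an explicit cutoff dictionary.
def _example_init_block():
    init = Init(forcefield='opls', cutoff={'lj': 10.0, 'coul': 10.0}, charge=False)
    return init.write()  # returns the "Init" portion of a LAMMPS input script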
class Region(Item):
"""pysimm.lmps.Region
Template object to create a region in a LAMMPS simulation. See LAMMPS documentation for further information
Attributes:
name: name id for region
style: LAMMPS region style
*args: args for given style
**kwargs: optional kwargs for region command
"""
def __init__(self, name='all', style='block', *args, **kwargs):
Item.__init__(self, name=name, style=style, args=args, kwargs=kwargs)
def write(self, sim=None):
inp = '{:<15} {name} {style} '.format('region', name=self.name, style=self.style)
for a in self.args:
inp += '{} '.format(a)
if not self.args:
for _ in range(6):
inp += 'EDGE '
for k, v in self.kwargs.items():
inp += '{} {} '.format(k, v)
inp += '\n'
return inp
class CreateBox(Item):
"""pysimm.lmps.CreateBox
Template object to create a box in a LAMMPS simulation. See LAMMPS documentation for further information
Attributes:
n: number of atom types
region: :class:`~pysimm.lmps.Region` object
**kwargs: optional kwargs for create_box command (replace / with _)
"""
def __init__(self, n=1, region=Region(), *args, **kwargs):
Item.__init__(self, n=n, region=region, args=args, kwargs=kwargs)
def write(self, sim=None):
inp = '{:<15} {n} {region.name} '.format('create_box', **vars(self))
for k, v in self.kwargs.items():
inp += '{} {} '.format(k.replace('_', '/'), v)
inp += '\n'
return inp
class Group(Item):
"""pysimm.lmps.Group
Template object to define a group in a LAMMPS simulation. See LAMMPS documentation for further information
Attributes:
name: name for the group
style: style for the group
*args: arguments for the given style
"""
def __init__(self, name='all', style='id', *args, **kwargs):
Item.__init__(self, name=name, style=style, args=args, **kwargs)
def write(self, sim=None):
inp = '{:<15} {name} {style} '.format('group', name=self.name, style=self.style)
for a in self.args:
inp += '{} '.format(a)
if not self.args:
inp += '*'
inp += '\n'
return inp
class Velocity(Item):
"""pysimm.lmps.Velocity
Template object to define velocity initialization in a LAMMPS simulation. See LAMMPS documentation for further information
Attributes:
group: group for velocity command
style: style for the velocity command
*args: arguments for the given style
"""
def __init__(self, group=Group('all'), style='create', *args, **kwargs):
Item.__init__(self, group=group, style=style, args=args, **kwargs)
if self.seed is None:
self.seed = randint(10000, 99999)
if self.temperature is None:
self.temperature = 300.0
if args:
self.from_args = True
def write(self, sim=None):
if isinstance(self.group, Group):
inp = '{:<15} {group.name} {style} '.format('velocity', group=self.group, style=self.style)
else:
inp = '{:<15} {group} {style} '.format('velocity', group=self.group, style=self.style)
if self.from_args:
for a in self.args:
inp += '{} '.format(a)
elif self.style == 'create' or self.style == 'scale':
inp += '{temp} '.format(temp=self.temperature)
if self.style == 'create':
inp += '{seed} '.format(seed=self.seed)
for k in ['dist', 'sum', 'mom', 'rot', 'bias', 'loop', 'rigid', 'units']:
if getattr(self, k):
inp += '{} {} '.format(k, getattr(self, k))
inp += '\n'
return inp
class OutputSettings(object):
"""pysimm.lmps.OutputSettings
Template object to define thermo and dump output settings in a LAMMPS simulation. See LAMMPS documentation for further information
Attributes:
thermo: dictionary of settings for thermo output
dump: dictionary of settings for dump output
"""
def __init__(self, **kwargs):
self.thermo = kwargs.get('thermo')
self.dump = kwargs.get('dump', kwargs.get('trajectory'))
if isinstance(self.thermo, int):
self.thermo = {'freq': self.thermo}
if isinstance(self.thermo, dict):
self.thermo['freq'] = self.thermo.get('freq', 1000)
self.thermo['style'] = self.thermo.get('style', 'custom')
self.thermo['args'] = self.thermo.get('args', ['step', 'time', 'temp', 'vol', 'press', 'etotal', 'epair', 'emol', 'density'])
self.thermo['modify'] = self.thermo.get('modify')
if isinstance(self.dump, int):
self.dump = {'freq': self.dump}
if isinstance(self.dump, dict):
self.dump['freq'] = self.dump.get('freq', 1000)
self.dump['group'] = self.dump.get('group', Group(name='all'))
self.dump['name'] = self.dump.get('name', 'pysimm_dump')
self.dump['style'] = self.dump.get('style', 'custom')
self.dump['filename'] = self.dump.get('filename', 'dump.*')
self.dump['args'] = self.dump.get('args', ['id', 'type', 'mol', 'x', 'y', 'z', 'vx', 'vy', 'vz'])
self.dump['modify'] = self.dump.get('modify')
if isinstance(self.dump, dict) and isinstance(self.dump['group'], str):
self.dump['group'] = Group(name=self.dump['group'])
def write(self, sim=None):
lammps_input = ''
if isinstance(self.thermo, dict):
lammps_input += '\n' + '#'*80 + '\n'
lammps_input += '#'*29 + ' Thermo output ' + '#'*29 + '\n'
lammps_input += '#'*80 + '\n'
lammps_input += '{:<15} {}\n'.format('thermo', self.thermo['freq'])
lammps_input += '{:<15} {} '.format('thermo_style', self.thermo['style'])
if self.thermo['style'] == 'custom':
lammps_input += ' '.join(self.thermo['args'])
lammps_input += '\n'
if self.thermo.get('modify'):
lammps_input += '{:<15} {} '.format('thermo_modify', self.thermo.get('modify'))
lammps_input += '\n'
lammps_input += '#'*80 + '\n\n'
if isinstance(self.dump, dict):
lammps_input += '\n' + '#'*80 + '\n'
lammps_input += '#'*30 + ' Dump output ' + '#'*30 + '\n'
lammps_input += '#'*80 + '\n'
lammps_input += '{:<15} {} {} {} {} {} '.format('dump', self.dump['name'], self.dump['group'].name, self.dump['style'], self.dump['freq'], self.dump['filename'])
if self.dump['style'] == 'custom':
lammps_input += ' '.join(self.dump['args'])
lammps_input += '\n'
if self.dump.get('modify'):
lammps_input += '{:<15} {} {} '.format('dump_modify', self.dump['name'], self.dump.get('modify'))
lammps_input += '\n'
lammps_input += '#'*80 + '\n\n'
return lammps_input
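# Illustrative sketch of the dictionary form OutputSettings accepts; keys not given
# here fall back to the defaults set in __init__ (group 'all', custom style, etc.).
def _example_output_settings():
    out = OutputSettings(
        thermo={'freq': 500, 'args': ['step', 'temp', 'press', 'etotal']},
        dump={'freq': 1000, 'filename': 'traj.lammpstrj'})
    return out.write()  # thermo/dump commands as a LAMMPS input fragment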
class Qeq(object):
    """pysimm.lmps.Qeq
Template object to contain LAMMPS qeq settings
Attributes:
cutoff: distance cutoff for charge equilibration
tol: tolerance (precision) for charge equilibration
max_iter: maximum iterations
qfile: file with qeq parameters (leave undefined for defaults)
"""
def __init__(self, **kwargs):
self.cutoff = kwargs.get('cutoff', 10)
self.tol = kwargs.get('tol', 1.0e-6)
self.max_iter = kwargs.get('max_iter', 200)
self.qfile = kwargs.get('qfile')
self.input = ''
def write(self, sim=None):
"""pysimm.lmps.Qeq.write
Create LAMMPS input for a charge equilibration calculation
Args:
sim: :class:`~pysimm.lmps.Simulation` object reference
Returns:
input string
"""
if self.qfile is None:
param_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.pardir, 'dat', 'qeq', 'hcno.json')
with open(param_file) as f:
qeq_params = json.loads(f.read())
with open('pysimm.qeq.tmp', 'w') as f:
for pt in sim.system.particle_types:
f.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(pt.tag,
qeq_params[pt.elem]['chi'],
qeq_params[pt.elem]['eta']*2,
qeq_params[pt.elem]['gamma'],
qeq_params[pt.elem]['zeta'],
qeq_params[pt.elem]['qcore']))
self.qfile = 'pysimm.qeq.tmp'
self.input = ''
self.input += 'fix 1 all qeq/point 1 {} {} {} {}\n'.format(self.cutoff, self.tol, self.max_iter, self.qfile)
self.input += 'run 0\n'
self.input += 'unfix 1\n'
return self.input
class MolecularDynamics(object):
"""pysimm.lmps.MolecularDynamics
Template object to contain LAMMPS MD settings
Attributes:
name: name to identify MD
group: :class:`~pysimm.lmps.Group` object for integrator
timestep: timestep value to use during MD
ensemble: 'nvt' or 'npt' or 'nve'; default=nve
limit: numerical value to use with nve when limiting particle displacement
temperature: dictionary of settings for temperature (start, stop, damp)
pressure: dictionary of settings for pressure (start, stop, damp)
run: length of MD simulation in number of timesteps or False to omit run command
unfix: True to include command to unfix integrator after run
rigid: dictionary of settings for a rigid simulation
extra_keywords: dictionary of extra keywords to append at the end of the LAMMPS fix integrator
"""
def __init__(self, **kwargs):
self.name = kwargs.get('name', 'pysimm_md')
self.group = kwargs.get('group', Group(name='all'))
self.timestep = kwargs.get('timestep', 1)
self.ensemble = kwargs.get('ensemble', 'nve')
self.limit = kwargs.get('limit')
self.temperature = kwargs.get('temperature', kwargs.get('temp', 300.))
self.pressure = kwargs.get('pressure', 1.)
self.run = kwargs.get('run', kwargs.get('length', 2000))
self.unfix = kwargs.get('unfix', True)
self.rigid = kwargs.get('rigid')
self.extra_keywords = kwargs.get('extra_keywords', {})
if kwargs.get('temp') is not None:
print('temp keyword argument is deprecated for MolecularDynamics, please use temperature instead')
if isinstance(self.group, str):
self.group = Group(name=self.group)
if isinstance(self.temperature, int) or isinstance(self.temperature, float):
self.temperature = {'start': self.temperature}
if isinstance(self.pressure, int) or isinstance(self.pressure, float):
self.pressure = {'start': self.pressure}
if isinstance(self.rigid, dict):
self.ensemble = 'rigid/{}'.format(self.ensemble)
if self.rigid.get('small'):
self.ensemble += '/small '
self.input = ''
def write(self, sim=None):
"""pysimm.lmps.MolecularDynamics.write
Create LAMMPS input for a molecular dynamics simulation.
Args:
sim: pysimm.lmps.Simulation object reference
Returns:
input string
"""
self.input = ''
self.input += '{:<15} {}\n'.format('timestep', self.timestep)
self.input += '{:<15} {} {} {}'.format('fix', self.name, self.group.name, self.ensemble)
if self.ensemble == 'nve' and self.limit:
self.input += '/limit {} '.format(self.limit)
else:
self.input += ' '
if self.rigid:
self.input += '{} '.format(self.rigid.get('style', 'molecule'))
if self.rigid.get('style') == 'group':
assert isinstance(self.rigid.get('groups'), list)
self.input += ' {} '.format(len(self.rigid.get('groups')))
for g in self.rigid.get('groups'):
if isinstance(g, Group):
group_name = g.name
else:
group_name = g
self.input += '{} '.format(group_name)
if 't' in self.ensemble:
self.input += 'temp {} {} {} '.format(self.temperature.get('start', 300.), self.temperature.get('stop', self.temperature.get('start', 300.)), self.temperature.get('damp', 100*self.timestep))
if 'p' in self.ensemble:
self.input += '{} {} {} {} '.format(self.pressure.get('iso', 'aniso'), self.pressure.get('start', 1.), self.pressure.get('stop', self.pressure.get('start', 1.)), self.pressure.get('damp', 1000*self.timestep))
for k, v in self.extra_keywords.items():
self.input += '{} {} '.format(k, v)
self.input += '\n'
if self.run is not False:
self.input += '{:<15} {}\n'.format('run', int(self.run))
if self.run and self.unfix:
self.input += 'unfix {}\n'.format(self.name)
return self.input
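# Illustrative sketch of an NPT template: temperature and pressure can be a single
# number or a dict with 'start'/'stop'/'damp' (plus 'iso' for the pressure coupling).
def _example_npt_template():
    md = MolecularDynamics(ensemble='npt', timestep=1.0, run=10000,
                           temperature={'start': 300., 'stop': 400., 'damp': 100.},
                           pressure={'start': 1., 'iso': 'iso'})
    return md.write()  # fix/run commands only; combine with Init inside a Simulation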
class SteeredMolecularDynamics(MolecularDynamics):
def __init__(self, **kwargs):
MolecularDynamics.__init__(self, **kwargs)
self.p1 = kwargs.get('p1')
self.p2 = kwargs.get('p2')
self.k = kwargs.get('k', 20.0)
self.v = kwargs.get('v', 0.001)
self.d = kwargs.get('d', 3.0)
def write(self, sim=None):
"""pysimm.lmps.SteeredMolecularDynamics.write
Create LAMMPS input for a steered molecular dynamics simulation.
Args:
sim: :class:`~pysimm.lmps.Simulation` object reference
Returns:
input string
"""
self.input = ''
if self.thermo:
self.input += 'thermo %s\n' % int(self.thermo)
if self.thermo_style:
self.input += 'thermo_style %s\n' % self.thermo_style
self.input += 'timestep %s\n' % self.timestep
if self.ensemble == 'nvt':
self.input += 'fix 1 all %s temp %s %s %s\n' % (self.ensemble, self.t_start, self.t_stop, self.tdamp)
elif self.ensemble == 'npt':
self.input += ('fix 1 all %s temp %s %s %s iso %s %s %s\n'
% (self.ensemble, self.t_start, self.t_stop, self.tdamp, self.p_start, self.p_stop, self.pdamp))
elif self.ensemble == 'nve':
self.input += 'fix 1 all %s\n' % self.ensemble
if self.new_v:
self.input += 'velocity all create %s %s\n' % (self.t_start, self.seed)
elif self.scale_v:
self.input += 'velocity all scale %s\n' % self.t_start
if self.dump:
if self.dump_name:
self.input += ('dump pysimm_dump all atom %s %s.lammpstrj\n'
% (self.dump, self.dump_name))
elif sim.name:
self.input += ('dump pysimm_dump all atom %s %s.lammpstrj\n'
% (self.dump, '_'.join(sim.name.split())))
else:
self.input += ('dump pysimm_dump all atom %s pysimm_dump.lammpstrj\n'
% self.dump)
if self.dump_append:
self.input += 'dump_modify pysimm_dump append yes\n'
self.input += 'group p1 id {}\n'.format(self.p1.tag)
self.input += 'group p2 id {}\n'.format(self.p2.tag)
self.input += 'fix steer p1 smd cvel {} {} couple p2 auto auto auto {}\n'.format(self.k, self.v, self.d)
self.input += 'run %s\n' % int(self.length)
self.input += 'unfix 1\n'
self.input += 'unfix steer\n'
if self.dump:
self.input += 'undump pysimm_dump\n'
return self.input
class Minimization(object):
"""pysimm.lmps.Minimization
Template object to contain LAMMPS energy minimization settings.
Attributes:
        min_style: LAMMPS minimization style default='fire'
dmax: how far any atom can move in a single line search in any dimension
etol: energy tolerance default=1e-3
ftol: force tolerance default=1e-3
maxiter: maximum iterations default=10000
        maxeval: maximum force evaluations default=100000
"""
def __init__(self, **kwargs):
self.min_style = kwargs.get('min_style', 'fire')
self.dmax = kwargs.get('dmax')
self.etol = kwargs.get('etol', 1.0e-3)
self.ftol = kwargs.get('ftol', 1.0e-3)
self.maxiter = kwargs.get('maxiter', 10000)
self.maxeval = kwargs.get('maxeval', 100000)
self.input = ''
def write(self, sim=None):
"""pysimm.lmps.Minimization.write
Create LAMMPS input for an energy minimization simulation.
Args:
sim: :class:`~pysimm.lmps.Simulation` object reference
Returns:
input string
"""
self.input = ''
self.input += 'min_style %s\n' % self.min_style
if self.dmax:
self.input += 'min_modify dmax %s\n' % self.dmax
self.input += ('minimize %s %s %s %s\n' % (self.etol, self.ftol,
self.maxiter, self.maxeval))
return self.input
class CustomInput(object):
"""pysimm.lmps.CustomInput
Template object to contain custom LAMMPS input.
Attributes:
custom_input: custom input string
"""
def __init__(self, custom_input):
self.input = '{}\n'.format(custom_input)
def write(self, sim=None):
"""pysimm.lmps.CustomInput.write
Create LAMMPS input for a custom simulation.
Args:
sim: pysimm.lmps.Simulation object reference
Returns:
input string
"""
return self.input
class Simulation(object):
"""pysimm.lmps.Simulation
Organizational object for LAMMPS simulation. Should contain combination of
:class:`~pysimm.lmps.MolecularDynamics`, :class:`~pysimm.lmps.Minimization`, and/or :class:`~pysimm.lmps.CustomInput` object.
Attributes:
forcefield: name of force field for simulation settings
name: name for simulation
log: LAMMPS log filename
write: file name to write final LAMMPS data file default=None
print_to_screen: True to have LAMMPS output printed to stdout after simulation ends
debug: True to have LAMMPS output streamed to stdout during simulation (WARNING: this may degrade performance)
        custom: option to flag simulation as purely custom input to skip preparing initialization
"""
def __init__(self, s, **kwargs):
self.system = s
self.forcefield = kwargs.get('forcefield')
if self.forcefield is None and s and s.forcefield is not None:
self.forcefield = s.forcefield
self.debug = kwargs.get('debug', False)
self.print_to_screen = kwargs.get('print_to_screen', False)
self.name = kwargs.get('name', False)
self.log = kwargs.get('log')
self.write = kwargs.get('write', False)
self.custom = kwargs.get('custom')
self._input = ''
self.sim = kwargs.get('sim', [])
def add(self, *args):
for item in args:
if isinstance(item, str):
self.sim.append(CustomInput(item))
else:
self.sim.append(item)
return item
def add_qeq(self, template=None, **kwargs):
"""pysimm.lmps.Simulation.add_qeq
Add :class:`~pysimm.lmps.Qeq` template to simulation
Args:
template: :class:`~pysimm.lmps.Qeq` object reference
**kwargs: if template is None these are passed to :class:`~pysimm.lmps.Qeq` constructor to create new template
"""
if template is None:
self.sim.append(Qeq(**kwargs))
elif isinstance(template, Qeq):
self.sim.append(template)
else:
error_print('you must add an object of type Qeq to Simulation')
def add_md(self, template=None, **kwargs):
"""pysimm.lmps.Simulation.add_md
        Add :class:`~pysimm.lmps.MolecularDynamics` template to simulation
Args:
template: :class:`~pysimm.lmps.MolecularDynamics` object reference
**kwargs: if template is None these are passed to :class:`~pysimm.lmps.MolecularDynamics` constructor to create new template
"""
if template is None:
self.sim.append(MolecularDynamics(**kwargs))
elif isinstance(template, MolecularDynamics):
self.sim.append(template)
else:
error_print('you must add an object of type MolecularDynamics to Simulation')
def add_min(self, template=None, **kwargs):
"""pysimm.lmps.Simulation.add_min
Add :class:`~pysimm.lmps.Minimization` template to simulation
Args:
template: :class:`~pysimm.lmps.Minimization` object reference
**kwargs: if template is None these are passed to :class:`~pysimm.lmps.Minimization` constructor to create new template
"""
if template is None:
self.sim.append(Minimization(**kwargs))
elif isinstance(template, Minimization):
self.sim.append(template)
else:
error_print('you must add an object of type Minimization to Simulation')
def add_custom(self, custom=''):
"""pysimm.lmps.Simulation.add_custom
Add custom input string to simulation
Args:
custom: custom LAMMPS input string to add to Simulation
"""
self.sim.append(CustomInput(custom))
@property
def input(self):
self.write_input()
return self._input
def write_input(self, init=True):
"""pysimm.lmps.Simulation.write_input
Creates LAMMPS input string including initialization and input from templates/custom input
Args:
None
Returns:
None
"""
self._input = ''
if self.log:
self._input += 'log {} append\n\n'.format(self.log)
for task in self.sim:
if isinstance(task, Init):
init = False
if init and not self.custom:
self.sim.insert(0, Init(forcefield=self.forcefield))
for template in self.sim:
self._input += template.write(self)
self._input += 'write_dump all custom pysimm.dump.tmp id q x y z vx vy vz\n'
self._input += 'quit\n'
def run(self, np=None, nanohub=None, save_input=True, prefix='mpiexec'):
"""pysimm.lmps.Simulation.run
Begin LAMMPS simulation.
Args:
np: number of threads to use (serial by default) default=None
nanohub: dictionary containing nanohub resource information default=None
save_input: True to save input as pysimm.sim.in
prefix: prefix for running LAMMPS (i.e. - mpiexec)
"""
if isinstance(save_input, str):
with open(save_input, 'w') as f:
f.write(self.input)
elif save_input is True:
with open('pysimm.sim.in', 'w') as f:
f.write(self.input)
try:
call_lammps(self, np, nanohub, prefix=prefix)
except OSError:
raise PysimmError('There was a problem calling LAMMPS with {}'.format(prefix))
except IOError:
if check_lmps_exec():
raise PysimmError('There was a problem running LAMMPS. The process started but did not finish successfully. Check the log file, or rerun the simulation with debug=True to debug issue from LAMMPS output')
else:
raise PysimmError('There was a problem running LAMMPS. LAMMPS is not configured properly. Make sure the LAMMPS_EXEC environment variable is set to the correct LAMMPS executable path. The current path is set to:\n\n{}'.format(LAMMPS_EXEC))
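# Illustrative sketch of a typical workflow; assumes `s` is a pysimm System with a
# force field already applied and that LAMMPS_EXEC points at a working executable.
def _example_simulation(s):
    sim = Simulation(s, name='relax', log='relax.log')
    sim.add(OutputSettings(thermo=100, dump=1000))
    sim.add_min(min_style='cg', etol=1.0e-4, ftol=1.0e-4)
    sim.add_md(ensemble='nvt', temperature=300., run=5000)
    sim.run(np=4)  # writes pysimm.sim.in and reads the final coordinates back into s
    return s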
def enqueue_output(out, queue):
"""pysimm.lmps.enqueue_output
Helps queue output for printing to screen during simulation.
"""
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def call_lammps(simulation, np, nanohub, prefix='mpiexec'):
"""pysimm.lmps.call_lammps
Wrapper to call LAMMPS using executable name defined in pysimm.lmps module.
Args:
simulation: :class:`~pysimm.lmps.Simulation` object reference
np: number of threads to use
nanohub: dictionary containing nanohub resource information default=None
prefix: prefix for running LAMMPS (i.e. - mpiexec)
Returns:
None
"""
log_name = simulation.log or 'log.lammps'
if nanohub:
with open('temp.in', 'w') as f:
f.write(simulation.input)
if simulation.name:
print('%s: sending %s simulation to computer cluster at nanoHUB' % (strftime('%H:%M:%S'), simulation.name))
else:
print('%s: sending simulation to computer cluster at nanoHUB' % strftime('%H:%M:%S'))
sys.stdout.flush()
cmd = ('submit -n %s -w %s -i temp.lmps -i temp.in '
'lammps-09Dec14-parallel -e both -l none -i temp.in'
% (nanohub.get('cores'), nanohub.get('walltime')))
cmd = shlex.split(cmd)
exit_status, stdo, stde = RapptureExec(cmd)
else:
if simulation.name:
print('%s: starting %s LAMMPS simulation'
% (strftime('%H:%M:%S'), simulation.name))
else:
print('%s: starting LAMMPS simulation'
% strftime('%H:%M:%S'))
if np:
p = Popen([prefix, '-np', str(np),
LAMMPS_EXEC, '-e', 'both'],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
else:
p = Popen([LAMMPS_EXEC, '-e', 'both'],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
simulation.write_input()
if simulation.debug:
print(simulation.input)
warning_print('debug setting involves streaming output from LAMMPS process and can degrade performance')
warning_print('only use debug for debugging purposes, use print_to_screen to collect stdout after process finishes')
            p.stdin.write(simulation.input.encode('utf-8'))
q = Queue()
t = Thread(target=enqueue_output, args=(p.stdout, q))
t.daemon = True
t.start()
            while t.is_alive() or not q.empty():
try:
line = q.get_nowait()
except Empty:
pass
else:
if simulation.debug:
                        sys.stdout.write(line.decode())
sys.stdout.flush()
else:
stdo, stde = p.communicate(simulation.input.encode('utf-8'))
if simulation.print_to_screen:
print(stdo)
print(stde)
simulation.system.read_lammps_dump('pysimm.dump.tmp')
try:
os.remove('temp.lmps')
except OSError as e:
print(str(e))
if os.path.isfile('pysimm.qeq.tmp'):
os.remove('pysimm.qeq.tmp')
try:
os.remove('pysimm.dump.tmp')
if simulation.name:
print('%s: %s simulation using LAMMPS successful'
% (strftime('%H:%M:%S'), simulation.name))
else:
print('%s: simulation using LAMMPS successful'
% (strftime('%H:%M:%S')))
except OSError as e:
if simulation.name:
raise PysimmError('%s simulation using LAMMPS UNsuccessful' % simulation.name)
else:
raise PysimmError('simulation using LAMMPS UNsuccessful')
def qeq(s, np=None, nanohub=None, **kwargs):
"""pysimm.lmps.qeq
Convenience function to call a qeq calculation. kwargs are passed to :class:`~pysimm.lmps.Qeq` constructor
Args:
s: system to perform simulation on
np: number of threads to use
nanohub: dictionary containing nanohub resource information default=None
Returns:
None
"""
sim = Simulation(s, **kwargs)
sim.add_qeq(**kwargs)
sim.run(np, nanohub)
def quick_md(s, np=None, nanohub=None, **kwargs):
"""pysimm.lmps.quick_md
Convenience function to call an individual MD simulation. kwargs are passed to MD constructor
Args:
s: system to perform simulation on
np: number of threads to use
nanohub: dictionary containing nanohub resource information default=None
Returns:
None
"""
sim = Simulation(s, **kwargs)
sim.add_md(**kwargs)
sim.run(np, nanohub)
def quick_min(s, np=None, nanohub=None, **kwargs):
"""pysimm.lmps.quick_min
Convenience function to call an individual energy minimization simulation. kwargs are passed to min constructor
Args:
s: system to perform simulation on
np: number of threads to use
nanohub: dictionary containing nanohub resource information default=None
Returns:
None
"""
sim = Simulation(s, **kwargs)
sim.add_min(**kwargs)
sim.run(np, nanohub)
def energy(s, all=False, np=None, **kwargs):
"""pysimm.lmps.energy
Convenience function to calculate energy of a given :class:`~pysimm.system.System` object.
Args:
s: system to calculate energy
all: returns decomposition of energy if True (default: False)
np: number of threads to use for simulation
Returns:
        total energy or dictionary of energy components
"""
sim = Simulation(s, log='pysimm_calc.tmp.log', **kwargs)
sim.add(OutputSettings(thermo={
'freq': 1,
'style': 'custom step etotal epair emol evdwl ecoul ebond eangle edihed eimp'
}))
sim.add_md(length=0, **kwargs)
sim.run(np)
log = LogFile('pysimm_calc.tmp.log')
try:
os.remove('pysimm_calc.tmp.log')
    except OSError:
        error_print('error likely occurred during simulation')
if all:
return log.data.loc[0]
else:
return log.data.loc[0].TotEng
def check_lmps_attr(s):
# sync of the forcefield-style properties
if hasattr(s, 'forcefield'):
styles_list = FF_SETTINGS['dreiding'].keys()
if s.forcefield in FF_SETTINGS.keys():
for st_prop in styles_list:
setattr(s, st_prop, FF_SETTINGS[s.forcefield][st_prop])
else:
warning_print('Cannot synchronize given forcefield with LAMMPS representation types. '
'The forcefield is not present in the FF_SETTINGS of the pysimm.lmps module')
else:
warning_print('The forcefield attribute of the system is not defined. Some i/o methods of lmps '
                      'module will not be accessible')
class LogFile(object):
"""pysimm.lmps.LogFile
Class to read LAMMPS log file into Pandas DataFrame stored in LogFile.data
Attributes:
fname: filename of log file
data: resulting DataFrame with log file data
"""
def __init__(self, fname):
if not pd:
raise PysimmError('pysimm.lmps.LogFile function requires pandas')
self.filename = fname
self.data = pd.DataFrame()
self._read(self.filename)
def _read(self, fname):
with open(fname) as fr:
copy = False
for line in fr:
if line.startswith('Step'):
strio = StringIO()
copy = True
names = line.strip().split()
elif line.startswith('Loop'):
copy = False
strio.seek(0)
                    self.data = pd.concat([self.data, pd.read_csv(strio, sep=r'\s+', names=names, index_col='Step')])
elif copy:
strio.write(line)
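# Illustrative usage sketch (assumes pandas is installed and 'log.lammps' exists;
# column names follow whatever thermo_style custom fields the run produced):
#     log = LogFile('log.lammps')
#     print(log.data[['Temp', 'TotEng']].describe())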
| 38.205491 | 254 | 0.56647 |
bb1a648760feeae53d2fdeb72ed8b7594bd86c4b | 24,311 | py | Python | Assignments/HW10/curl_grading.py | chenchuw/EC602-Design-by-Software | c233c9d08a67abc47235282fedd866d67ccaf4ce | [
"MIT"
] | null | null | null | Assignments/HW10/curl_grading.py | chenchuw/EC602-Design-by-Software | c233c9d08a67abc47235282fedd866d67ccaf4ce | [
"MIT"
] | null | null | null | Assignments/HW10/curl_grading.py | chenchuw/EC602-Design-by-Software | c233c9d08a67abc47235282fedd866d67ccaf4ce | [
"MIT"
] | 1 | 2022-01-11T20:23:47.000Z | 2022-01-11T20:23:47.000Z | """curl_grading.py: tools for analyzing and checking C++ and Py programs"""
import subprocess as sub
import difflib
import unittest
import re
import tokenize
import dis
import io
import cpplint
import sys
import pycodestyle
import logging
import os
import random
import importlib
import multiprocessing
from io import StringIO
import time
from subprocess import PIPE,Popen,run,TimeoutExpired
DEBUG = False
# 1.1 incorporate new checker from fall 2020
# 1.2 fix style point handling
# 1.3 fix Penalty, and allows argv[]
# 1.4 update compile return
# 2.0 switch to Points/MaxPoints to allow for more partial points
# 2.1 move testorder functionality in to setupClass
# 2.2 some format improvements in grade reporting
# 2.3 case sensitive check for file systems.
# 2.4 allow for no Penalty in testcase
# 2.5 improved case text handling
# 2.6 add self.authors
# 3.0 rename curl_grading.py
# 3.1 improve the bracket counting
# 3.2 include subdir, allow style points <10
VERSION = (3, 2)
# path = os.environ['PATH']
# if path.startswith(".:") or path.endswith(":.") or ":.:" in path:
# pass # path ok
# else:
# print("""Your path is not set correctly. The checker will not work
# unless you add "." the current working directory to your PATH.
# You can do this by editing ~/.zshrc
# """,file=sys.stderr)
# sys.exit(42)
class TimeoutException(Exception):
pass
class RunableProcessing(multiprocessing.Process):
def __init__(self, func, *args, **kwargs):
self.queue = multiprocessing.Queue(maxsize=1)
args = (func,) + args
multiprocessing.Process.__init__(self, target=self.run_func, args=args, kwargs=kwargs)
def run_func(self, func, *args, **kwargs):
try:
result = func(*args, **kwargs)
self.queue.put((True, result))
except Exception as e:
self.queue.put((False, e))
def done(self):
return self.queue.full()
def result(self):
return self.queue.get()
def timeout(seconds, force_kill=True):
def wrapper(function):
def inner(*args, **kwargs):
now = time.time()
proc = RunableProcessing(function, *args, **kwargs)
proc.start()
proc.join(seconds)
if proc.is_alive():
if force_kill:
proc.terminate()
runtime = int(time.time() - now)
raise TimeoutException('timed out after {0} seconds'.format(runtime))
assert proc.done()
success, result = proc.result()
if success:
return result
else:
raise result
return inner
return wrapper
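# Illustrative sketch of the decorator above (the function name is hypothetical):
#     @timeout(2)
#     def slow_grading_step():
#         ...   # raises TimeoutException if it runs for more than 2 seconds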
STDLINT = ['-readability/alt_tokens',"+build/include_alpha","-build/include_subdir"]
ignore_lint = [x[1:] for x in STDLINT if x.startswith('-')]
ASTYLE_OPTIONS = [
'--style=google', '--indent=spaces=2', '--formatted', '--dry-run'
]
COMMENT_STRING = {'py': '#', 'sh': "#", 'cpp': '//'}
#CPP_CODE_ONLY = [
# 'g++', '-std=c++14', '-P', '-x', 'c++', '-dD', '-E', '-fpreprocessed'
#]
def silent_import(fname, q):
s = StringIO()
sys.stdout = s
themod = None
try:
themod = importlib.import_module(fname)
except Exception as e:
q.put("fail")
return
q.put("success")
def my_import(modname, code):
filename = modname+".py"
with open(filename,'w') as f:
f.write(code)
q = multiprocessing.Queue()
T = multiprocessing.Process(target=silent_import,args=(modname, q))
T.start()
try:
result = q.get(True,1)
except Exception as e:
repeat_terminate(T,0.1)
return False
if result=="success":
return importlib.import_module(modname)
return False
def safe_remove(filename):
try:
os.remove(filename)
except Exception as e:
print(e)
def numbers_only(word_lines):
rr=[]
for v in word_lines:
g=v.split()
nums=[]
for x in g:
try:
nums.append(int(x))
except:
try:
nums.append(float(x))
except:
pass
rr.append(nums)
return rr
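# Illustrative sketch: non-numeric tokens are dropped, ints preferred over floats, e.g.
#     numbers_only(["step 10 0.5", "energy -3"])  ->  [[10, 0.5], [-3]]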
bracket_msg="""It is recommended to avoid the use of brackets in C++, i.e., these [ ] or these <: :>
a) Use .at() or other methods instead
b) replace c-style arrays with vectors or strings etc
c) if you must use a c-style array (e.g. argv) use pointers
You have {} brackets.
"""
report_msg="""
===============================
Checking {course} {prog}.
{version}
================================
Information
-----------
{info}
Passed Tests
------------
{passed}
Failed Tests
------------
{failed}
Grading
-------
{grade}"""
AUTHWARN = "WARNING, NO VALID AUTHOR LINES FOUND"
def setup_py(cls, prefix):
with open(cls.realfilename) as f:
cls.file_contents=f.read()
cls.module_name = prefix+str(random.randint(1000,100000))
cls.module_tested = my_import(cls.module_name, cls.file_contents)
if not cls.module_tested:
safe_remove(cls.module_name+".py")
raise unittest.SkipTest(f'During test of {cls.__doc__}, unable to import your module. Timeout or error')
def compile_main(cls,prefix):
if not hasattr(cls,'lintoptions'):
cls.lintoptions = STDLINT
try:
with open(cls.realfilename) as f:
cls.file_contents=f.read()
except:
raise unittest.SkipTest(f"in compile_main, {cls.realfilename} not found.")
cls.executable = prefix+str(random.randint(1000,100000))
cls.new_source_file_main = cls.executable + ".cpp"
with open(cls.new_source_file_main,'w') as f:
f.write(cls.file_contents_main)
try:
T = sub.run(["g++","-std=c++17",'-Wall','-Wno-sign-compare',
cls.new_source_file_main,"-o",cls.executable],
stderr=sub.PIPE,universal_newlines=True)
except Exception as e:
        cls.executable = None
raise unittest.SkipTest("Compile failed.\n"+str(e))
finally:
os.remove(cls.new_source_file_main)
cls.code_metrics = code_analysis_cpp(cls.realfilename,cls.lintoptions)
return T.stderr
def compile_separate(cls,prefix):
if not hasattr(cls,'lintoptions'):
cls.lintoptions = STDLINT
try:
with open(cls.realfilename) as f:
cls.file_contents=f.read()
except:
raise unittest.SkipTest(f"in compile_separate, {cls.realfilename} not found.")
cls.executable = prefix+str(random.randint(1000,100000))
cls.new_source_file_main = cls.executable + ".cpp"
with open(cls.new_source_file_main,'w') as f:
f.write(cls.file_contents_main)
try:
T = sub.run(["g++","-std=c++17",'-Wall','-Wno-sign-compare',
cls.realfilename,cls.new_source_file_main,"-o",cls.executable],
stderr=sub.PIPE,universal_newlines=True)
except Exception as e:
raise unittest.SkipTest("Compile failed.\n"+str(e))
finally:
os.remove(cls.new_source_file_main)
cls.code_metrics = code_analysis_cpp(cls.realfilename,cls.lintoptions)
def compile(self,prefix):
if not hasattr(self,'lintoptions'):
self.lintoptions = STDLINT
try:
with open(self.realfilename) as f:
self.file_contents=f.read()
except:
raise unittest.SkipTest(f"in compile, {self.realfilename} not found.")
self.executable = prefix+str(random.randint(1000,100000))
new_source_file = self.executable + ".cpp"
with open(new_source_file,'w') as f:
f.write(self.file_contents)
try:
T = sub.run(["g++","-std=c++17",'-Wall','-Wno-sign-compare',new_source_file,"-o",self.executable],
stderr=sub.PIPE,universal_newlines=True)
except Exception as e:
raise unittest.SkipTest("Compile failed.\n"+str(e))
finally:
os.remove(new_source_file)
self.code_metrics = code_analysis_cpp(self.realfilename,self.lintoptions)
return (T.returncode,T.stderr)
def compile_and_run(self,prefix):
compile(self,prefix)
try:
T = sub.run([self.executable],stdout=sub.PIPE,stderr=sub.PIPE,timeout=1,universal_newlines=True)
except Exception as e:
safe_remove(self.executable)
raise unittest.SkipTest("Failed to run.\n"+str(e))
self.output = T.stdout
self.errors = T.stderr
def bracket_check(self):
"brackets. check for brackets"
bracket_count = self.code_metrics['brackets']
if bracket_count:
self.fail(bracket_msg.format(bracket_count))
def test_includes(self):
"libraries. check the included libraries are allowed"
includes = get_includes(self.file_contents)
self.msgs.append('included libraries : {}\n'.format(" ".join(includes) if includes else "None"))
if self.valid_includes=="Any":
return
invalid_includes = includes - self.valid_includes
if invalid_includes:
self.fail('Invalid includes: {}'.format(" ".join(x for x in invalid_includes)))
def test_imports(self):
"libraries. check the imported modules are allowed"
includes = get_python_imports(self.file_contents)
self.msgs.append('imported modules : {}\n'.format(" ".join(includes) if includes else "None"))
if self.valid_includes=="Any":
return
invalid_includes = includes - self.valid_includes
if invalid_includes:
self.fail('Invalid imports: {}'.format(" ".join(x for x in invalid_includes)))
def test_libraries(self):
"libraries. check the included libraries/modules are allowed"
if self.realfilename.endswith('cpp'):
test_includes(self)
else:
test_imports(self)
def test_authors(self):
"authors. check on authors' emails identified"
authors = get_authors(self.file_contents, progtype(self.realfilename))
self.authors = authors[:]
self.msgs.append('authors : {}\n'.format(" ".join(authors)
if authors else AUTHWARN))
if len(authors)==0:
self.fail('No authors found in your document.')
elif len(authors) > self.authorlimit:
self.fail(f'Author limit {self.authorlimit} exceeded.')
def test_pystyle(self):
"style. python code style and analysis"
proc_pycodestyle = sub.run(['pycodestyle', self.realfilename], stdout=sub.PIPE)
prob = False
if proc_pycodestyle.returncode:
prob = proc_pycodestyle.stdout.decode().rsplit(" ", 1)[-1].strip()
self.msgs.append("pycodestyle check: {}\n".format("{} problems".format(
len(proc_pycodestyle.stdout.decode().splitlines())) if prob else "ok"))
proc_pylint = sub.run(
['pylint', self.realfilename], stdout=sub.PIPE,stderr=sub.PIPE)
pylint_report = proc_pylint.stdout.decode().splitlines()
if len(pylint_report)<2:
logging.error('bad pylint_report'+proc_pylint.stdout.decode())
pylint_score = 0
elif "previous" in pylint_report[-2]:
pylint_score=pylint_report[-2].split()[6]
else:
pylint_score = pylint_report[-2].split()[-1]
self.msgs.append("pylint score : {}\n".format(pylint_score))
code_metrics = code_analysis_py(self.file_contents)
self.msgs.append(code_size_report(code_metrics, self.refcode))
comments = 0
for line in self.file_contents.splitlines():
if '#' in line:
comments += 1
self.msgs.append("comments : {}\n".format(comments))
def test_cppstyle(self):
"style. C++ code style and analysis"
comments = 0
for line in self.file_contents.splitlines():
if '//' in line:
comments += 1
cm = self.code_metrics
if cm['errors']:
numerrors=sum(len(x) for x in cm['errors'].values())
self.msgs.append(f"cpplint : {numerrors} problems")
cpplint_call_list = [
'cpplint', '--filter=' + ','.join(self.lintoptions), self.__doc__
]
self.msgs.append(' [using {}]\n\n'.format(' '.join(cpplint_call_list)))
for e in cm['errors']:
for x in cm['errors'][e]:
self.msgs.append(' line {} ({}): {}'.format(*x))
else:
self.msgs.append("cpplint : ok")
self.msgs.append(f"astyle : {cm['astyle']:.1%} code unchanged.")
self.msgs.append(code_size_report(cm, self.refcode))
self.msgs.append(f"comments : {comments}")
stylegrade(self)
def stylegrade(cls):
cls.stylemax=cls.Points['style']
try:
D = cls.code_metrics['errors']
except Exception as e:
cls.fail(f'Something went wrong: {e}')
cpplint_count= sum(len(D[x]) for x in D)
as_grade = 5*cls.code_metrics['astyle']
cls.msgs.append(f"astyle[max 5] {as_grade:.2f}")
lint_grade = max(0, 5-cpplint_count)
cls.msgs.append(f"cpplint[max 5] {lint_grade} (1 point deduction for each problem)")
cls.Points['style'] = round(as_grade + lint_grade,2)/10*cls.stylemax
cls.msgs.append(f"overall style grade[max 10] {cls.Points['style']:.2f}")
def test_style(self):
"style. test program style"
if self.program.endswith('cpp'):
test_cppstyle(self)
elif self.program.endswith('py'):
test_pystyle(self)
else:
self.msgs.append(f"Don't know how to check style of {self.program}")
def read_file(filename):
"read the contents of filename into string"
filehand = open(filename)
contents = filehand.read()
filehand.close()
return contents
def read_file_for_cpplint(filename):
"read the contents of filename into list of strings"
filehand = open(filename)
contents = filehand.read()
filehand.close()
lines = contents.splitlines()
if contents.endswith('\n'):
lines.append('')
return lines
def make_grades(gradesummary,cls,special_str="",spec_grade=0):
grade = 0
grade_report = special_str
grade_report += "\n"
for test in sorted(cls.Points):
if cls.Points[test]==int(cls.Points[test]):
grade_report += f" {test}({cls.Points[test]} / {cls.MaxPoints[test]})\n"
else:
grade_report += f" {test}({cls.Points[test]:.2f} / {cls.MaxPoints[test]})\n"
grade += cls.Points[test]
grade_report += "\n"
if hasattr(cls,"Penalty"):
for test in cls.Penalty:
if test in gradesummary['fail']:
grade_report += "Penalty for failed test {}: {}\n".format(test,cls.Penalty[test])
grade -= cls.Penalty[test]
grade = max(grade+spec_grade,0)
grade_report += f"\nGrade: {grade:5.2f}"
return grade, grade_report
def code_analysis_cpp(program_filename,lintoptions):
ignore_lint = [x[1:] for x in lintoptions if x.startswith('-')]
Errors = {}
def error_fcn(filename,line_number,lint_type,level,message):
category,subcategory = lint_type.split('/')
if category not in Errors:
Errors[category]=[]
Errors[category].append( (line_number,lint_type,message) )
lines = read_file_for_cpplint(program_filename)
cpplint.RemoveMultiLineComments(program_filename,lines,error_fcn)
clean_lines = cpplint.CleansedLines(lines)
cpplint.ProcessFileData(program_filename,'cpp',lines,error_fcn)
the_lines = [x for x in clean_lines.lines if x]
num_lines=len(the_lines)
num_words = sum(len(x.split()) for x in the_lines)
num_brackets = sum(x.count('[') for x in the_lines)
num_brackets += sum(x.count('<:') for x in the_lines)
num_brackets -= sum(x.count('argv[') for x in the_lines)
original = read_file(program_filename)
proc_astyle = sub.run(
['astyle', *ASTYLE_OPTIONS],
input=original.encode(),
stdout=sub.PIPE,
stderr=sub.PIPE)
if proc_astyle.returncode:
unchanged='error'
else:
original = original.splitlines()
newprog = proc_astyle.stdout.decode().splitlines()
matcher = difflib.SequenceMatcher()
matcher.set_seqs(original, newprog)
unchanged = matcher.ratio()
RealErrors={}
for e in Errors:
RealErrors[e]=[]
for x in Errors[e][:3]:
ignore=False
for s in ignore_lint:
if x[1] in s:
ignore = True
if not ignore:
RealErrors[e].append(x)
if not RealErrors[e]:
del RealErrors[e]
return {'brackets':num_brackets,
'lines': num_lines,
'words': num_words,
'errors':RealErrors,
'astyle':unchanged}
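# Illustrative usage sketch (added comment, not part of the original grader):
# the dict returned above is typically consumed like
#   metrics = code_analysis_cpp("student.cpp", STDLINT)   # filename is hypothetical
#   metrics['lines'], metrics['words'], metrics['astyle'], metrics['errors']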
def isstring(x):
x=x.strip()
if not x:
return True
elif x.startswith('#'):
return True
elif x.startswith('"""') and x.endswith('"""'):
return True
elif x.startswith("'''") and x.endswith("'''"):
return True
elif x.startswith('"') and x.endswith('"'):
return True
elif x.startswith("'") and x.endswith("'"):
return True
def code_analysis_py(program_contents):
"count lines and words in python"
# remove docstrings
for search_str in ('\"\"\"[^\"]*\"\"\"',"\'\'\'[^\']*\'\'\'"):
for x in re.findall(search_str,program_contents,flags=re.MULTILINE|re.DOTALL):
program_contents = program_contents.replace(x,'')
srclines=program_contents.splitlines()
# remove single line strings.
srclines = [x for x in program_contents.splitlines() if not isstring(x)]
src ="\n".join(srclines)
#print(src)
return {'lines': len(src.splitlines()), 'words': len(src.split())}
pylint_options=["--enable=all","--reports=yes","--persistent=no",
"--msg-template='{category:10s}:{line:3d},{column:2d}: {msg} ({symbol})'"]
def pylint_check(program_name):
process = sub.run(['pylint',program_name,*pylint_options],
stdout=sub.PIPE,universal_newlines=True)
out_str = process.stdout
for scoreline in out_str.splitlines()[-4:]:
try:
score = float(re.search(r'Your code has been rated at ([\d.]*)/10', scoreline).groups()[0])
return score, out_str
except:
pass
raise ValueError('could not get your pylint score')
def pycodestyle_check(filename):
"run pycodestyle, return #errors and error string"
pycodestyle_res = io.StringIO()
sys.stdout = pycodestyle_res
pycodestyle_errors = pycodestyle.Checker(filename).check_all()
sys.stdout = sys.__stdout__
res = pycodestyle_res.getvalue()
return pycodestyle_errors,res
def progtype(program):
"which type, cpp or py"
try:
_, program_type = program.split('.')
except:
return "sh"
return program_type
def get_includes(file_contents):
"get included libraries in C/C++"
includes = set()
for line in file_contents.lower().splitlines():
text = line.strip()
search_str = r"#include\s*<(.*)>"
matches = re.match(search_str, text)
if matches:
includes.add(matches.group(1))
matches = re.match("#include \"(.*)\"", text)
if matches:
includes.add(matches.group(1))
return includes
def get_python_imports(file_contents):
"get the imports of file_contents as a set"
try:
instructions = dis.get_instructions(file_contents)
imports = [__ for __ in instructions if 'IMPORT' in __.opname]
except:
return {'ERROR PROCESSING PYTHON SCRIPT'}
grouped = set()
for instr in imports:
if instr.opname == "IMPORT_NAME":
grouped.add(instr.argval)
return grouped
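# Hedged example (added for clarity): for the source text
#   "import os\nfrom sys import path\n"
# get_python_imports returns {'os', 'sys'}, since only IMPORT_NAME argvals
# (the top-level module names) are collected.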
def get_authors(file_contents, ptype,buedu=True):
"""get the authors in file_contents"""
authors = []
if ptype == 'json':
A = json.loads(file_contents)
return A.get('authors',[])
for line in file_contents.lower().splitlines():
if line.startswith(COMMENT_STRING[ptype]) and "copyright" in line:
try:
_, email = line.strip().rsplit(" ", 1)
if email.endswith('@bu.edu'):
authors.append(email if buedu else email.split("@")[0])
elif email.endswith('\r'):
authors.append('DONT_USE_WINDOWS_ENDLINES')
except:
pass
return authors
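# Illustrative example (added comment; assumes COMMENT_STRING['cpp'] is '//'):
# a C++ line such as "// copyright 2021 jdoe@bu.edu" would yield ['jdoe@bu.edu'],
# because the line is lower-cased and split on its last space to take the email.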
def check_program(testclass,course=None,email=None,versioninfo=None,theprog=None):
"""return any errors as a list of strings"""
errors = []
passed = []
gradesummary = {'pass': [], 'fail': []}
testclass.realfilename = theprog
if hasattr(testclass, "setUpClass"):
try:
testclass.setUpClass()
except Exception as e:
return f"{testclass} setup fail {e}",0
loader = unittest.loader.TestLoader()
tests = loader.loadTestsFromTestCase(testclass)
def f(test,order):
testname=test.shortDescription().split('.')[0]
i = order.index(testname)
return i
if hasattr(testclass,"testorder"):
alltests = sorted(tests,key=lambda x: f(x,testclass.testorder))
else:
alltests = sorted(tests, key=lambda x: x.shortDescription())
for test in alltests:
#if testclass.program.endswith('py') and test.shortDescription().startswith('bracket'):
# continue
if DEBUG: print('running test:' ,test.shortDescription())
run = test.run()
if run.wasSuccessful():
thetest = test.shortDescription().split('.')[0]
if thetest != 'style':
passed.append('{}\n'.format(test.shortDescription()))
gradesummary['pass'].append(test.shortDescription().split('.')[0])
else:
err = f'\n{test.shortDescription()}\n'
for testmsg, res in run.failures + run.errors:
casetext = re.search(r".*CASE=(.*)\)", str(testmsg))
if casetext:
err += "\nCASE: {}\n".format(casetext.group(1)[1:-1])
if 'AssertionError:' in res:
_, msg = res.split('AssertionError: ')
else:
msg = res
err += msg
errors.append(err)
gradesummary['fail'].append(test.shortDescription().split('.')[0])
if hasattr(testclass, "tearDownClass"):
testclass.tearDownClass()
if 'style' in testclass.Points:
if testclass.stylemax != testclass.Points['style']:
errors.append('style errors')
else:
gradesummary['pass'].append('style')
grade, grade_report = make_grades(gradesummary,testclass)
msg = report_msg.format(info="\n".join(testclass.msgs),
passed=''.join(passed) if passed else "None",
failed=''.join(errors) if errors else "None",
grade = grade_report,
prog = testclass.__doc__,
version = versioninfo or "",
email =email or "",
course=course)
return msg, grade
EMPTYGRADE = {'pass': [], 'fail': []}
def errors_msg(errors):
"format error message"
msg = '-----------------errors found--------------\n'
for testmsg in errors:
msg += testmsg + "\n-------\n"
return msg
SIZE_REPORT_TEMPLATE = """lines of code : {}, {:4.0%} of reference
tokens in code : {}, {:4.0%} of reference
"""
def code_size_report(submitted_code, reference_code):
"generate message about code size"
return SIZE_REPORT_TEMPLATE.format(
submitted_code['lines'],
submitted_code['lines'] / reference_code['lines'],
submitted_code['words'],
submitted_code['words'] / reference_code['words'])
def pyshell(Parms,q):
summary, results, gradesummary = overallpy(**Parms)
q.put([summary,results,gradesummary])
def check_program_shell(Parms,q):
q.put(check_program(**Parms))
def case_sensitive():
"is the file system case sensitive?"
fname = f"testing_{random.randint(1_000_000,2_000_000)}"
os.mkdir(fname)
try:
os.mkdir(fname.upper())
os.rmdir(fname.upper())
except:
return False
finally:
os.rmdir(fname)
return True
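# Note (added comment): case_sensitive() reports True on typical Linux
# filesystems and False on case-insensitive volumes (common macOS/Windows
# defaults), since creating the upper-cased twin directory only fails when the
# filesystem treats the two names as identical.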
| 29.720049 | 110 | 0.61141 |
7de097ffd11765a81ba7bc974ba64fe724e3a01c | 300 | py | Python | ABC/abc151-abc200/abc186/b/main.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | ABC/abc151-abc200/abc186/b/main.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | ABC/abc151-abc200/abc186/b/main.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
def main():
import numpy as np
import sys
input = sys.stdin.readline
h, w = map(int, input().split())
a = [list(map(int, input().split())) for _ in range(h)]
a = np.array(a)
print(np.sum(a - np.min(a)))
if __name__ == "__main__":
main()
| 15.789474 | 59 | 0.536667 |
ff4f690faa145a5aabdd5753f3bee4a211c47b04 | 134 | py | Python | 01 arrays/test.py | harshrajm/Python-Algo-DS | 02437fa923b2b6264b29bd7ba84ccbb7feb9c8f0 | [
"MIT"
] | null | null | null | 01 arrays/test.py | harshrajm/Python-Algo-DS | 02437fa923b2b6264b29bd7ba84ccbb7feb9c8f0 | [
"MIT"
] | null | null | null | 01 arrays/test.py | harshrajm/Python-Algo-DS | 02437fa923b2b6264b29bd7ba84ccbb7feb9c8f0 | [
"MIT"
] | null | null | null | def sum1(n):
final_sum = 0
for x in range(n+1):
final_sum += x
return final_sum
print(sum1(10)) | 14.888889 | 25 | 0.5 |
bdc4fac9119368990ab32fa721e5e286a8bfd04d | 1,937 | py | Python | robot/EDA/resources/EducationCloudSettingsPageObject.py | jbrandolinisf/EDA | 16e3ad1ec2a161cfd6e233780cfc4aae677db25a | [
"BSD-3-Clause"
] | 113 | 2019-05-03T14:57:51.000Z | 2022-03-25T18:09:24.000Z | robot/EDA/resources/EducationCloudSettingsPageObject.py | aarsvoboda/EDA | a01224b38b1c0126092930e299055de5079c93be | [
"BSD-3-Clause"
] | 637 | 2019-04-30T18:29:28.000Z | 2022-03-31T22:01:08.000Z | robot/EDA/resources/EducationCloudSettingsPageObject.py | aarsvoboda/EDA | a01224b38b1c0126092930e299055de5079c93be | [
"BSD-3-Clause"
] | 52 | 2019-07-04T03:13:08.000Z | 2022-03-01T16:44:15.000Z | from BaseObjects import BaseEDAPage
from EDA import eda_lex_locators
from cumulusci.robotframework.pageobjects import HomePage
from cumulusci.robotframework.pageobjects import pageobject
import time
from datetime import date
@pageobject("Home", "Education Cloud Settings")
class EducationCloudSettingsPage(BaseEDAPage, HomePage):
def _is_current_page(self):
""" Verify we are on the Settings Health Check page
by verifying Settings Health Check page header
"""
locator = eda_lex_locators["eda_settings_new"]["edc_header"].format("Education Data Architecture")
self.selenium.wait_until_page_contains_element(
locator,
error="EDC header is not available"
)
def verify_app_tiles_displayed(self,**kwargs):
""" This method verifies the app tiles are displayed in the Education Cloud Settings home
page.
"""
for field,value in kwargs.items():
locator = eda_lex_locators["eda_settings_new"]["app_tile"].format(field,value)
self.selenium.wait_until_page_contains_element(locator, timeout=60, error=f'{locator} is not available')
self.selenium.wait_until_element_is_visible(locator,
error= "Element is not displayed for the user")
def click_app_in_edc_home(self,value):
""" This method will click on the button in a tile to launch a particular app by accepting
the name of the button as its parameter
"""
locator = eda_lex_locators["eda_settings_new"]["global_action"].format(value)
self.selenium.wait_until_page_contains_element(locator, timeout=60, error=f'{locator} is not available')
self.selenium.wait_until_element_is_visible(locator,
error= "Element is not displayed for the user")
self.selenium.click_element(locator) | 48.425 | 116 | 0.682499 |
842ece7419105e1b3f3b8d2be0f95fc5042387f0 | 1,887 | py | Python | azure-iot-device/samples/sync-samples/send_message_x509.py | dominicbetts/azure-iot-sdk-python | ea70d2a319df2d602f8102e70a4e88635febf1b8 | [
"MIT"
] | 366 | 2016-12-02T20:38:05.000Z | 2022-03-29T10:08:14.000Z | azure-iot-device/samples/sync-samples/send_message_x509.py | dominicbetts/azure-iot-sdk-python | ea70d2a319df2d602f8102e70a4e88635febf1b8 | [
"MIT"
] | 640 | 2016-12-16T21:59:48.000Z | 2022-03-30T20:17:52.000Z | azure-iot-device/samples/sync-samples/send_message_x509.py | dominicbetts/azure-iot-sdk-python | ea70d2a319df2d602f8102e70a4e88635febf1b8 | [
"MIT"
] | 371 | 2016-11-16T16:06:04.000Z | 2022-03-31T10:10:57.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import time
import uuid
from azure.iot.device import IoTHubDeviceClient, Message, X509
# The connection string for a device should never be stored in code.
# For the sake of simplicity we are creating the X509 connection string
# containing Hostname and Device Id in the following format:
# "HostName=<iothub_host_name>;DeviceId=<device_id>;x509=true"
hostname = os.getenv("HOSTNAME")
# The device that has been created on the portal using X509 CA signing or Self signing capabilities
device_id = os.getenv("DEVICE_ID")
x509 = X509(
cert_file=os.getenv("X509_CERT_FILE"),
key_file=os.getenv("X509_KEY_FILE"),
pass_phrase=os.getenv("PASS_PHRASE"),
)
# The client object is used to interact with your Azure IoT hub.
device_client = IoTHubDeviceClient.create_from_x509_certificate(
hostname=hostname, device_id=device_id, x509=x509
)
# Connect the client.
device_client.connect()
# send 5 messages with a 1 second pause between each message
for i in range(1, 6):
print("sending message #" + str(i))
msg = Message("test wind speed " + str(i))
msg.message_id = uuid.uuid4()
msg.correlation_id = "correlation-1234"
msg.custom_properties["tornado-warning"] = "yes"
msg.content_encoding = "utf-8"
msg.content_type = "application/json"
device_client.send_message(msg)
time.sleep(1)
# send only string messages
for i in range(6, 11):
print("sending message #" + str(i))
device_client.send_message("test payload message " + str(i))
time.sleep(1)
# finally, shut down the client
device_client.shutdown()
| 33.105263 | 99 | 0.680445 |
f1b6001214232faaa9ba3a7cfcafc8b841d0b59b | 3,481 | py | Python | sdk/lusid/models/model_options_type.py | slemasne/lusid-sdk-python-preview | 94a97951ec2052bc1672b7be21e52ad2fcf6eea0 | [
"MIT"
] | null | null | null | sdk/lusid/models/model_options_type.py | slemasne/lusid-sdk-python-preview | 94a97951ec2052bc1672b7be21e52ad2fcf6eea0 | [
"MIT"
] | null | null | null | sdk/lusid/models/model_options_type.py | slemasne/lusid-sdk-python-preview | 94a97951ec2052bc1672b7be21e52ad2fcf6eea0 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3725
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid.configuration import Configuration
class ModelOptionsType(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
INVALID = "Invalid"
OPAQUEMODELOPTIONS = "OpaqueModelOptions"
EMPTYMODELOPTIONS = "EmptyModelOptions"
INDEXMODELOPTIONS = "IndexModelOptions"
allowable_values = [INVALID, OPAQUEMODELOPTIONS, EMPTYMODELOPTIONS, INDEXMODELOPTIONS] # noqa: E501
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
}
attribute_map = {
}
required_map = {
}
def __init__(self, local_vars_configuration=None): # noqa: E501
"""ModelOptionsType - a model defined in OpenAPI"
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self.discriminator = None
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ModelOptionsType):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ModelOptionsType):
return True
return self.to_dict() != other.to_dict()
| 28.300813 | 104 | 0.582304 |
8bd12d23a45ce457d7e6a2e64be5a48601b464c9 | 1,085 | py | Python | tensorkit/layers/contextual.py | lizeyan/tensorkit | 2997a5914ec3c3ec72f91eb5906b5ee878fdc020 | [
"MIT"
] | null | null | null | tensorkit/layers/contextual.py | lizeyan/tensorkit | 2997a5914ec3c3ec72f91eb5906b5ee878fdc020 | [
"MIT"
] | null | null | null | tensorkit/layers/contextual.py | lizeyan/tensorkit | 2997a5914ec3c3ec72f91eb5906b5ee878fdc020 | [
"MIT"
] | 2 | 2020-10-15T06:41:32.000Z | 2021-01-27T12:55:11.000Z | from typing import *
from ..tensor import Tensor
from .core import *
__all__ = [
'IgnoreContext', 'AddContext', 'MultiplyContext',
]
class IgnoreContext(BaseLayer):
"""
A module which simply returns the input, ignoring any context.
"""
def forward(self,
input: Tensor,
context: Optional[List[Tensor]] = None) -> Tensor:
return input
class AddContext(BaseLayer):
"""
A module which adds the input with the contexts.
"""
def forward(self,
input: Tensor,
context: Optional[List[Tensor]] = None) -> Tensor:
if context is not None:
for t in context:
input = input + t
return input
class MultiplyContext(BaseLayer):
"""
A module which multiplies the input with the contexts.
"""
def forward(self,
input: Tensor,
context: Optional[List[Tensor]] = None) -> Tensor:
if context is not None:
for t in context:
input = input * t
return input
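# Minimal usage sketch (added for illustration; assumes BaseLayer follows the
# usual Module call convention and x, c1, c2 are broadcastable Tensors):
#   IgnoreContext()(x, context=[c1, c2])    -> x
#   AddContext()(x, context=[c1, c2])       -> x + c1 + c2
#   MultiplyContext()(x, context=[c1, c2])  -> x * c1 * c2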
| 22.604167 | 66 | 0.56129 |
46f5710effd1c5d925fa498a8135ffc302c49aea | 1,644 | py | Python | Python-DailyCode/CR00_playground.py | ZhangCrow/INFO_796_Faker | 27340cf544e2517efb4c40c1f894243f2f05760a | [
"MIT"
] | null | null | null | Python-DailyCode/CR00_playground.py | ZhangCrow/INFO_796_Faker | 27340cf544e2517efb4c40c1f894243f2f05760a | [
"MIT"
] | null | null | null | Python-DailyCode/CR00_playground.py | ZhangCrow/INFO_796_Faker | 27340cf544e2517efb4c40c1f894243f2f05760a | [
"MIT"
] | null | null | null | """
———————————————————————
Python3
CR00_playground.py
DailyCode temporary debugging scratchpad
———————————————————————
"""
import json

# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
if not (head and head.next):
return head
cur = head
prev = None
while cur:
tmp = cur.next
cur.next = prev
prev = cur
cur = tmp
return prev
def stringToIntegerList(input):
return json.loads(input)
def stringToListNode(input):
# Generate list from the input
numbers = stringToIntegerList(input)
# Now convert that list into linked list
dummyRoot = ListNode(0)
ptr = dummyRoot
for number in numbers:
ptr.next = ListNode(number)
ptr = ptr.next
ptr = dummyRoot.next
return ptr
def listNodeToString(node):
if not node:
return "[]"
result = ""
while node:
result += str(node.val) + ", "
node = node.next
return "[" + result[:-2] + "]"
def main():
import sys
import io
def readlines():
for line in io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8'):
yield line.strip('\n')
lines = readlines()
while True:
try:
line = next(lines)
head = stringToListNode(line);
ret = Solution().reverseList(head)
out = listNodeToString(ret);
print(out)
except StopIteration:
break
if __name__ == '__main__':
main()
| 21.350649 | 73 | 0.538929 |
7bb93ad373770b32fce77bc8243a4694527e0860 | 2,140 | py | Python | mobilenet_tf.py | irenepap2/FACT_UvA_2022 | 32bc9448195928469d960d0120e0ff0e2a1a9f52 | [
"MIT"
] | null | null | null | mobilenet_tf.py | irenepap2/FACT_UvA_2022 | 32bc9448195928469d960d0120e0ff0e2a1a9f52 | [
"MIT"
] | null | null | null | mobilenet_tf.py | irenepap2/FACT_UvA_2022 | 32bc9448195928469d960d0120e0ff0e2a1a9f52 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.keras.layers import Input, DepthwiseConv2D
from tensorflow.keras.layers import Conv2D, BatchNormalization
from tensorflow.keras.layers import ReLU, AvgPool2D, Flatten, Dense
from tensorflow.keras import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Reshape, Dropout, Activation
import cv2
def mobilnet_block (x, filters, strides):
x = DepthwiseConv2D(kernel_size = 3, strides = strides, padding = 'same', use_bias=False)(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = Conv2D(filters = filters, kernel_size = 1, strides = 1, use_bias=False)(x)
x = BatchNormalization()(x)
x = ReLU()(x)
return x
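# Note (added comment): mobilnet_block is a depthwise-separable convolution --
# a 3x3 DepthwiseConv2D followed by a 1x1 pointwise Conv2D, each with
# BatchNorm + ReLU -- the MobileNetV1 building block that cuts parameters and
# FLOPs relative to a full 3x3 convolution.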
input = Input(shape = (256,256,3))
x = Conv2D(filters = 32, kernel_size = 3, strides = 2, padding = 'same', use_bias=False)(input)
x = BatchNormalization()(x)
x = ReLU()(x)
x = mobilnet_block(x, filters = 64, strides = 1)
x = mobilnet_block(x, filters = 128, strides = 2)
x = mobilnet_block(x, filters = 128, strides = 1)
x = mobilnet_block(x, filters = 256, strides = 2)
x = mobilnet_block(x, filters = 256, strides = 1)
x = mobilnet_block(x, filters = 512, strides = 2)
for _ in range (5):
x = mobilnet_block(x, filters = 512, strides = 1)
x = mobilnet_block(x, filters = 1024, strides = 2)
x = mobilnet_block(x, filters = 1024, strides = 1)
x = GlobalAveragePooling2D()(x)
x = Reshape((1,1,1024))(x)
x = Dropout(0.2)(x)
x = Conv2D(filters=2, kernel_size=1)(x)
x = Reshape((2,))(x)
# output = Activation('linear')(x)
model = Model(inputs=input, outputs=x)
input = cv2.imread('imgs/4.png')
input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)[None, ...]/255
classifier = tf.keras.models.load_model('./mobilenet.savedmodel')
#load model pretrained model weights
weight_count = len(model.get_weights())
model.set_weights(classifier.get_weights()[:weight_count])
model.summary()
# print('Weights of first layer:')
# we transpose to compare if they are the same
# print(model.get_weights()[0].transpose(3,2,0,1))
# print('theirs', classifier.predict(input))
output = model.predict(input)
print(output.shape)
print('ours', output)
| 31.014493 | 96 | 0.703738 |
d4d68f33e90d762dedf42097b6675b01f9a28b66 | 5,013 | py | Python | utils/anaconda_package_data_hourly.py | ericdill/pydatanyc2019 | 6ee4823e42a0eadce656febd8fe17c078e21134c | [
"MIT"
] | null | null | null | utils/anaconda_package_data_hourly.py | ericdill/pydatanyc2019 | 6ee4823e42a0eadce656febd8fe17c078e21134c | [
"MIT"
] | null | null | null | utils/anaconda_package_data_hourly.py | ericdill/pydatanyc2019 | 6ee4823e42a0eadce656febd8fe17c078e21134c | [
"MIT"
] | null | null | null | import argparse
import dask.dataframe as dd
from datetime import datetime
import os
from os.path import dirname, exists
from os import listdir, makedirs
from datetime import timedelta, date
import pandas as pd
data_path = '/mnt/storage/anaconda-parsed-logs'
output_path = 's3://edill-data/cleaned-logs'
data_path = data_path.split(os.path.sep)
output_path = output_path.split(os.path.sep)
def make_folder(year, month):
folder_path = os.path.sep.join(output_path+[year]+[month])
if not exists(folder_path):
makedirs(folder_path, exist_ok=True)
def package_data_processing(data_path, output_path, year, month, day):
raw_data = dd.read_parquet(
os.path.sep.join(data_path+[year]+[month]+['{0}-{1}-{2}.parquet'.format(year, month, day)]),
categories = {'uri':500000, 'source':5})
#create column date
raw_data['timestamp'] = raw_data.index
raw_data['time']= raw_data.timestamp.map(lambda t: t.replace(minute=0, second=0, microsecond=0))
#exclude third-party channels that are not conda-forge and bioconda
cond = raw_data.source.isin(['cloudflare_conda','cloudflare_repo_anaconda'])
try:
cond |= raw_data.pkg_channel.isin(['conda-forge','bioconda','pyviz'])
except AttributeError:
raw_data['pkg_channel'] = None
print("Upstream dataset missing `pkg_channel` column. Caught the following error and continuing execution:")
import traceback
traceback.print_exc()
raw_data = raw_data[cond]
#create data_source column with value anaconda and conda-forge
#raw_data['data_source'] = raw_data['pkg_channel']\
# .apply(lambda x: 'conda-forge' if x == 'conda-forge' else 'bioconda' if x =='bioconda' else 'anaconda', meta='str')
raw_data['data_source'] = raw_data.pkg_channel.where(raw_data.pkg_channel.isin(['conda-forge', 'bioconda','pyviz']), 'anaconda')
#recode pkg_python e.g., 27 -> 2.7
raw_data['pkg_python'] = raw_data['pkg_python']\
.apply(lambda x: float(x)/10 if len(x)>0 else '', meta='str').astype(str)
#combine pkg_platform with pkg_arch
raw_data['pkg_platform'] = raw_data['pkg_platform'].astype(str)+'-'+raw_data['pkg_arch'].astype(str)
raw_data['pkg_platform'] = raw_data['pkg_platform']\
.apply(lambda x: '' if x=='-' else x, meta='str').astype(str) #if platform and arch are both blank
raw_data['day_name'] = raw_data.timestamp.dt.day_name()
raw_data['hour'] = raw_data.timestamp.dt.hour
#groupby
columns = ['time','data_source','pkg_name','pkg_version','pkg_platform','pkg_python', 'bytes', 'day_name', 'hour']
data = raw_data\
.groupby(columns)\
.size()\
.reset_index()
data.columns = columns + ['counts']
#save to .parquet file
output = data.compute()
for col in ['data_source','pkg_name','pkg_version','pkg_platform','pkg_python']:
output[col]=output[col].astype('category')
output_path = os.path.sep.join(output_path+[year]+[month]+['{0}-{1}-{2}.parquet'.format(year, month, day)])
print(f'writing to {output_path}')
output.to_parquet(
output_path,
compression='SNAPPY',
#file_scheme='simple',
engine='pyarrow'
)
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + timedelta(n)
def str_to_dt(datestr):
return datetime.strptime(datestr, '%Y-%m-%d')
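# Usage sketch (illustrative): daterange is end-exclusive, e.g.
#   list(daterange(str_to_dt('2019-01-01'), str_to_dt('2019-01-03')))
# yields just the two dates 2019-01-01 and 2019-01-02.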
def main():
parser = argparse.ArgumentParser()
# data starts at 2016-10-28
parser.add_argument("--start",
help="start date - YYYY-MM-DD",
type=str_to_dt,
)
# data ends at 2019-09-02
parser.add_argument("--end",
help="end date - YYYY-MM-DD",
type=str_to_dt,
)
parser.add_argument("--date",
help="date to process - YYYY-MM-DD",
action="append",
type=str_to_dt,
default=[])
args = parser.parse_args()
start = args.start
end = args.end
if start is None and end is None:
print("Cant do a daterange because start or end was not passed in")
dates = []
else:
dates = list(daterange(args.start, args.end))
dates.extend(args.date)
dates = sorted(set(dates))
print(f"processing {len(dates)} dates")
for single_date in dates:
print(single_date)
year = str(single_date.year)
month = str(single_date.strftime('%m'))
day = str(single_date.strftime('%d'))
make_folder(year, month)
try:
package_data_processing(data_path, output_path, year, month, day)
except Exception:
import traceback
traceback.print_exc()
continue
if __name__ == '__main__':
main() | 36.326087 | 132 | 0.6152 |
e5d7963ef607e2019d63082585148f6406260d56 | 827 | py | Python | packages/merlin/protocols/__init__.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | packages/merlin/protocols/__init__.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | packages/merlin/protocols/__init__.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <nichael.aivazis@para-sim.com>
# (c) 1998-2022 all rights reserved
# export
# flow parts
from .Producer import Producer as producer
from .Specification import Specification as specification
# asset categories
from .AssetCategory import AssetCategory as assetCategory
# and assets
from .Asset import Asset as asset
from .Directory import Directory as directory
from .File import File as file
from .Library import Library as library
from .Project import Project as project
# builders
from .Builder import Builder as builder
from .PrefixLayout import PrefixLayout as prefix
from .LibFlow import LibFlow as libflow
# miscellaneous parts
from .Compiler import Compiler as compiler
from .Language import Language as language
# tools
from .SCS import SCS as scs
# end of file
| 23.628571 | 57 | 0.785973 |
73fc1b025d7abd321785e9d253a2906cb3e42933 | 462 | py | Python | devilry/devilry_compressionutil/migrations/0007_auto_20181002_1053.py | aless80/devilry-django | 416c262e75170d5662542f15e2d7fecf5ab84730 | [
"BSD-3-Clause"
] | 29 | 2015-01-18T22:56:23.000Z | 2020-11-10T21:28:27.000Z | devilry/devilry_compressionutil/migrations/0007_auto_20181002_1053.py | aless80/devilry-django | 416c262e75170d5662542f15e2d7fecf5ab84730 | [
"BSD-3-Clause"
] | 786 | 2015-01-06T16:10:18.000Z | 2022-03-16T11:10:50.000Z | devilry/devilry_compressionutil/migrations/0007_auto_20181002_1053.py | aless80/devilry-django | 416c262e75170d5662542f15e2d7fecf5ab84730 | [
"BSD-3-Clause"
] | 15 | 2015-04-06T06:18:43.000Z | 2021-02-24T12:28:30.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-10-02 08:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('devilry_compressionutil', '0006_compressedarchivemeta_created_by_role'),
]
operations = [
migrations.AlterField(
model_name='compressedarchivemeta',
name='archive_size',
field=models.BigIntegerField(),
),
]
| 22 | 82 | 0.634199 |
cc71286c1fe41531d6f888686b7d45957d45244f | 1,080 | py | Python | setup.py | mcgid/morenines | b5825d33ae4c44e39fc0b9763bdf371e00112b64 | [
"MIT"
] | null | null | null | setup.py | mcgid/morenines | b5825d33ae4c44e39fc0b9763bdf371e00112b64 | [
"MIT"
] | 21 | 2016-04-15T19:22:12.000Z | 2016-07-08T16:22:54.000Z | setup.py | mcgid/morenines | b5825d33ae4c44e39fc0b9763bdf371e00112b64 | [
"MIT"
] | null | null | null | import os
from codecs import open
from setuptools import setup, find_packages
this_dir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_dir, 'DESCRIPTION.rst'), encoding='utf8') as f:
long_desc = f.read()
setup(
name='morenines',
version='1.1.0',
url='https://github.com/mcgid/morenines',
license='MIT',
description='A simple content change detector',
long_description=long_desc,
keywords=[
'backup',
'hashing',
],
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Topic :: System",
"Topic :: System :: Archiving",
"Topic :: System :: Archiving :: Mirroring",
"Topic :: Utilities",
],
install_requires=[
'click',
],
packages=find_packages(),
entry_points={
'console_scripts': [
'mn = morenines.application:main'
]
},
)
| 24 | 75 | 0.587037 |
3087de773ba2ceb1688c19c2b7cd55a967833ae8 | 3,787 | py | Python | python/dl/scratch/quanta/turshare-stocka.py | shunliz/test | 3200cc7aa7ec5c11e5b7233813f94d9797411427 | [
"Apache-2.0"
] | null | null | null | python/dl/scratch/quanta/turshare-stocka.py | shunliz/test | 3200cc7aa7ec5c11e5b7233813f94d9797411427 | [
"Apache-2.0"
] | 1 | 2016-07-17T10:20:08.000Z | 2016-07-17T10:20:08.000Z | python/dl/scratch/quanta/turshare-stocka.py | shunliz/test | 3200cc7aa7ec5c11e5b7233813f94d9797411427 | [
"Apache-2.0"
] | null | null | null | import tushare as ts
import pandas as pd
# Fetch roughly the last three years of the SSE Composite Index (code 000001, index=True)
data = ts.get_h_data('000001', index=True, start='2015-06-13', end='2017-06-13')
# lstm.py
import time
import warnings
import numpy as np
from numpy import newaxis
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
def plot_results_multiple(predicted_data, true_data, prediction_len):
fig = plt.figure(facecolor='white')
ax = fig.add_subplot(111)
ax.plot(true_data, label='True Data')
print('yo')
#Pad the list of predictions to shift it in the graph to it's correct start
for i, data in enumerate(predicted_data):
padding = [None for p in range(i * prediction_len)]
plt.plot(padding + data, label='Prediction')
plt.legend()
plt.show()
def load_data(filename, seq_len, normalise_window):
f = open(filename, 'r').read()
data = f.split('\n')
sequence_length = seq_len + 1
result = []
for index in range(len(data) - sequence_length):
result.append(data[index: index + sequence_length])
if normalise_window:
result = normalise_windows(result)
result = np.array(result)
row = round(0.9 * result.shape[0])
train = result[:int(row), :]
np.random.shuffle(train)
x_train = train[:, :-1]
y_train = train[:, -1]
x_test = result[int(row):, :-1]
y_test = result[int(row):, -1]
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
return [x_train, y_train, x_test, y_test]
def normalise_windows(window_data):
normalised_data = []
for window in window_data:
normalised_window = [((float(p) / float(window[0])) - 1) for p in window]
normalised_data.append(normalised_window)
return normalised_data
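# Worked example (added for clarity): each window is rescaled relative to its
# first value, p_i -> p_i / p_0 - 1, so a window [10, 11, 12] becomes
# [0.0, 0.1, 0.2]; the model therefore predicts relative returns, not prices.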
def build_model(layers):
model = Sequential()
model.add(LSTM(
input_dim=layers[0],
output_dim=layers[1],
return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(
layers[2],
return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(
output_dim=layers[3]))
model.add(Activation("linear"))
start = time.time()
model.compile(loss="mse", optimizer="rmsprop")
print("Compilation Time : ", time.time() - start)
return model
def predict_point_by_point(model, data):
#Predict each timestep given the last sequence of true data, in effect only predicting 1 step ahead each time
predicted = model.predict(data)
predicted = np.reshape(predicted, (predicted.size,))
return predicted
def predict_sequence_full(model, data, window_size):
#Shift the window by 1 new prediction each time, re-run predictions on new window
curr_frame = data[0]
predicted = []
for i in range(len(data)):
predicted.append(model.predict(curr_frame[newaxis,:,:])[0,0])
curr_frame = curr_frame[1:]
curr_frame = np.insert(curr_frame, [window_size-1], predicted[-1], axis=0)
return predicted
def predict_sequences_multiple(model, data, window_size, prediction_len):
#Predict sequence of 50 steps before shifting prediction run forward by 50 steps
prediction_seqs = []
for i in range(int(len(data)//prediction_len)):
curr_frame = data[i*prediction_len]
predicted = []
for j in range(prediction_len):
predicted.append(model.predict(curr_frame[newaxis,:,:])[0,0])
curr_frame = curr_frame[1:]
curr_frame = np.insert(curr_frame, [window_size-1], predicted[-1], axis=0)
prediction_seqs.append(predicted)
return prediction_seqs | 32.646552 | 113 | 0.672828 |
ea4fe814b2e39cc7c94077910e96b0654a4c23ad | 260 | py | Python | libs/core/cornflow_core/schemas/patch.py | ggsdc/corn | 4c17c46a70f95b8882bcb6a55ef7daa1f69e0456 | [
"MIT"
] | 2 | 2020-07-09T20:58:47.000Z | 2020-07-20T20:40:46.000Z | libs/core/cornflow_core/schemas/patch.py | baobabsoluciones/cornflow | bd7cae22107e5fe148704d5f41d4f58f9c410b40 | [
"Apache-2.0"
] | 2 | 2022-03-31T08:42:10.000Z | 2022-03-31T12:05:23.000Z | libs/core/cornflow_core/schemas/patch.py | ggsdc/corn | 4c17c46a70f95b8882bcb6a55ef7daa1f69e0456 | [
"MIT"
] | null | null | null | """
Schemas for the PATCH operations
"""
from marshmallow import fields, Schema
class BasePatchOperation(Schema):
"""Base structure of a JSON Patch file"""
op = fields.Str(required=True)
path = fields.Str(required=True)
value = fields.Raw()
| 20 | 45 | 0.696154 |
709cd83ad4884e670826f705c021a2bdeb1e11b6 | 539 | py | Python | Problem 38/problem38.py | logicred/Euler-Project | 3488c409135729a444cb43ff06c5fdaf8feb8623 | [
"MIT"
] | 2 | 2019-09-12T01:09:01.000Z | 2019-09-18T00:23:33.000Z | Problem 38/problem38.py | logicred/Euler-Project | 3488c409135729a444cb43ff06c5fdaf8feb8623 | [
"MIT"
] | null | null | null | Problem 38/problem38.py | logicred/Euler-Project | 3488c409135729a444cb43ff06c5fdaf8feb8623 | [
"MIT"
] | null | null | null | #Answer = 932718654
#cost = 0.0638s
import time
start = time.time()
def pan_mul(num):
s = ''
for y in range(1, 10):
s = s + str(num * y)
if len(s) < 9:
continue
elif len(s) > 9:
return False, '0'
else:
t = set([str(x) for x in range(1, 10)])
if t == set(s):
return True, s
else:
return False, '0'
return False, '0'
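# Worked example (added comment): pan_mul(192) returns (True, '192384576'),
# since 192*1, 192*2, 192*3 concatenate to the 1-9 pandigital '192384576'.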
maxnum = 0
for x in range(1, 9999 + 1):
a, b = pan_mul(x)
if a and maxnum < int(b):
maxnum = int(b)
print(maxnum)
end = time.time()
print(end - start) | 16.84375 | 43 | 0.534323 |
c9984c84e5b23dfd69031ef16360223f63a2f6a2 | 4,893 | py | Python | python/GafferArnoldUI/ShaderMenu.py | timlehr/gaffer | 354acd6af7500e0bd1ce19d7c417929e2f0a919e | [
"BSD-3-Clause"
] | null | null | null | python/GafferArnoldUI/ShaderMenu.py | timlehr/gaffer | 354acd6af7500e0bd1ce19d7c417929e2f0a919e | [
"BSD-3-Clause"
] | null | null | null | python/GafferArnoldUI/ShaderMenu.py | timlehr/gaffer | 354acd6af7500e0bd1ce19d7c417929e2f0a919e | [
"BSD-3-Clause"
] | null | null | null | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import collections
import arnold
import IECore
import IECoreArnold
import GafferUI
import GafferArnold
def appendShaders( menuDefinition, prefix="/Arnold" ) :
MenuItem = collections.namedtuple( "MenuItem", [ "menuPath", "nodeCreator" ] )
# Build a list of menu items we want to create.
categorisedMenuItems = []
uncategorisedMenuItems = []
with IECoreArnold.UniverseBlock( writable = False ) :
it = arnold.AiUniverseGetNodeEntryIterator( arnold.AI_NODE_SHADER | arnold.AI_NODE_LIGHT )
while not arnold.AiNodeEntryIteratorFinished( it ) :
nodeEntry = arnold.AiNodeEntryIteratorGetNext( it )
shaderName = arnold.AiNodeEntryGetName( nodeEntry )
displayName = " ".join( [ IECore.CamelCase.toSpaced( x ) for x in shaderName.split( "_" ) ] )
nodeName = displayName.replace( " ", "" )
category = __aiMetadataGetStr( nodeEntry, "", "gaffer.nodeMenu.category" )
if category == "" :
continue
if arnold.AiNodeEntryGetType( nodeEntry ) == arnold.AI_NODE_SHADER :
menuPath = "Shader"
if shaderName == "light_blocker" :
nodeCreator = functools.partial( __shaderCreator, shaderName, GafferArnold.ArnoldLightFilter, nodeName )
else :
nodeCreator = functools.partial( __shaderCreator, shaderName, GafferArnold.ArnoldShader, nodeName )
else :
menuPath = "Light"
if shaderName != "mesh_light" :
nodeCreator = functools.partial( __shaderCreator, shaderName, GafferArnold.ArnoldLight, nodeName )
else :
nodeCreator = GafferArnold.ArnoldMeshLight
if category :
menuPath += "/" + category.strip( "/" )
menuPath += "/" + displayName
if category :
categorisedMenuItems.append( MenuItem( menuPath, nodeCreator ) )
else :
uncategorisedMenuItems.append( MenuItem( menuPath, nodeCreator ) )
arnold.AiNodeEntryIteratorDestroy( it )
# Tidy up uncategorised shaders into a submenu if necessary.
rootsWithCategories = set( [ m.menuPath.partition( "/" )[0] for m in categorisedMenuItems ] )
for i, menuItem in enumerate( uncategorisedMenuItems ) :
s = menuItem.menuPath.split( "/" )
if s[0] in rootsWithCategories :
uncategorisedMenuItems[i] = MenuItem( "/".join( [ s[0], "Other", s[1] ] ), menuItem.nodeCreator )
# Create the actual menu items.
for menuItem in categorisedMenuItems + uncategorisedMenuItems :
menuDefinition.append(
prefix + "/" + menuItem.menuPath,
{
"command" : GafferUI.NodeMenu.nodeCreatorWrapper( menuItem.nodeCreator ),
"searchText" : "ai" + menuItem.menuPath.rpartition( "/" )[2].replace( " ", "" ),
}
)
def __shaderCreator( shaderName, nodeType, nodeName ) :
node = nodeType( nodeName )
node.loadShader( shaderName )
if isinstance( node, GafferArnold.ArnoldLight ) :
node["name"].setValue( nodeName[:1].lower() + nodeName[1:] )
return node
def __aiMetadataGetStr( nodeEntry, paramName, name ) :
value = arnold.AtStringReturn()
if arnold.AiMetaDataGetStr( nodeEntry, paramName, name, value ) :
return arnold.AtStringToStr( value )
return None
| 36.789474 | 109 | 0.702636 |
a138b25d635fb878e3e7a6f447904fb4de13b0d2 | 4,402 | py | Python | RESEPT/Spearman_Correlation.py | YuLin-code/MP-MIM | 0871a27d1717dc6f5dad623c6721824b104c3bb8 | [
"CECILL-B"
] | null | null | null | RESEPT/Spearman_Correlation.py | YuLin-code/MP-MIM | 0871a27d1717dc6f5dad623c6721824b104c3bb8 | [
"CECILL-B"
] | null | null | null | RESEPT/Spearman_Correlation.py | YuLin-code/MP-MIM | 0871a27d1717dc6f5dad623c6721824b104c3bb8 | [
"CECILL-B"
] | null | null | null | import numpy as np
import pandas as pd
import os
from scipy.stats import spearmanr
import argparse
parser = argparse.ArgumentParser(description='Main Entrance of MP_MIM_RESEPT')
parser.add_argument('--sampleName', type=str, default='151507')
parser.add_argument('--MP-k-num', type=int, default=90, help='number of k_num in KNN graph of message passing (default: 90)')
parser.add_argument('--MP-l-num', type=int, default=15, help='number of layer_num in message passing (default: 15)')
args = parser.parse_args()
if __name__ == '__main__':
# sample init
sample = args.sampleName
k_num_distance_att = args.MP_k_num
layer_num_distance_att = args.MP_l_num
ground_truth_folder_path = os.path.abspath('./Embedding_Ground_Truth_Quality_Rank_'+sample+'/')
embedding_in_RESEPT_folder = "RESEPT_MP_embedding_"+sample+"/"
####sample list
sample_list = [ '151507','151508', '151509', '151510', '151669', '151670', '151671', '151672', '151673', '151674', '151675', '151676','18-64','2-5', '2-8', 'T4857']
letter_list = [ 'a','b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l','m', 'n', 'o', 'p']
PEalphaList = ['0.1','0.2','0.3', '0.5', '1.0', '1.2', '1.5','2.0']
zdimList = ['3','10', '16','32', '64', '128', '256']
count_init = sample_list.index(sample)
count = 56*count_init
letter = letter_list[count_init]
legend_name_list = []
for i in range(len(PEalphaList)):
for j in range((len(zdimList))):
count = count + 1
embedding_name = sample+'_'+letter+'_'+str(count)+'_raw_PEalpha'+str(PEalphaList[i])+'_zdim'+str(zdimList[j])
legend_name_list.append(embedding_name)
# Ground Truth
raw_embedding_kmeans_ari_result_df = pd.read_csv(ground_truth_folder_path+'/'+sample+'_raw_embedding_ground_truth_rank.csv', index_col = 0)
raw_embedding_kmeans_ari_result_df_T = raw_embedding_kmeans_ari_result_df.T
raw_embedding_kmeans_ari_result_name_list = []
for i in range(56):
raw_embedding_kmeans_ari_result_name_list.append(raw_embedding_kmeans_ari_result_df_T.iloc[0].values[i].split('_')[0]+'_'+raw_embedding_kmeans_ari_result_df_T.iloc[0].values[i].split('_')[1]+'_'+raw_embedding_kmeans_ari_result_df_T.iloc[0].values[i].split('_')[2]+'_'+raw_embedding_kmeans_ari_result_df_T.iloc[0].values[i].split('_')[3]+'_'+raw_embedding_kmeans_ari_result_df_T.iloc[0].values[i].split('_')[9]+'_'+raw_embedding_kmeans_ari_result_df_T.iloc[0].values[i].split('_')[12])
raw_embedding_kmeans_ari_result_name_np = np.array(raw_embedding_kmeans_ari_result_name_list)
raw_embedding_kmeans_ari_result_name_init_np = np.zeros((raw_embedding_kmeans_ari_result_name_np.shape[0],2))
for i in range(raw_embedding_kmeans_ari_result_name_np.shape[0]):
raw_embedding_kmeans_ari_result_name_init_np[i,0] = raw_embedding_kmeans_ari_result_name_np[i].split('_')[2]
raw_embedding_kmeans_ari_result_name_init_np[i,1] = i+1
raw_embedding_kmeans_ari_result_name_init_np_int = raw_embedding_kmeans_ari_result_name_init_np.astype(int)
raw_embedding_kmeans_ari_result_order = raw_embedding_kmeans_ari_result_name_init_np_int[np.argsort(raw_embedding_kmeans_ari_result_name_init_np_int[:,0])][:,1]
# Spearman Correlation
MP_MIM_csv = sample+'_gat_self_loop_euc_knn_graph_K'+str(k_num_distance_att)+'_layer'+str(layer_num_distance_att)+'_MP_MIM.csv'
MP_MIM_result_df = pd.read_csv('./'+MP_MIM_csv,index_col=0)
MP_MIM_result = MP_MIM_result_df.T.values[1,:]
MP_MIM_result_sort_descending = pd.DataFrame(MP_MIM_result.reshape(-1,len(MP_MIM_result)),columns=legend_name_list).sort_values(by=0,axis=1,ascending=False)
MP_MIM_result_sort_descending_np = np.array(list(MP_MIM_result_sort_descending))
MI_MIM_init_np = np.zeros((MP_MIM_result_sort_descending_np.shape[0],2))
for k in range(MP_MIM_result_sort_descending_np.shape[0]):
MI_MIM_init_np[k,0] = MP_MIM_result_sort_descending_np[k].split('_')[2]
MI_MIM_init_np[k,1] = k+1
MI_MIM_init_np_int = MI_MIM_init_np.astype(int)
MI_MIM_order = MI_MIM_init_np_int[np.argsort(MI_MIM_init_np_int[:,0])][:,1]
Spearman_correlation,pvalue=spearmanr(raw_embedding_kmeans_ari_result_order,MI_MIM_order)
print(sample+' Spearman correlation is '+str(Spearman_correlation)+' and P-value is '+str(pvalue))
| 67.723077 | 493 | 0.728532 |
53a94d50343877dc461035177e19169bd60ff1a5 | 211 | py | Python | backend/home/models.py | crowdbotics-apps/techcompilertest-33082 | 07a24fb3f97f07823b86d13584756967123526e6 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/models.py | crowdbotics-apps/techcompilertest-33082 | 07a24fb3f97f07823b86d13584756967123526e6 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/models.py | crowdbotics-apps/techcompilertest-33082 | 07a24fb3f97f07823b86d13584756967123526e6 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.conf import settings
from django.db import models
class Employ(models.Model):
"Generated Model"
name = models.TextField()
department = models.TextField()
email = models.TextField()
| 21.1 | 35 | 0.720379 |
c5b665591f8979c748946fd56e7eac697f7a57e6 | 3,044 | py | Python | src/train.py | AasimBaig/synergy-transfer-learning-task | 090f8070de2a7188d95d03b8787cb243631180e8 | [
"MIT"
] | null | null | null | src/train.py | AasimBaig/synergy-transfer-learning-task | 090f8070de2a7188d95d03b8787cb243631180e8 | [
"MIT"
] | null | null | null | src/train.py | AasimBaig/synergy-transfer-learning-task | 090f8070de2a7188d95d03b8787cb243631180e8 | [
"MIT"
] | null | null | null | from torchvision import models
import os
import glob
import numpy as np
import pandas as pd
from tqdm import tqdm
from PIL import Image
import torch
import torch.nn as nn
from torchvision import transforms
from torchsummary import summary
from torch.utils.data import DataLoader
from torch.autograd import Variable
import config
import engine
from dataset import AntsBeesDataset
import nn_model
from sklearn.model_selection import train_test_split
def run_training():
# get csv file that contains image paths.
train = pd.read_csv("/home/aasim/synergy-ai-task/src/train_data.csv")
test = pd.read_csv("/home/aasim/synergy-ai-task/src/val_data.csv")
# call out custom dataset.
train_dataset = AntsBeesDataset(train["image_paths"].tolist(),
train["targets"].tolist(),
# various transform to increase the datasize because we are low on data.
transform=[
transforms.Resize(
size=(config.IMAGE_HEIGHT, config.IMAGE_WIDTH), interpolation=Image.BILINEAR),
transforms.RandomHorizontalFlip(),
transforms.RandomGrayscale(),
transforms.RandomRotation(40),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))
])
test_dataset = AntsBeesDataset(test["image_paths"].tolist(),
test["targets"].tolist(),
transform=[
transforms.Resize(
size=(config.IMAGE_HEIGHT, config.IMAGE_WIDTH), interpolation=Image.BILINEAR),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225))
])
# train_loader
train_loader = DataLoader(
train_dataset,
batch_size=config.BATCH_SIZE,
num_workers=config.NUM_WORKERS,
shuffle=True)
# test_loader or val_loader
test_loader = DataLoader(
test_dataset,
batch_size=config.BATCH_SIZE,
num_workers=config.NUM_WORKERS,
shuffle=False)
# get model
model = nn_model.get_model()
torch.cuda.empty_cache()
# add model to GPU
if torch.cuda.is_available():
model.cuda()
# different loss function.
criterion = nn.CrossEntropyLoss()
# optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=config.LR)
# scheduling Learning rate.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, factor=0.8, patience=5, verbose=True)
model.train()
for epoch in range(config.EPOCHS):
train_loss, train_acc = engine.train_fn(model, train_loader, optimizer)
print(
f"Epoch: {epoch} --- Training loss : {train_loss} --- Accuracy : {train_acc}\n")
print("\nTraining Finished \n")
engine.save_checkpoint(model)
engine.check_accuracy(test_loader)
if __name__ == "__main__":
run_training()
| 29.843137 | 108 | 0.638633 |
b006305c8754aa191aa85d2fbc7f763e0edcad01 | 24,382 | py | Python | pytest_django/plugin.py | bdauvergne/pytest-django | 66205b3d6ac21e65fbd3d95f1f541db30a596e53 | [
"BSD-3-Clause"
] | 967 | 2015-01-06T14:36:22.000Z | 2022-03-29T21:07:03.000Z | pytest_django/plugin.py | bdauvergne/pytest-django | 66205b3d6ac21e65fbd3d95f1f541db30a596e53 | [
"BSD-3-Clause"
] | 743 | 2015-01-02T12:20:13.000Z | 2022-03-25T17:13:05.000Z | pytest_django/plugin.py | bdauvergne/pytest-django | 66205b3d6ac21e65fbd3d95f1f541db30a596e53 | [
"BSD-3-Clause"
] | 308 | 2015-01-08T11:40:23.000Z | 2022-03-23T02:53:14.000Z | """A pytest plugin which helps testing Django applications
This plugin handles creating and destroying the test environment and
test database and provides some useful text fixtures.
"""
import contextlib
import inspect
import os
import pathlib
import sys
from functools import reduce
from typing import Generator, List, Optional, Tuple, Union
import pytest
from .django_compat import is_django_unittest # noqa
from .fixtures import _live_server_helper # noqa
from .fixtures import admin_client # noqa
from .fixtures import admin_user # noqa
from .fixtures import async_client # noqa
from .fixtures import async_rf # noqa
from .fixtures import client # noqa
from .fixtures import db # noqa
from .fixtures import django_assert_max_num_queries # noqa
from .fixtures import django_assert_num_queries # noqa
from .fixtures import django_capture_on_commit_callbacks # noqa
from .fixtures import django_db_createdb # noqa
from .fixtures import django_db_keepdb # noqa
from .fixtures import django_db_modify_db_settings # noqa
from .fixtures import django_db_modify_db_settings_parallel_suffix # noqa
from .fixtures import django_db_modify_db_settings_tox_suffix # noqa
from .fixtures import django_db_modify_db_settings_xdist_suffix # noqa
from .fixtures import django_db_reset_sequences # noqa
from .fixtures import django_db_setup # noqa
from .fixtures import django_db_use_migrations # noqa
from .fixtures import django_user_model # noqa
from .fixtures import django_username_field # noqa
from .fixtures import live_server # noqa
from .fixtures import rf # noqa
from .fixtures import settings # noqa
from .fixtures import transactional_db # noqa
from .lazy_django import django_settings_is_configured, skip_if_no_django
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import ContextManager, NoReturn
import django
from .fixtures import _DjangoDb, _DjangoDbDatabases
SETTINGS_MODULE_ENV = "DJANGO_SETTINGS_MODULE"
CONFIGURATION_ENV = "DJANGO_CONFIGURATION"
INVALID_TEMPLATE_VARS_ENV = "FAIL_INVALID_TEMPLATE_VARS"
_report_header = []
# ############### pytest hooks ################
@pytest.hookimpl()
def pytest_addoption(parser) -> None:
group = parser.getgroup("django")
group.addoption(
"--reuse-db",
action="store_true",
dest="reuse_db",
default=False,
help="Re-use the testing database if it already exists, "
"and do not remove it when the test finishes.",
)
group.addoption(
"--create-db",
action="store_true",
dest="create_db",
default=False,
help="Re-create the database, even if it exists. This "
"option can be used to override --reuse-db.",
)
group.addoption(
"--ds",
action="store",
type=str,
dest="ds",
default=None,
help="Set DJANGO_SETTINGS_MODULE.",
)
group.addoption(
"--dc",
action="store",
type=str,
dest="dc",
default=None,
help="Set DJANGO_CONFIGURATION.",
)
group.addoption(
"--nomigrations",
"--no-migrations",
action="store_true",
dest="nomigrations",
default=False,
help="Disable Django migrations on test setup",
)
group.addoption(
"--migrations",
action="store_false",
dest="nomigrations",
default=False,
help="Enable Django migrations on test setup",
)
parser.addini(
CONFIGURATION_ENV, "django-configurations class to use by pytest-django."
)
group.addoption(
"--liveserver",
default=None,
help="Address and port for the live_server fixture.",
)
parser.addini(
SETTINGS_MODULE_ENV, "Django settings module to use by pytest-django."
)
parser.addini(
"django_find_project",
"Automatically find and add a Django project to the " "Python path.",
type="bool",
default=True,
)
parser.addini(
"django_debug_mode",
"How to set the Django DEBUG setting (default `False`). "
"Use `keep` to not override.",
default="False",
)
group.addoption(
"--fail-on-template-vars",
action="store_true",
dest="itv",
default=False,
help="Fail for invalid variables in templates.",
)
parser.addini(
INVALID_TEMPLATE_VARS_ENV,
"Fail for invalid variables in templates.",
type="bool",
default=False,
)
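# The options and ini keys registered above are combined on the pytest
# command line; two illustrative invocations (the settings module is a
# placeholder, not something this plugin ships):
#
#     pytest --ds=myproject.settings.test --reuse-db
#     pytest --create-db --fail-on-template-vars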
PROJECT_FOUND = (
"pytest-django found a Django project in %s "
"(it contains manage.py) and added it to the Python path.\n"
'If this is wrong, add "django_find_project = false" to '
"pytest.ini and explicitly manage your Python path."
)
PROJECT_NOT_FOUND = (
"pytest-django could not find a Django project "
"(no manage.py file could be found). You must "
"explicitly add your Django project to the Python path "
"to have it picked up."
)
PROJECT_SCAN_DISABLED = (
"pytest-django did not search for Django "
"projects since it is disabled in the configuration "
'("django_find_project = false")'
)
@contextlib.contextmanager
def _handle_import_error(extra_message: str) -> Generator[None, None, None]:
try:
yield
except ImportError as e:
django_msg = (e.args[0] + "\n\n") if e.args else ""
msg = django_msg + extra_message
raise ImportError(msg)
def _add_django_project_to_path(args) -> str:
def is_django_project(path: pathlib.Path) -> bool:
try:
return path.is_dir() and (path / "manage.py").exists()
except OSError:
return False
def arg_to_path(arg: str) -> pathlib.Path:
# Test classes or functions can be appended to paths separated by ::
arg = arg.split("::", 1)[0]
return pathlib.Path(arg)
def find_django_path(args) -> Optional[pathlib.Path]:
str_args = (str(arg) for arg in args)
path_args = [arg_to_path(x) for x in str_args if not x.startswith("-")]
cwd = pathlib.Path.cwd()
if not path_args:
path_args.append(cwd)
elif cwd not in path_args:
path_args.append(cwd)
for arg in path_args:
if is_django_project(arg):
return arg
for parent in arg.parents:
if is_django_project(parent):
return parent
return None
project_dir = find_django_path(args)
if project_dir:
sys.path.insert(0, str(project_dir.absolute()))
return PROJECT_FOUND % project_dir
return PROJECT_NOT_FOUND
def _setup_django() -> None:
if "django" not in sys.modules:
return
import django.conf
# Avoid force-loading Django when settings are not properly configured.
if not django.conf.settings.configured:
return
import django.apps
if not django.apps.apps.ready:
django.setup()
_blocking_manager.block()
def _get_boolean_value(
x: Union[None, bool, str],
name: str,
default: Optional[bool] = None,
) -> bool:
if x is None:
return bool(default)
if isinstance(x, bool):
return x
possible_values = {"true": True, "false": False, "1": True, "0": False}
try:
return possible_values[x.lower()]
except KeyError:
raise ValueError(
"{} is not a valid value for {}. "
"It must be one of {}.".format(x, name, ", ".join(possible_values.keys()))
)
@pytest.hookimpl()
def pytest_load_initial_conftests(
early_config,
parser,
args: List[str],
) -> None:
# Register the marks
early_config.addinivalue_line(
"markers",
"django_db(transaction=False, reset_sequences=False, databases=None): "
"Mark the test as using the Django test database. "
"The *transaction* argument allows you to use real transactions "
"in the test like Django's TransactionTestCase. "
"The *reset_sequences* argument resets database sequences before "
"the test. "
"The *databases* argument sets which database aliases the test "
"uses (by default, only 'default'). Use '__all__' for all databases.",
)
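    # A usage sketch for the marker registered above (model and test names
    # are illustrative only):
    #
    #     @pytest.mark.django_db(transaction=True)
    #     def test_creates_rows():
    #         MyModel.objects.create(name="example")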
early_config.addinivalue_line(
"markers",
"urls(modstr): Use a different URLconf for this test, similar to "
"the `urls` attribute of Django's `TestCase` objects. *modstr* is "
"a string specifying the module of a URL config, e.g. "
'"my_app.test_urls".',
)
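    # Corresponding usage sketch for the urls marker (the URLconf module and
    # path are illustrative only):
    #
    #     @pytest.mark.urls("my_app.test_urls")
    #     def test_custom_urlconf(client):
    #         client.get("/only-in-test-urls/")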
early_config.addinivalue_line(
"markers",
"ignore_template_errors(): ignore errors from invalid template "
"variables (if --fail-on-template-vars is used).",
)
options = parser.parse_known_args(args)
if options.version or options.help:
return
django_find_project = _get_boolean_value(
early_config.getini("django_find_project"), "django_find_project"
)
if django_find_project:
_django_project_scan_outcome = _add_django_project_to_path(args)
else:
_django_project_scan_outcome = PROJECT_SCAN_DISABLED
if (
options.itv
or _get_boolean_value(
os.environ.get(INVALID_TEMPLATE_VARS_ENV), INVALID_TEMPLATE_VARS_ENV
)
or early_config.getini(INVALID_TEMPLATE_VARS_ENV)
):
os.environ[INVALID_TEMPLATE_VARS_ENV] = "true"
def _get_option_with_source(
option: Optional[str],
envname: str,
) -> Union[Tuple[str, str], Tuple[None, None]]:
if option:
return option, "option"
if envname in os.environ:
return os.environ[envname], "env"
cfgval = early_config.getini(envname)
if cfgval:
return cfgval, "ini"
return None, None
ds, ds_source = _get_option_with_source(options.ds, SETTINGS_MODULE_ENV)
dc, dc_source = _get_option_with_source(options.dc, CONFIGURATION_ENV)
if ds:
_report_header.append("settings: {} (from {})".format(ds, ds_source))
os.environ[SETTINGS_MODULE_ENV] = ds
if dc:
_report_header.append("configuration: {} (from {})".format(dc, dc_source))
os.environ[CONFIGURATION_ENV] = dc
# Install the django-configurations importer
import configurations.importer
configurations.importer.install()
# Forcefully load Django settings, throws ImportError or
# ImproperlyConfigured if settings cannot be loaded.
from django.conf import settings as dj_settings
with _handle_import_error(_django_project_scan_outcome):
dj_settings.DATABASES
_setup_django()
@pytest.hookimpl()
def pytest_report_header() -> Optional[List[str]]:
if _report_header:
return ["django: " + ", ".join(_report_header)]
return None
@pytest.hookimpl(trylast=True)
def pytest_configure() -> None:
# Allow Django settings to be configured in a user pytest_configure call,
# but make sure we call django.setup()
_setup_django()
@pytest.hookimpl(tryfirst=True)
def pytest_collection_modifyitems(items: List[pytest.Item]) -> None:
# If Django is not configured we don't need to bother
if not django_settings_is_configured():
return
from django.test import TestCase, TransactionTestCase
def get_order_number(test: pytest.Item) -> int:
test_cls = getattr(test, "cls", None)
if test_cls:
# Beware, TestCase is a subclass of TransactionTestCase
if issubclass(test_cls, TestCase):
return 0
if issubclass(test_cls, TransactionTestCase):
return 1
marker_db = test.get_closest_marker('django_db')
if not marker_db:
transaction = None
else:
transaction = validate_django_db(marker_db)[0]
if transaction is True:
return 1
fixtures = getattr(test, 'fixturenames', [])
if "transactional_db" in fixtures:
return 1
if transaction is False:
return 0
if "db" in fixtures:
return 0
return 2
items.sort(key=get_order_number)
@pytest.fixture(autouse=True, scope="session")
def django_test_environment(request) -> None:
"""
Ensure that Django is loaded and has its testing environment setup.
XXX It is a little dodgy that this is an autouse fixture. Perhaps
an email fixture should be requested in order to be able to
use the Django email machinery just like you need to request a
db fixture for access to the Django database, etc. But
without duplicating a lot more of Django's test support code
we need to follow this model.
"""
if django_settings_is_configured():
_setup_django()
from django.test.utils import (
setup_test_environment, teardown_test_environment,
)
debug_ini = request.config.getini("django_debug_mode")
if debug_ini == "keep":
debug = None
else:
debug = _get_boolean_value(debug_ini, "django_debug_mode", False)
setup_test_environment(debug=debug)
request.addfinalizer(teardown_test_environment)
@pytest.fixture(scope="session")
def django_db_blocker() -> "Optional[_DatabaseBlocker]":
"""Wrapper around Django's database access.
This object can be used to re-enable database access. This fixture is used
internally in pytest-django to build the other fixtures and can be used for
special database handling.
The object is a context manager and provides the methods
.unblock()/.block() and .restore() to temporarily enable database access.
This is an advanced feature that is meant to be used to implement database
fixtures.
"""
if not django_settings_is_configured():
return None
return _blocking_manager
@pytest.fixture(autouse=True)
def _django_db_marker(request) -> None:
"""Implement the django_db marker, internal to pytest-django.
This will dynamically request the ``db``, ``transactional_db`` or
``django_db_reset_sequences`` fixtures as required by the django_db marker.
"""
marker = request.node.get_closest_marker("django_db")
if marker:
transaction, reset_sequences, databases = validate_django_db(marker)
# TODO: Use pytest Store (item.store) once that's stable.
request.node._pytest_django_databases = databases
if reset_sequences:
request.getfixturevalue("django_db_reset_sequences")
elif transaction:
request.getfixturevalue("transactional_db")
else:
request.getfixturevalue("db")
@pytest.fixture(autouse=True, scope="class")
def _django_setup_unittest(
request,
django_db_blocker: "_DatabaseBlocker",
) -> Generator[None, None, None]:
"""Setup a django unittest, internal to pytest-django."""
if not django_settings_is_configured() or not is_django_unittest(request):
yield
return
# Fix/patch pytest.
# Before pytest 5.4: https://github.com/pytest-dev/pytest/issues/5991
# After pytest 5.4: https://github.com/pytest-dev/pytest-django/issues/824
from _pytest.unittest import TestCaseFunction
original_runtest = TestCaseFunction.runtest
def non_debugging_runtest(self) -> None:
self._testcase(result=self)
try:
TestCaseFunction.runtest = non_debugging_runtest # type: ignore[assignment]
request.getfixturevalue("django_db_setup")
with django_db_blocker.unblock():
yield
finally:
TestCaseFunction.runtest = original_runtest # type: ignore[assignment]
@pytest.fixture(scope="function", autouse=True)
def _dj_autoclear_mailbox() -> None:
if not django_settings_is_configured():
return
from django.core import mail
del mail.outbox[:]
@pytest.fixture(scope="function")
def mailoutbox(
django_mail_patch_dns: None,
_dj_autoclear_mailbox: None,
) -> "Optional[List[django.core.mail.EmailMessage]]":
if not django_settings_is_configured():
return None
from django.core import mail
return mail.outbox
@pytest.fixture(scope="function")
def django_mail_patch_dns(
monkeypatch,
django_mail_dnsname: str,
) -> None:
from django.core import mail
monkeypatch.setattr(mail.message, "DNS_NAME", django_mail_dnsname)
@pytest.fixture(scope="function")
def django_mail_dnsname() -> str:
return "fake-tests.example.com"
@pytest.fixture(autouse=True, scope="function")
def _django_set_urlconf(request) -> None:
"""Apply the @pytest.mark.urls marker, internal to pytest-django."""
marker = request.node.get_closest_marker("urls")
if marker:
skip_if_no_django()
import django.conf
from django.urls import clear_url_caches, set_urlconf
urls = validate_urls(marker)
original_urlconf = django.conf.settings.ROOT_URLCONF
django.conf.settings.ROOT_URLCONF = urls
clear_url_caches()
set_urlconf(None)
def restore() -> None:
django.conf.settings.ROOT_URLCONF = original_urlconf
# Copy the pattern from
# https://github.com/django/django/blob/main/django/test/signals.py#L152
clear_url_caches()
set_urlconf(None)
request.addfinalizer(restore)
@pytest.fixture(autouse=True, scope="session")
def _fail_for_invalid_template_variable():
"""Fixture that fails for invalid variables in templates.
This fixture will fail each test that uses django template rendering
should a template contain an invalid template variable.
The fail message will include the name of the invalid variable and
in most cases the template name.
It does not raise an exception, but fails, as the stack trace doesn't
offer any helpful information to debug.
This behavior can be switched off using the marker:
``pytest.mark.ignore_template_errors``
"""
class InvalidVarException:
"""Custom handler for invalid strings in templates."""
def __init__(self) -> None:
self.fail = True
def __contains__(self, key: str) -> bool:
return key == "%s"
@staticmethod
def _get_origin():
stack = inspect.stack()
# Try to use topmost `self.origin` first (Django 1.9+, and with
            # TEMPLATE_DEBUG).
for f in stack[2:]:
func = f[3]
if func == "render":
frame = f[0]
try:
origin = frame.f_locals["self"].origin
except (AttributeError, KeyError):
continue
if origin is not None:
return origin
from django.template import Template
# finding the ``render`` needle in the stack
frameinfo = reduce(
lambda x, y: y[3] == "render" and "base.py" in y[1] and y or x, stack
)
# assert 0, stack
frame = frameinfo[0]
# finding only the frame locals in all frame members
f_locals = reduce(
lambda x, y: y[0] == "f_locals" and y or x, inspect.getmembers(frame)
)[1]
# ``django.template.base.Template``
template = f_locals["self"]
if isinstance(template, Template):
return template.name
def __mod__(self, var: str) -> str:
origin = self._get_origin()
if origin:
msg = "Undefined template variable '{}' in '{}'".format(var, origin)
else:
msg = "Undefined template variable '%s'" % var
if self.fail:
pytest.fail(msg)
else:
return msg
if (
os.environ.get(INVALID_TEMPLATE_VARS_ENV, "false") == "true"
and django_settings_is_configured()
):
from django.conf import settings as dj_settings
if dj_settings.TEMPLATES:
dj_settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"] = InvalidVarException()
@pytest.fixture(autouse=True)
def _template_string_if_invalid_marker(request) -> None:
"""Apply the @pytest.mark.ignore_template_errors marker,
internal to pytest-django."""
marker = request.keywords.get("ignore_template_errors", None)
if os.environ.get(INVALID_TEMPLATE_VARS_ENV, "false") == "true":
if marker and django_settings_is_configured():
from django.conf import settings as dj_settings
if dj_settings.TEMPLATES:
dj_settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"].fail = False
@pytest.fixture(autouse=True, scope="function")
def _django_clear_site_cache() -> None:
"""Clears ``django.contrib.sites.models.SITE_CACHE`` to avoid
unexpected behavior with cached site objects.
"""
if django_settings_is_configured():
from django.conf import settings as dj_settings
if "django.contrib.sites" in dj_settings.INSTALLED_APPS:
from django.contrib.sites.models import Site
Site.objects.clear_cache()
# ############### Helper Functions ################
class _DatabaseBlockerContextManager:
def __init__(self, db_blocker) -> None:
self._db_blocker = db_blocker
def __enter__(self) -> None:
pass
def __exit__(self, exc_type, exc_value, traceback) -> None:
self._db_blocker.restore()
class _DatabaseBlocker:
"""Manager for django.db.backends.base.base.BaseDatabaseWrapper.
This is the object returned by django_db_blocker.
"""
def __init__(self):
self._history = []
self._real_ensure_connection = None
@property
def _dj_db_wrapper(self) -> "django.db.backends.base.base.BaseDatabaseWrapper":
from django.db.backends.base.base import BaseDatabaseWrapper
# The first time the _dj_db_wrapper is accessed, we will save a
# reference to the real implementation.
if self._real_ensure_connection is None:
self._real_ensure_connection = BaseDatabaseWrapper.ensure_connection
return BaseDatabaseWrapper
def _save_active_wrapper(self) -> None:
self._history.append(self._dj_db_wrapper.ensure_connection)
def _blocking_wrapper(*args, **kwargs) -> "NoReturn":
__tracebackhide__ = True
__tracebackhide__ # Silence pyflakes
raise RuntimeError(
"Database access not allowed, "
'use the "django_db" mark, or the '
'"db" or "transactional_db" fixtures to enable it.'
)
def unblock(self) -> "ContextManager[None]":
"""Enable access to the Django database."""
self._save_active_wrapper()
self._dj_db_wrapper.ensure_connection = self._real_ensure_connection
return _DatabaseBlockerContextManager(self)
def block(self) -> "ContextManager[None]":
"""Disable access to the Django database."""
self._save_active_wrapper()
self._dj_db_wrapper.ensure_connection = self._blocking_wrapper
return _DatabaseBlockerContextManager(self)
def restore(self) -> None:
self._dj_db_wrapper.ensure_connection = self._history.pop()
_blocking_manager = _DatabaseBlocker()
def validate_django_db(marker) -> "_DjangoDb":
"""Validate the django_db marker.
It checks the signature and creates the ``transaction``,
``reset_sequences`` and ``databases`` attributes on the marker
which will have the correct values.
A sequence reset is only allowed when combined with a transaction.
"""
def apifun(
transaction: bool = False,
reset_sequences: bool = False,
databases: "_DjangoDbDatabases" = None,
) -> "_DjangoDb":
return transaction, reset_sequences, databases
return apifun(*marker.args, **marker.kwargs)
def validate_urls(marker) -> List[str]:
"""Validate the urls marker.
It checks the signature and creates the `urls` attribute on the
marker which will have the correct value.
"""
def apifun(urls: List[str]) -> List[str]:
return urls
return apifun(*marker.args, **marker.kwargs)
| 31.582902 | 92 | 0.654212 |
3acb25f591c1735a81974ba0f6e066c5f47e85e5 | 86,216 | py | Python | qa/common/gen_qa_models.py | akaanirban/server | 195b7f65f4fb74d528accdb0f424ff57f5283d82 | [
"BSD-3-Clause"
] | null | null | null | qa/common/gen_qa_models.py | akaanirban/server | 195b7f65f4fb74d528accdb0f424ff57f5283d82 | [
"BSD-3-Clause"
] | null | null | null | qa/common/gen_qa_models.py | akaanirban/server | 195b7f65f4fb74d528accdb0f424ff57f5283d82 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2018-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
from builtins import range
import os
import sys
import numpy as np
import gen_ensemble_model_utils as emu
FLAGS = None
np_dtype_string = np.dtype(object)
def np_to_model_dtype(np_dtype):
if np_dtype == bool:
return "TYPE_BOOL"
elif np_dtype == np.int8:
return "TYPE_INT8"
elif np_dtype == np.int16:
return "TYPE_INT16"
elif np_dtype == np.int32:
return "TYPE_INT32"
elif np_dtype == np.int64:
return "TYPE_INT64"
elif np_dtype == np.uint8:
return "TYPE_UINT8"
elif np_dtype == np.uint16:
return "TYPE_UINT16"
elif np_dtype == np.float16:
return "TYPE_FP16"
elif np_dtype == np.float32:
return "TYPE_FP32"
elif np_dtype == np.float64:
return "TYPE_FP64"
elif np_dtype == np_dtype_string:
return "TYPE_STRING"
return None
def np_to_tf_dtype(np_dtype):
if np_dtype == bool:
return tf.bool
elif np_dtype == np.int8:
return tf.int8
elif np_dtype == np.int16:
return tf.int16
elif np_dtype == np.int32:
return tf.int32
elif np_dtype == np.int64:
return tf.int64
elif np_dtype == np.uint8:
return tf.uint8
elif np_dtype == np.uint16:
return tf.uint16
elif np_dtype == np.float16:
return tf.float16
elif np_dtype == np.float32:
return tf.float32
elif np_dtype == np.float64:
return tf.float64
elif np_dtype == np_dtype_string:
return tf.string
return None
def np_to_trt_dtype(np_dtype):
if np_dtype == bool:
return trt.bool
elif np_dtype == np.int8:
return trt.int8
elif np_dtype == np.int32:
return trt.int32
elif np_dtype == np.float16:
return trt.float16
elif np_dtype == np.float32:
return trt.float32
return None
def np_to_onnx_dtype(np_dtype):
if np_dtype == bool:
return onnx.TensorProto.BOOL
elif np_dtype == np.int8:
return onnx.TensorProto.INT8
elif np_dtype == np.int16:
return onnx.TensorProto.INT16
elif np_dtype == np.int32:
return onnx.TensorProto.INT32
elif np_dtype == np.int64:
return onnx.TensorProto.INT64
elif np_dtype == np.uint8:
return onnx.TensorProto.UINT8
elif np_dtype == np.uint16:
return onnx.TensorProto.UINT16
elif np_dtype == np.float16:
return onnx.TensorProto.FLOAT16
elif np_dtype == np.float32:
return onnx.TensorProto.FLOAT
elif np_dtype == np.float64:
return onnx.TensorProto.DOUBLE
elif np_dtype == np_dtype_string:
return onnx.TensorProto.STRING
return None
def np_to_torch_dtype(np_dtype):
if np_dtype == bool:
return torch.bool
elif np_dtype == np.int8:
return torch.int8
elif np_dtype == np.int16:
return torch.int16
elif np_dtype == np.int32:
return torch.int
elif np_dtype == np.int64:
return torch.long
elif np_dtype == np.uint8:
return torch.uint8
elif np_dtype == np.uint16:
return None # Not supported in Torch
elif np_dtype == np.float16:
return None
elif np_dtype == np.float32:
return torch.float
elif np_dtype == np.float64:
return torch.double
elif np_dtype == np_dtype_string:
return None # Not supported in Torch
def create_graphdef_modelfile(models_dir,
max_batch,
model_version,
input_shape,
output0_shape,
output1_shape,
input_dtype,
output0_dtype,
output1_dtype,
swap=False):
if not tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape, output1_shape):
return
tf_input_dtype = np_to_tf_dtype(input_dtype)
tf_output0_dtype = np_to_tf_dtype(output0_dtype)
tf_output1_dtype = np_to_tf_dtype(output1_dtype)
# Create the model. If non-batching then don't include the batch
# dimension.
tf.reset_default_graph()
if max_batch == 0:
in0 = tf.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape),
"INPUT0")
in1 = tf.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape),
"INPUT1")
else:
in0 = tf.placeholder(tf_input_dtype, [
None,
] + tu.shape_to_tf_shape(input_shape), "INPUT0")
in1 = tf.placeholder(tf_input_dtype, [
None,
] + tu.shape_to_tf_shape(input_shape), "INPUT1")
# If the input is a string, then convert each string to the
# equivalent int32 value.
if tf_input_dtype == tf.string:
in0 = tf.strings.to_number(in0, tf.int32)
in1 = tf.strings.to_number(in1, tf.int32)
add = tf.add(in0, in1, "ADD")
sub = tf.subtract(in0, in1, "SUB")
# Cast or convert result to the output dtype.
if tf_output0_dtype == tf.string:
cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
else:
cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
if tf_output1_dtype == tf.string:
cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
else:
cast1 = tf.cast(sub if not swap else add, tf_output1_dtype, "CAST1")
out0 = tf.identity(cast0, "OUTPUT0")
out1 = tf.identity(cast1, "OUTPUT1")
# Use a different model name for the non-batching variant
model_name = tu.get_model_name(
"graphdef_nobatch" if max_batch == 0 else "graphdef", input_dtype,
output0_dtype, output1_dtype)
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
with tf.Session() as sess:
graph_io.write_graph(sess.graph.as_graph_def(),
model_version_dir,
"model.graphdef",
as_text=False)
def create_graphdef_modelconfig(models_dir, max_batch, model_version,
input_shape, output0_shape, output1_shape,
input_dtype, output0_dtype, output1_dtype,
output0_label_cnt, version_policy):
if not tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape, output1_shape):
return
# Unpack version policy
version_policy_str = "{ latest { num_versions: 1 }}"
if version_policy is not None:
type, val = version_policy
if type == 'latest':
version_policy_str = "{{ latest {{ num_versions: {} }}}}".format(
val)
elif type == 'specific':
version_policy_str = "{{ specific {{ versions: {} }}}}".format(val)
else:
version_policy_str = "{ all { }}"
# Use a different model name for the non-batching variant
model_name = tu.get_model_name(
"graphdef_nobatch" if max_batch == 0 else "graphdef", input_dtype,
output0_dtype, output1_dtype)
config_dir = models_dir + "/" + model_name
config = '''
name: "{}"
platform: "tensorflow_graphdef"
max_batch_size: {}
version_policy: {}
input [
{{
name: "INPUT0"
data_type: {}
dims: [ {} ]
}},
{{
name: "INPUT1"
data_type: {}
dims: [ {} ]
}}
]
output [
{{
name: "OUTPUT0"
data_type: {}
dims: [ {} ]
label_filename: "output0_labels.txt"
}},
{{
name: "OUTPUT1"
data_type: {}
dims: [ {} ]
}}
]
'''.format(model_name, max_batch, version_policy_str,
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(output0_dtype),
tu.shape_to_dims_str(output0_shape),
np_to_model_dtype(output1_dtype),
tu.shape_to_dims_str(output1_shape))
try:
os.makedirs(config_dir)
except OSError as ex:
pass # ignore existing dir
with open(config_dir + "/config.pbtxt", "w") as cfile:
cfile.write(config)
with open(config_dir + "/output0_labels.txt", "w") as lfile:
for l in range(output0_label_cnt):
lfile.write("label" + str(l) + "\n")
def create_savedmodel_modelfile(models_dir,
max_batch,
model_version,
input_shape,
output0_shape,
output1_shape,
input_dtype,
output0_dtype,
output1_dtype,
swap=False):
if not tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape, output1_shape):
return
tf_input_dtype = np_to_tf_dtype(input_dtype)
tf_output0_dtype = np_to_tf_dtype(output0_dtype)
tf_output1_dtype = np_to_tf_dtype(output1_dtype)
# Create the model. If non-batching then don't include the batch
# dimension.
tf.reset_default_graph()
if max_batch == 0:
in0 = tf.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape),
"TENSOR_INPUT0")
in1 = tf.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape),
"TENSOR_INPUT1")
else:
in0 = tf.placeholder(tf_input_dtype, [
None,
] + tu.shape_to_tf_shape(input_shape), "TENSOR_INPUT0")
in1 = tf.placeholder(tf_input_dtype, [
None,
] + tu.shape_to_tf_shape(input_shape), "TENSOR_INPUT1")
# If the input is a string, then convert each string to the
    # equivalent int32 value.
if tf_input_dtype == tf.string:
in0 = tf.strings.to_number(in0, tf.int32)
in1 = tf.strings.to_number(in1, tf.int32)
add = tf.add(in0, in1, "ADD")
sub = tf.subtract(in0, in1, "SUB")
# Cast or convert result to the output dtype.
if tf_output0_dtype == tf.string:
cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
else:
cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
if tf_output1_dtype == tf.string:
cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
else:
cast1 = tf.cast(sub if not swap else add, tf_output1_dtype, "CAST1")
out0 = tf.identity(cast0, "TENSOR_OUTPUT0")
out1 = tf.identity(cast1, "TENSOR_OUTPUT1")
# Use a different model name for the non-batching variant
model_name = tu.get_model_name(
"savedmodel_nobatch" if max_batch == 0 else "savedmodel", input_dtype,
output0_dtype, output1_dtype)
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
with tf.Session() as sess:
input0_tensor = tf.get_default_graph().get_tensor_by_name(
"TENSOR_INPUT0:0")
input1_tensor = tf.get_default_graph().get_tensor_by_name(
"TENSOR_INPUT1:0")
output0_tensor = tf.get_default_graph().get_tensor_by_name(
"TENSOR_OUTPUT0:0")
output1_tensor = tf.get_default_graph().get_tensor_by_name(
"TENSOR_OUTPUT1:0")
tf.saved_model.simple_save(sess,
model_version_dir + "/model.savedmodel",
inputs={
"INPUT0": input0_tensor,
"INPUT1": input1_tensor
},
outputs={
"OUTPUT0": output0_tensor,
"OUTPUT1": output1_tensor
})
def create_savedmodel_modelconfig(models_dir, max_batch, model_version,
input_shape, output0_shape, output1_shape,
input_dtype, output0_dtype, output1_dtype,
output0_label_cnt, version_policy):
if not tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape, output1_shape):
return
# Unpack version policy
version_policy_str = "{ latest { num_versions: 1 }}"
if version_policy is not None:
type, val = version_policy
if type == 'latest':
version_policy_str = "{{ latest {{ num_versions: {} }}}}".format(
val)
elif type == 'specific':
version_policy_str = "{{ specific {{ versions: {} }}}}".format(val)
else:
version_policy_str = "{ all { }}"
# Use a different model name for the non-batching variant
model_name = tu.get_model_name(
"savedmodel_nobatch" if max_batch == 0 else "savedmodel", input_dtype,
output0_dtype, output1_dtype)
config_dir = models_dir + "/" + model_name
config = '''
name: "{}"
platform: "tensorflow_savedmodel"
max_batch_size: {}
version_policy: {}
input [
{{
name: "INPUT0"
data_type: {}
dims: [ {} ]
}},
{{
name: "INPUT1"
data_type: {}
dims: [ {} ]
}}
]
output [
{{
name: "OUTPUT0"
data_type: {}
dims: [ {} ]
label_filename: "output0_labels.txt"
}},
{{
name: "OUTPUT1"
data_type: {}
dims: [ {} ]
}}
]
'''.format(model_name, max_batch, version_policy_str,
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(output0_dtype),
tu.shape_to_dims_str(output0_shape),
np_to_model_dtype(output1_dtype),
tu.shape_to_dims_str(output1_shape))
try:
os.makedirs(config_dir)
except OSError as ex:
pass # ignore existing dir
with open(config_dir + "/config.pbtxt", "w") as cfile:
cfile.write(config)
with open(config_dir + "/output0_labels.txt", "w") as lfile:
for l in range(output0_label_cnt):
lfile.write("label" + str(l) + "\n")
def create_plan_dynamic_rf_modelfile(models_dir, max_batch, model_version,
input_shape, output0_shape, output1_shape,
input_dtype, output0_dtype, output1_dtype,
swap, min_dim, max_dim):
trt_input_dtype = np_to_trt_dtype(input_dtype)
trt_output0_dtype = np_to_trt_dtype(output0_dtype)
trt_output1_dtype = np_to_trt_dtype(output1_dtype)
trt_memory_format = trt.TensorFormat.LINEAR
# Create the model
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
builder = trt.Builder(TRT_LOGGER)
network = builder.create_network(
1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
if max_batch == 0:
input_with_batchsize = [i for i in input_shape]
else:
input_with_batchsize = [-1] + [i for i in input_shape]
in0 = network.add_input("INPUT0", trt_input_dtype, input_with_batchsize)
in1 = network.add_input("INPUT1", trt_input_dtype, input_with_batchsize)
add = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUM)
sub = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUB)
out0 = add if not swap else sub
out1 = sub if not swap else add
out0.get_output(0).name = "OUTPUT0"
out1.get_output(0).name = "OUTPUT1"
network.mark_output(out0.get_output(0))
network.mark_output(out1.get_output(0))
out0.get_output(0).dtype = trt_output0_dtype
out1.get_output(0).dtype = trt_output1_dtype
in0.allowed_formats = 1 << int(trt_memory_format)
in1.allowed_formats = 1 << int(trt_memory_format)
out0.get_output(0).allowed_formats = 1 << int(trt_memory_format)
out1.get_output(0).allowed_formats = 1 << int(trt_memory_format)
if (trt_input_dtype == trt.int8):
in0.dynamic_range = (-128.0, 127.0)
in1.dynamic_range = (-128.0, 127.0)
if (trt_output0_dtype == trt.int8):
out0.get_output(0).dynamic_range = (-128.0, 127.0)
if (trt_output1_dtype == trt.int8):
out1.get_output(0).dynamic_range = (-128.0, 127.0)
min_shape = []
opt_shape = []
max_shape = []
if max_batch != 0:
min_shape = min_shape + [1]
opt_shape = opt_shape + [max(1, max_batch)]
max_shape = max_shape + [max(1, max_batch)]
for i in input_shape:
if i == -1:
min_shape = min_shape + [min_dim]
opt_shape = opt_shape + [int((max_dim + min_dim) / 2)]
max_shape = max_shape + [max_dim]
else:
min_shape = min_shape + [i]
opt_shape = opt_shape + [i]
max_shape = max_shape + [i]
profile = builder.create_optimization_profile()
profile.set_shape("INPUT0", min_shape, opt_shape, max_shape)
profile.set_shape("INPUT1", min_shape, opt_shape, max_shape)
flags = 1 << int(trt.BuilderFlag.STRICT_TYPES)
datatype_set = set([trt_input_dtype, trt_output0_dtype, trt_output1_dtype])
for dt in datatype_set:
if (dt == trt.int8):
flags |= 1 << int(trt.BuilderFlag.INT8)
elif (dt == trt.float16):
flags |= 1 << int(trt.BuilderFlag.FP16)
config = builder.create_builder_config()
config.flags = flags
config.add_optimization_profile(profile)
config.max_workspace_size = 1 << 20
try:
engine_bytes = builder.build_serialized_network(network, config)
except AttributeError:
engine = builder.build_engine(network, config)
engine_bytes = engine.serialize()
del engine
# Use a different model name for different kinds of models
model_name = tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan",
input_dtype, output0_dtype, output1_dtype)
if min_dim != 1 or max_dim != 32:
model_name = "{}-{}-{}".format(model_name, min_dim, max_dim)
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
with open(model_version_dir + "/model.plan", "wb") as f:
f.write(engine_bytes)
del builder
def create_plan_dynamic_modelfile(models_dir, max_batch, model_version,
input_shape, output0_shape, output1_shape,
input_dtype, output0_dtype, output1_dtype,
swap, min_dim, max_dim):
trt_input_dtype = np_to_trt_dtype(input_dtype)
trt_output0_dtype = np_to_trt_dtype(output0_dtype)
trt_output1_dtype = np_to_trt_dtype(output1_dtype)
# Create the model
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
builder = trt.Builder(TRT_LOGGER)
network = builder.create_network(
1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
if max_batch == 0:
input_with_batchsize = [i for i in input_shape]
else:
input_with_batchsize = [-1] + [i for i in input_shape]
in0 = network.add_input("INPUT0", trt_input_dtype, input_with_batchsize)
in1 = network.add_input("INPUT1", trt_input_dtype, input_with_batchsize)
add = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUM)
sub = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUB)
out0 = add if not swap else sub
out1 = sub if not swap else add
out0.get_output(0).name = "OUTPUT0"
out1.get_output(0).name = "OUTPUT1"
network.mark_output(out0.get_output(0))
network.mark_output(out1.get_output(0))
min_shape = []
opt_shape = []
max_shape = []
for i in input_shape:
if i == -1:
min_shape = min_shape + [min_dim]
opt_shape = opt_shape + [int((max_dim + min_dim) / 2)]
max_shape = max_shape + [max_dim]
else:
min_shape = min_shape + [i]
opt_shape = opt_shape + [i]
max_shape = max_shape + [i]
config = builder.create_builder_config()
# create multiple profiles with same shape for testing
# with decreasing batch sizes
profile = []
for i in range(4):
profile.append(builder.create_optimization_profile())
if max_batch == 0:
profile[i].set_shape("INPUT0", min_shape, opt_shape, max_shape)
profile[i].set_shape("INPUT1", min_shape, opt_shape, max_shape)
else:
bs = [max_batch - i if max_batch > i else 1]
opt_bs = [1 + i if 1 + i < max_batch - 1 else max_batch - 1]
# Hardcoded 'max_shape[0] += 1' in default profile for
# L0_trt_dynamic_shape, to differentiate whether default profile
# is used if no profile is specified
max_shape_override = max_shape
if i == 0 and (min_dim == 1 and max_dim == 32):
max_shape_override[0] += 1
profile[i].set_shape("INPUT0", [1] + min_shape, opt_bs + opt_shape,
bs + max_shape_override)
profile[i].set_shape("INPUT1", [1] + min_shape, opt_bs + opt_shape,
bs + max_shape_override)
config.add_optimization_profile(profile[i])
# some profiles with non-one min shape for first dim to test autofiller
for i in range(2):
profile.append(builder.create_optimization_profile())
if max_batch == 0:
profile[i + 4].set_shape("INPUT0", min_shape, opt_shape, max_shape)
profile[i + 4].set_shape("INPUT1", min_shape, opt_shape, max_shape)
else:
profile[i + 4].set_shape("INPUT0", [5 + i] + min_shape,
[6] + opt_shape, [max_batch] + max_shape)
profile[i + 4].set_shape("INPUT1", [5 + i] + min_shape,
[6] + opt_shape, [max_batch] + max_shape)
config.add_optimization_profile(profile[i + 4])
    # Repeat another profile with the same min and max shapes as the first
    # profile, to exercise a non-zero profile index in the infer_variable test.
profile.append(builder.create_optimization_profile())
if max_batch == 0:
profile[6].set_shape("INPUT0", min_shape, opt_shape, max_shape)
profile[6].set_shape("INPUT1", min_shape, opt_shape, max_shape)
else:
profile[6].set_shape("INPUT0", [1] + min_shape, [1] + opt_shape,
[max_batch] + max_shape)
profile[6].set_shape("INPUT1", [1] + min_shape, [1] + opt_shape,
[max_batch] + max_shape)
config.add_optimization_profile(profile[6])
# Will add some profiles with static shapes to test the cases where min_shape=opt_shape=max_shape
for i in range(3):
profile.append(builder.create_optimization_profile())
if max_batch == 0:
static_shape = max_shape
profile[7 + i].set_shape("INPUT0", static_shape, static_shape,
static_shape)
profile[7 + i].set_shape("INPUT1", static_shape, static_shape,
static_shape)
else:
# Skipping alternate batch sizes for testing unsupported batches in L0_trt_dynamic_shape.
full_static_shape = [1 + (2 * i)] + max_shape
profile[7 + i].set_shape("INPUT0", full_static_shape,
full_static_shape, full_static_shape)
profile[7 + i].set_shape("INPUT1", full_static_shape,
full_static_shape, full_static_shape)
config.add_optimization_profile(profile[7 + i])
config.max_workspace_size = 1 << 20
try:
engine_bytes = builder.build_serialized_network(network, config)
except AttributeError:
engine = builder.build_engine(network, config)
engine_bytes = engine.serialize()
del engine
# Use a different model name for different kinds of models
model_name = tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan",
input_dtype, output0_dtype, output1_dtype)
if min_dim != 1 or max_dim != 32:
model_name = "{}-{}-{}".format(model_name, min_dim, max_dim)
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
with open(model_version_dir + "/model.plan", "wb") as f:
f.write(engine_bytes)
del builder
def create_plan_fixed_rf_modelfile(models_dir, max_batch, model_version,
input_shape, output0_shape, output1_shape,
input_dtype, output0_dtype, output1_dtype,
swap):
trt_input_dtype = np_to_trt_dtype(input_dtype)
trt_output0_dtype = np_to_trt_dtype(output0_dtype)
trt_output1_dtype = np_to_trt_dtype(output1_dtype)
trt_memory_format = trt.TensorFormat.LINEAR
# Create the model
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
builder = trt.Builder(TRT_LOGGER)
network = builder.create_network()
in0 = network.add_input("INPUT0", trt_input_dtype, input_shape)
in1 = network.add_input("INPUT1", trt_input_dtype, input_shape)
add = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUM)
sub = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUB)
out0 = add if not swap else sub
out1 = sub if not swap else add
out0.get_output(0).name = "OUTPUT0"
out1.get_output(0).name = "OUTPUT1"
network.mark_output(out0.get_output(0))
network.mark_output(out1.get_output(0))
out0.get_output(0).dtype = trt_output0_dtype
out1.get_output(0).dtype = trt_output1_dtype
in0.allowed_formats = 1 << int(trt_memory_format)
in1.allowed_formats = 1 << int(trt_memory_format)
out0.get_output(0).allowed_formats = 1 << int(trt_memory_format)
out1.get_output(0).allowed_formats = 1 << int(trt_memory_format)
if (trt_input_dtype == trt.int8):
in0.dynamic_range = (-128.0, 127.0)
in1.dynamic_range = (-128.0, 127.0)
if (trt_output0_dtype == trt.int8):
out0.get_output(0).dynamic_range = (-128.0, 127.0)
if (trt_output1_dtype == trt.int8):
out1.get_output(0).dynamic_range = (-128.0, 127.0)
flags = 1 << int(trt.BuilderFlag.STRICT_TYPES)
datatype_set = set([trt_input_dtype, trt_output0_dtype, trt_output1_dtype])
for dt in datatype_set:
if (dt == trt.int8):
flags |= 1 << int(trt.BuilderFlag.INT8)
elif (dt == trt.float16):
flags |= 1 << int(trt.BuilderFlag.FP16)
config = builder.create_builder_config()
config.flags = flags
config.max_workspace_size = 1 << 20
builder.max_batch_size = max(1, max_batch)
try:
engine_bytes = builder.build_serialized_network(network, config)
except AttributeError:
engine = builder.build_engine(network, config)
engine_bytes = engine.serialize()
del engine
model_name = tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan",
input_dtype, output0_dtype, output1_dtype)
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
with open(model_version_dir + "/model.plan", "wb") as f:
f.write(engine_bytes)
del builder
def create_plan_fixed_modelfile(models_dir, max_batch, model_version,
input_shape, output0_shape, output1_shape,
input_dtype, output0_dtype, output1_dtype,
swap):
trt_input_dtype = np_to_trt_dtype(input_dtype)
trt_output0_dtype = np_to_trt_dtype(output0_dtype)
trt_output1_dtype = np_to_trt_dtype(output1_dtype)
# Create the model
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
builder = trt.Builder(TRT_LOGGER)
network = builder.create_network()
in0 = network.add_input("INPUT0", trt_input_dtype, input_shape)
in1 = network.add_input("INPUT1", trt_input_dtype, input_shape)
add = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUM)
sub = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUB)
out0 = add if not swap else sub
out1 = sub if not swap else add
out0.get_output(0).name = "OUTPUT0"
out1.get_output(0).name = "OUTPUT1"
network.mark_output(out0.get_output(0))
network.mark_output(out1.get_output(0))
config = builder.create_builder_config()
config.max_workspace_size = 1 << 20
builder.max_batch_size = max(1, max_batch)
try:
engine_bytes = builder.build_serialized_network(network, config)
except AttributeError:
engine = builder.build_engine(network, config)
engine_bytes = engine.serialize()
del engine
del network
model_name = tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan",
input_dtype, output0_dtype, output1_dtype)
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
with open(model_version_dir + "/model.plan", "wb") as f:
f.write(engine_bytes)
del builder
def create_plan_modelfile(models_dir,
max_batch,
model_version,
input_shape,
output0_shape,
output1_shape,
input_dtype,
output0_dtype,
output1_dtype,
swap=False,
min_dim=1,
max_dim=32):
if not tu.validate_for_trt_model(input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape, output1_shape):
return
if input_dtype != np.float32 or output0_dtype != np.float32 or output1_dtype != np.float32:
if (not tu.shape_is_fixed(input_shape) or
not tu.shape_is_fixed(output0_shape) or
not tu.shape_is_fixed(output1_shape)):
create_plan_dynamic_rf_modelfile(models_dir, max_batch,
model_version, input_shape,
output0_shape, output1_shape,
input_dtype, output0_dtype,
output1_dtype, swap, min_dim,
max_dim)
else:
create_plan_fixed_rf_modelfile(models_dir, max_batch, model_version,
input_shape, output0_shape,
output1_shape, input_dtype,
output0_dtype, output1_dtype, swap)
else:
if (not tu.shape_is_fixed(input_shape) or
not tu.shape_is_fixed(output0_shape) or
not tu.shape_is_fixed(output1_shape)):
create_plan_dynamic_modelfile(models_dir, max_batch, model_version,
input_shape, output0_shape,
output1_shape, input_dtype,
output0_dtype, output1_dtype, swap,
min_dim, max_dim)
else:
create_plan_fixed_modelfile(models_dir, max_batch, model_version,
input_shape, output0_shape,
output1_shape, input_dtype,
output0_dtype, output1_dtype, swap)
def create_plan_modelconfig(models_dir,
max_batch,
model_version,
input_shape,
output0_shape,
output1_shape,
input_dtype,
output0_dtype,
output1_dtype,
output0_label_cnt,
version_policy,
min_dim=1,
max_dim=32):
if not tu.validate_for_trt_model(input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape, output1_shape):
return
# Unpack version policy
version_policy_str = "{ latest { num_versions: 1 }}"
if version_policy is not None:
type, val = version_policy
if type == 'latest':
version_policy_str = "{{ latest {{ num_versions: {} }}}}".format(
val)
elif type == 'specific':
version_policy_str = "{{ specific {{ versions: {} }}}}".format(val)
else:
version_policy_str = "{ all { }}"
# Use a different model name for different kinds of models
model_name = tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan",
input_dtype, output0_dtype, output1_dtype)
if min_dim != 1 or max_dim != 32:
model_name = "{}-{}-{}".format(model_name, min_dim, max_dim)
config_dir = models_dir + "/" + model_name
if -1 in input_shape:
# Selects the sixth profile for FP32 datatype
# Note the min and max shapes of first and sixth
# profile are identical.
profile_index = 6 if input_dtype == np.float32 else 0
config = '''
name: "{}"
platform: "tensorrt_plan"
max_batch_size: {}
version_policy: {}
input [
{{
name: "INPUT0"
data_type: {}
dims: [ {} ]
}},
{{
name: "INPUT1"
data_type: {}
dims: [ {} ]
}}
]
output [
{{
name: "OUTPUT0"
data_type: {}
dims: [ {} ]
label_filename: "output0_labels.txt"
}},
{{
name: "OUTPUT1"
data_type: {}
dims: [ {} ]
}}
]
instance_group [
{{
profile:"{}"
}}
]
'''.format(model_name, max_batch, version_policy_str,
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(output0_dtype),
tu.shape_to_dims_str(output0_shape),
np_to_model_dtype(output1_dtype),
tu.shape_to_dims_str(output1_shape), profile_index)
else:
config = '''
name: "{}"
platform: "tensorrt_plan"
max_batch_size: {}
version_policy: {}
input [
{{
name: "INPUT0"
data_type: {}
dims: [ {} ]
}},
{{
name: "INPUT1"
data_type: {}
dims: [ {} ]
}}
]
output [
{{
name: "OUTPUT0"
data_type: {}
dims: [ {} ]
label_filename: "output0_labels.txt"
}},
{{
name: "OUTPUT1"
data_type: {}
dims: [ {} ]
}}
]
'''.format(model_name, max_batch, version_policy_str,
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(output0_dtype),
tu.shape_to_dims_str(output0_shape),
np_to_model_dtype(output1_dtype),
tu.shape_to_dims_str(output1_shape))
try:
os.makedirs(config_dir)
except OSError as ex:
pass # ignore existing dir
with open(config_dir + "/config.pbtxt", "w") as cfile:
cfile.write(config)
with open(config_dir + "/output0_labels.txt", "w") as lfile:
for l in range(output0_label_cnt):
lfile.write("label" + str(l) + "\n")
def create_onnx_modelfile(models_dir,
max_batch,
model_version,
input_shape,
output0_shape,
output1_shape,
input_dtype,
output0_dtype,
output1_dtype,
swap=False):
if not tu.validate_for_onnx_model(input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape,
output1_shape):
return
onnx_input_dtype = np_to_onnx_dtype(input_dtype)
onnx_output0_dtype = np_to_onnx_dtype(output0_dtype)
onnx_output1_dtype = np_to_onnx_dtype(output1_dtype)
onnx_input_shape, idx = tu.shape_to_onnx_shape(input_shape, 0)
onnx_output0_shape, idx = tu.shape_to_onnx_shape(input_shape, idx)
onnx_output1_shape, idx = tu.shape_to_onnx_shape(input_shape, idx)
# Create the model
model_name = tu.get_model_name("onnx_nobatch" if max_batch == 0 else "onnx",
input_dtype, output0_dtype, output1_dtype)
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
batch_dim = [] if max_batch == 0 else [None]
in0 = onnx.helper.make_tensor_value_info("INPUT0", onnx_input_dtype,
batch_dim + onnx_input_shape)
in1 = onnx.helper.make_tensor_value_info("INPUT1", onnx_input_dtype,
batch_dim + onnx_input_shape)
out0 = onnx.helper.make_tensor_value_info("OUTPUT0", onnx_output0_dtype,
batch_dim + onnx_output0_shape)
out1 = onnx.helper.make_tensor_value_info("OUTPUT1", onnx_output1_dtype,
batch_dim + onnx_output1_shape)
internal_in0 = onnx.helper.make_node("Identity", ["INPUT0"], ["_INPUT0"])
internal_in1 = onnx.helper.make_node("Identity", ["INPUT1"], ["_INPUT1"])
    # cast int8, int16 input to higher precision int as Onnx Add/Sub operator doesn't support those types
# Also casting String data type to int32
if ((onnx_input_dtype == onnx.TensorProto.INT8) or
(onnx_input_dtype == onnx.TensorProto.INT16) or
(onnx_input_dtype == onnx.TensorProto.STRING)):
internal_in0 = onnx.helper.make_node("Cast", ["INPUT0"], ["_INPUT0"],
to=onnx.TensorProto.INT32)
internal_in1 = onnx.helper.make_node("Cast", ["INPUT1"], ["_INPUT1"],
to=onnx.TensorProto.INT32)
add = onnx.helper.make_node("Add", ["_INPUT0", "_INPUT1"],
["CAST0" if not swap else "CAST1"])
sub = onnx.helper.make_node("Sub", ["_INPUT0", "_INPUT1"],
["CAST1" if not swap else "CAST0"])
cast0 = onnx.helper.make_node("Cast", ["CAST0"], ["OUTPUT0"],
to=onnx_output0_dtype)
cast1 = onnx.helper.make_node("Cast", ["CAST1"], ["OUTPUT1"],
to=onnx_output1_dtype)
# Avoid cast from float16 to float16
# (bug in Onnx Runtime, cast from float16 to float16 will become cast from float16 to float32)
if onnx_input_dtype == onnx.TensorProto.FLOAT16:
if onnx_output0_dtype == onnx_input_dtype:
cast0 = onnx.helper.make_node("Identity", ["CAST0"], ["OUTPUT0"])
if onnx_output1_dtype == onnx_input_dtype:
cast1 = onnx.helper.make_node("Identity", ["CAST1"], ["OUTPUT1"])
onnx_nodes = [internal_in0, internal_in1, add, sub, cast0, cast1]
onnx_inputs = [in0, in1]
onnx_outputs = [out0, out1]
graph_proto = onnx.helper.make_graph(onnx_nodes, model_name, onnx_inputs,
onnx_outputs)
if FLAGS.onnx_opset > 0:
model_opset = onnx.helper.make_operatorsetid("", FLAGS.onnx_opset)
model_def = onnx.helper.make_model(graph_proto,
producer_name="triton",
opset_imports=[model_opset])
else:
model_def = onnx.helper.make_model(graph_proto, producer_name="triton")
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
onnx.save(model_def, model_version_dir + "/model.onnx")
def create_onnx_modelconfig(models_dir, max_batch, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype, output0_label_cnt,
version_policy):
if not tu.validate_for_onnx_model(input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape,
output1_shape):
return
# Use a different model name for the non-batching variant
model_name = tu.get_model_name("onnx_nobatch" if max_batch == 0 else "onnx",
input_dtype, output0_dtype, output1_dtype)
config_dir = models_dir + "/" + model_name
# [TODO] move create_general_modelconfig() out of emu as it is general
# enough for all backends to use
config = emu.create_general_modelconfig(model_name,
"onnxruntime_onnx",
max_batch,
emu.repeat(input_dtype, 2),
emu.repeat(input_shape, 2),
emu.repeat(None, 2),
[output0_dtype, output1_dtype],
[output0_shape, output1_shape],
emu.repeat(None, 2),
["output0_labels.txt", None],
version_policy=version_policy,
force_tensor_number_suffix=True)
try:
os.makedirs(config_dir)
except OSError as ex:
pass # ignore existing dir
with open(config_dir + "/config.pbtxt", "w") as cfile:
cfile.write(config)
with open(config_dir + "/output0_labels.txt", "w") as lfile:
for l in range(output0_label_cnt):
lfile.write("label" + str(l) + "\n")
def create_libtorch_modelfile(models_dir,
max_batch,
model_version,
input_shape,
output0_shape,
output1_shape,
input_dtype,
output0_dtype,
output1_dtype,
swap=False):
if not tu.validate_for_libtorch_model(input_dtype, output0_dtype,
output1_dtype, input_shape,
output0_shape, output1_shape):
return
torch_input_dtype = np_to_torch_dtype(input_dtype)
torch_output0_dtype = np_to_torch_dtype(output0_dtype)
torch_output1_dtype = np_to_torch_dtype(output1_dtype)
model_name = tu.get_model_name(
"libtorch_nobatch" if max_batch == 0 else "libtorch", input_dtype,
output0_dtype, output1_dtype)
    # handle -1 (variable) dims since we can't create a tensor with shape [-1]
input_shape = [abs(ips) for ips in input_shape]
# Create the model
if not swap:
class AddSubNet(nn.Module):
def __init__(self, *args):
self.torch_output0_dtype = args[0][0]
self.torch_output1_dtype = args[0][1]
super(AddSubNet, self).__init__()
def forward(self, input0, input1):
return (input0 + input1).to(self.torch_output0_dtype), \
(input0 - input1).to(self.torch_output1_dtype)
addSubModel = AddSubNet((torch_output0_dtype, torch_output1_dtype))
example_input = torch.zeros(input_shape, dtype=torch_input_dtype)
traced = torch.jit.trace(addSubModel, (example_input, example_input))
else:
class SubAddNet(nn.Module):
def __init__(self, *args):
self.torch_output0_dtype = args[0][0]
self.torch_output1_dtype = args[0][1]
super(SubAddNet, self).__init__()
def forward(self, input0, input1):
return (input0 - input1).to(self.torch_output0_dtype), \
(input0 + input1).to(self.torch_output1_dtype)
subAddModel = SubAddNet((torch_output0_dtype, torch_output1_dtype))
example_input = torch.zeros(input_shape, dtype=torch_input_dtype)
traced = torch.jit.trace(subAddModel, (example_input, example_input))
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
traced.save(model_version_dir + "/model.pt")
def create_libtorch_modelconfig(models_dir, max_batch, model_version,
input_shape, output0_shape, output1_shape,
input_dtype, output0_dtype, output1_dtype,
output0_label_cnt, version_policy):
if not tu.validate_for_libtorch_model(input_dtype, output0_dtype,
output1_dtype, input_shape,
output0_shape, output1_shape):
return
# Unpack version policy
version_policy_str = "{ latest { num_versions: 1 }}"
if version_policy is not None:
type, val = version_policy
if type == 'latest':
version_policy_str = "{{ latest {{ num_versions: {} }}}}".format(
val)
elif type == 'specific':
version_policy_str = "{{ specific {{ versions: {} }}}}".format(val)
else:
version_policy_str = "{ all { }}"
# Use a different model name for the non-batching variant
model_name = tu.get_model_name(
"libtorch_nobatch" if max_batch == 0 else "libtorch", input_dtype,
output0_dtype, output1_dtype)
config_dir = models_dir + "/" + model_name
config = '''
name: "{}"
platform: "pytorch_libtorch"
max_batch_size: {}
version_policy: {}
input [
{{
name: "INPUT__0"
data_type: {}
dims: [ {} ]
}},
{{
name: "INPUT__1"
data_type: {}
dims: [ {} ]
}}
]
output [
{{
name: "OUTPUT__0"
data_type: {}
dims: [ {} ]
label_filename: "output0_labels.txt"
}},
{{
name: "OUTPUT__1"
data_type: {}
dims: [ {} ]
}}
]
'''.format(model_name, max_batch, version_policy_str,
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(output0_dtype),
tu.shape_to_dims_str(output0_shape),
np_to_model_dtype(output1_dtype),
tu.shape_to_dims_str(output1_shape))
try:
os.makedirs(config_dir)
except OSError as ex:
pass # ignore existing dir
with open(config_dir + "/config.pbtxt", "w") as cfile:
cfile.write(config)
with open(config_dir + "/output0_labels.txt", "w") as lfile:
for l in range(output0_label_cnt):
lfile.write("label" + str(l) + "\n")
def create_openvino_modelfile(models_dir,
max_batch,
model_version,
input_shape,
output0_shape,
output1_shape,
input_dtype,
output0_dtype,
output1_dtype,
swap=False):
if not tu.validate_for_openvino_model(input_dtype, output0_dtype,
output1_dtype, input_shape,
output0_shape, output1_shape):
return
# Create the model
model_name = tu.get_model_name(
"openvino_nobatch" if max_batch == 0 else "openvino", input_dtype,
output0_dtype, output1_dtype)
model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)
batch_dim = () if max_batch == 0 else (max_batch,)
in0 = ng.parameter(shape=batch_dim + input_shape, dtype=input_dtype, name="INPUT0")
in1 = ng.parameter(shape=batch_dim + input_shape, dtype=input_dtype, name="INPUT1")
r0 = ng.add(in0, in1) if not swap else ng.subtract(in0, in1)
r1 = ng.subtract(in0, in1) if not swap else ng.add(in0, in1)
result0 = ng.reshape(r0, batch_dim + output0_shape, special_zero=False)
result1 = ng.reshape(r1, batch_dim + output1_shape, special_zero=False)
op0 = ng.convert(result0, destination_type=output0_dtype, name="OUTPUT0")
op1 = ng.convert(result1, destination_type=output1_dtype, name="OUTPUT1")
function = ng.impl.Function([op0, op1], [in0, in1], model_name)
ie_network = IENetwork(ng.impl.Function.to_capsule(function))
try:
os.makedirs(model_version_dir)
except OSError as ex:
pass # ignore existing dir
ie_network.serialize(model_version_dir + "/model.xml",
model_version_dir + "/model.bin")
def create_openvino_modelconfig(models_dir, max_batch, model_version,
input_shape, output0_shape, output1_shape,
input_dtype, output0_dtype, output1_dtype,
output0_label_cnt, version_policy):
if not tu.validate_for_openvino_model(input_dtype, output0_dtype,
output1_dtype, input_shape,
output0_shape, output1_shape):
return
# Unpack version policy
version_policy_str = "{ latest { num_versions: 1 }}"
if version_policy is not None:
type, val = version_policy
if type == 'latest':
version_policy_str = "{{ latest {{ num_versions: {} }}}}".format(
val)
elif type == 'specific':
version_policy_str = "{{ specific {{ versions: {} }}}}".format(val)
else:
version_policy_str = "{ all { }}"
# Use a different model name for the non-batching variant
model_name = tu.get_model_name(
"openvino_nobatch" if max_batch == 0 else "openvino", input_dtype,
output0_dtype, output1_dtype)
config_dir = models_dir + "/" + model_name
# platform is empty and backend is 'openvino' for openvino model
config = '''
name: "{}"
backend: "openvino"
max_batch_size: {}
version_policy: {}
input [
{{
name: "INPUT0"
data_type: {}
dims: [ {} ]
}},
{{
name: "INPUT1"
data_type: {}
dims: [ {} ]
}}
]
output [
{{
name: "OUTPUT0"
data_type: {}
dims: [ {} ]
label_filename: "output0_labels.txt"
}},
{{
name: "OUTPUT1"
data_type: {}
dims: [ {} ]
}}
]
instance_group [ {{ kind: KIND_CPU }}]
'''.format(model_name, max_batch, version_policy_str,
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
np_to_model_dtype(output0_dtype),
tu.shape_to_dims_str(output0_shape),
np_to_model_dtype(output1_dtype),
tu.shape_to_dims_str(output1_shape))
try:
os.makedirs(config_dir)
except OSError as ex:
pass # ignore existing dir
with open(config_dir + "/config.pbtxt", "w") as cfile:
cfile.write(config)
with open(config_dir + "/output0_labels.txt", "w") as lfile:
for l in range(output0_label_cnt):
lfile.write("label" + str(l) + "\n")
def create_models(models_dir,
input_dtype,
output0_dtype,
output1_dtype,
input_shape,
output0_shape,
output1_shape,
output0_label_cnt,
version_policy=None):
model_version = 1
# Create two models, one that supports batching with a max-batch
# of 8, and one that does not with a max-batch of 0
if FLAGS.graphdef:
# max-batch 8
create_graphdef_modelconfig(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_graphdef_modelfile(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
# max-batch 0
create_graphdef_modelconfig(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_graphdef_modelfile(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
if FLAGS.savedmodel:
# max-batch 8
create_savedmodel_modelconfig(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_savedmodel_modelfile(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
# max-batch 0
create_savedmodel_modelconfig(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_savedmodel_modelfile(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
if FLAGS.tensorrt:
# max-batch 8
suffix = ()
if input_dtype == np.int8 or output0_dtype == np.int8 or output1_dtype == np.int8:
suffix = (1, 1)
create_plan_modelconfig(models_dir, 8, model_version,
input_shape + suffix, output0_shape + suffix,
output1_shape + suffix, input_dtype,
output0_dtype, output1_dtype, output0_label_cnt,
version_policy)
create_plan_modelfile(models_dir, 8, model_version,
input_shape + suffix, output0_shape + suffix,
output1_shape + suffix, input_dtype,
output0_dtype, output1_dtype)
# max-batch 0
create_plan_modelconfig(models_dir, 0, model_version,
input_shape + suffix, output0_shape + suffix,
output1_shape + suffix, input_dtype,
output0_dtype, output1_dtype, output0_label_cnt,
version_policy)
create_plan_modelfile(models_dir, 0, model_version,
input_shape + suffix, output0_shape + suffix,
output1_shape + suffix, input_dtype,
output0_dtype, output1_dtype)
if -1 in input_shape:
# models for testing optimization profiles
create_plan_modelconfig(models_dir,
8,
model_version,
input_shape + suffix,
output0_shape + suffix,
output1_shape + suffix,
input_dtype,
output0_dtype,
output1_dtype,
output0_label_cnt,
version_policy,
min_dim=4,
max_dim=32)
create_plan_modelfile(models_dir,
8,
model_version,
input_shape + suffix,
output0_shape + suffix,
output1_shape + suffix,
input_dtype,
output0_dtype,
output1_dtype,
min_dim=4,
max_dim=32)
if FLAGS.onnx:
# max-batch 8
create_onnx_modelconfig(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype, output0_label_cnt,
version_policy)
create_onnx_modelfile(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
# max-batch 0
create_onnx_modelconfig(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype, output0_label_cnt,
version_policy)
create_onnx_modelfile(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
if FLAGS.libtorch:
# max-batch 8
create_libtorch_modelconfig(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_libtorch_modelfile(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
# max-batch 0
create_libtorch_modelconfig(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_libtorch_modelfile(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
if FLAGS.openvino:
# max-batch 8
create_openvino_modelconfig(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_openvino_modelfile(models_dir, 8, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
# max-batch 0
create_openvino_modelconfig(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
create_openvino_modelfile(models_dir, 0, model_version, input_shape,
output0_shape, output1_shape, input_dtype,
output0_dtype, output1_dtype)
if FLAGS.ensemble:
for pair in emu.platform_types_and_validation():
if not pair[1](input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape, output1_shape):
continue
config_input_shape = input_shape
config_output0_shape = output0_shape
config_output1_shape = output1_shape
if pair[0] == "plan":
if len(input_shape) == 1 and input_dtype == np.int8:
config_input_shape = (input_shape[0], 1, 1)
if len(output0_shape) == 1 and output0_dtype == np.int8:
config_output0_shape = (output0_shape[0], 1, 1)
if len(output1_shape) == 1 and output1_dtype == np.int8:
config_output1_shape = (output1_shape[0], 1, 1)
# max-batch 8
emu.create_ensemble_modelconfig(pair[0], models_dir, 8,
model_version, config_input_shape,
config_output0_shape,
config_output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
emu.create_ensemble_modelfile(pair[0], models_dir, 8, model_version,
config_input_shape,
config_output0_shape,
config_output1_shape, input_dtype,
output0_dtype, output1_dtype)
# max-batch 0
emu.create_ensemble_modelconfig(pair[0], models_dir, 0,
model_version, config_input_shape,
config_output0_shape,
config_output1_shape, input_dtype,
output0_dtype, output1_dtype,
output0_label_cnt, version_policy)
emu.create_ensemble_modelfile(pair[0], models_dir, 0, model_version,
config_input_shape,
config_output0_shape,
config_output1_shape, input_dtype,
output0_dtype, output1_dtype)
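# Convenience wrapper around create_models() for fixed-shape models using
# 16-element one-dimensional input/output tensors.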
def create_fixed_models(models_dir,
input_dtype,
output0_dtype,
output1_dtype,
version_policy=None):
input_size = 16
create_models(models_dir, input_dtype, output0_dtype, output1_dtype,
(input_size,), (input_size,), (input_size,), input_size,
version_policy)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--models_dir',
type=str,
required=True,
help='Top-level model directory')
parser.add_argument('--graphdef',
required=False,
action='store_true',
help='Generate GraphDef models')
parser.add_argument('--savedmodel',
required=False,
action='store_true',
help='Generate SavedModel models')
parser.add_argument('--tensorrt',
required=False,
action='store_true',
help='Generate TensorRT PLAN models')
parser.add_argument('--onnx',
required=False,
action='store_true',
help='Generate Onnx Runtime Onnx models')
parser.add_argument(
'--onnx_opset',
type=int,
required=False,
default=0,
help='Opset used for Onnx models. Default is to use ONNXRT default')
parser.add_argument('--libtorch',
required=False,
action='store_true',
help='Generate Pytorch LibTorch models')
parser.add_argument('--openvino',
required=False,
action='store_true',
help='Generate Openvino models')
parser.add_argument('--variable',
required=False,
action='store_true',
                        help='Use variable-shape tensors for input/output')
parser.add_argument('--ensemble',
required=False,
action='store_true',
help='Generate ensemble models against the models' +
' in all platforms. Note that the models generated' +
' are not completed.')
FLAGS, unparsed = parser.parse_known_args()
if FLAGS.graphdef or FLAGS.savedmodel:
import tensorflow as tf
from tensorflow.python.framework import graph_io, graph_util
if FLAGS.tensorrt:
import tensorrt as trt
if FLAGS.onnx:
import onnx
if FLAGS.libtorch:
import torch
from torch import nn
if FLAGS.openvino:
from openvino.inference_engine import IECore, IENetwork
import ngraph as ng
import test_util as tu
# Tests with models that accept fixed-shape input/output tensors
if not FLAGS.variable:
create_fixed_models(FLAGS.models_dir, np.int8, np.int8, np.int8,
('latest', 1))
create_fixed_models(FLAGS.models_dir, np.int16, np.int16, np.int16,
('latest', 2))
create_fixed_models(FLAGS.models_dir, np.int32, np.int32, np.int32,
('all', None))
create_fixed_models(FLAGS.models_dir, np.int64, np.int64, np.int64)
create_fixed_models(FLAGS.models_dir, np.float16, np.float16,
np.float16, ('specific', [
1,
]))
create_fixed_models(FLAGS.models_dir, np.float32, np.float32,
np.float32, ('specific', [1, 3]))
create_fixed_models(FLAGS.models_dir, np.float16, np.float32,
np.float32)
create_fixed_models(FLAGS.models_dir, np.int32, np.int8, np.int8)
create_fixed_models(FLAGS.models_dir, np.int8, np.int32, np.int32)
create_fixed_models(FLAGS.models_dir, np.int32, np.int8, np.int16)
create_fixed_models(FLAGS.models_dir, np.int32, np.float32, np.float32)
create_fixed_models(FLAGS.models_dir, np.float32, np.int32, np.int32)
create_fixed_models(FLAGS.models_dir, np.int32, np.float16, np.int16)
create_fixed_models(FLAGS.models_dir, np_dtype_string, np.int32,
np.int32)
create_fixed_models(FLAGS.models_dir, np_dtype_string, np_dtype_string,
np_dtype_string)
create_fixed_models(FLAGS.models_dir, np_dtype_string, np.int32,
np_dtype_string)
create_fixed_models(FLAGS.models_dir, np_dtype_string, np_dtype_string,
np.int32)
create_fixed_models(FLAGS.models_dir, np.int32, np_dtype_string,
np_dtype_string)
create_fixed_models(FLAGS.models_dir, np.int32, np.int32,
np_dtype_string)
create_fixed_models(FLAGS.models_dir, np.int32, np_dtype_string,
np.int32)
# Make multiple versions of some models for version testing
# (they use different version policies when created above)
if FLAGS.graphdef:
for vt in [np.float16, np.float32, np.int8, np.int16, np.int32]:
create_graphdef_modelfile(FLAGS.models_dir,
8,
2, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_graphdef_modelfile(FLAGS.models_dir,
8,
3, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_graphdef_modelfile(FLAGS.models_dir,
0,
2, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_graphdef_modelfile(FLAGS.models_dir,
0,
3, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
if FLAGS.savedmodel:
for vt in [np.float16, np.float32, np.int8, np.int16, np.int32]:
create_savedmodel_modelfile(FLAGS.models_dir,
8,
2, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_savedmodel_modelfile(FLAGS.models_dir,
8,
3, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_savedmodel_modelfile(FLAGS.models_dir,
0,
2, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_savedmodel_modelfile(FLAGS.models_dir,
0,
3, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
if FLAGS.tensorrt:
for vt in [np.float32, np.float16, np.int32]:
create_plan_modelfile(FLAGS.models_dir,
8,
2, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_plan_modelfile(FLAGS.models_dir,
8,
3, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_plan_modelfile(FLAGS.models_dir,
0,
2, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_plan_modelfile(FLAGS.models_dir,
0,
3, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
vt = np.int8
        # handle INT8 separately as it doesn't allow 1d tensors
create_plan_modelfile(FLAGS.models_dir,
8,
2, (16, 1, 1), (16, 1, 1), (16, 1, 1),
vt,
vt,
vt,
swap=True)
create_plan_modelfile(FLAGS.models_dir,
8,
3, (16, 1, 1), (16, 1, 1), (16, 1, 1),
vt,
vt,
vt,
swap=True)
create_plan_modelfile(FLAGS.models_dir,
0,
2, (16, 1, 1), (16, 1, 1), (16, 1, 1),
vt,
vt,
vt,
swap=True)
create_plan_modelfile(FLAGS.models_dir,
0,
3, (16, 1, 1), (16, 1, 1), (16, 1, 1),
vt,
vt,
vt,
swap=True)
if FLAGS.onnx:
for vt in [np.float16, np.float32, np.int8, np.int16, np.int32]:
create_onnx_modelfile(FLAGS.models_dir,
8,
2, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_onnx_modelfile(FLAGS.models_dir,
8,
3, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_onnx_modelfile(FLAGS.models_dir,
0,
2, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_onnx_modelfile(FLAGS.models_dir,
0,
3, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
if FLAGS.libtorch:
for vt in [np.float32, np.int32, np.int16, np.int8]:
create_libtorch_modelfile(FLAGS.models_dir,
8,
2, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_libtorch_modelfile(FLAGS.models_dir,
8,
3, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_libtorch_modelfile(FLAGS.models_dir,
0,
2, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_libtorch_modelfile(FLAGS.models_dir,
0,
3, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
if FLAGS.openvino:
for vt in [np.float16, np.float32, np.int8, np.int16, np.int32]:
create_openvino_modelfile(FLAGS.models_dir,
8,
2, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_openvino_modelfile(FLAGS.models_dir,
8,
3, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_openvino_modelfile(FLAGS.models_dir,
0,
2, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
create_openvino_modelfile(FLAGS.models_dir,
0,
3, (16,), (16,), (16,),
vt,
vt,
vt,
swap=True)
if FLAGS.ensemble:
for pair in emu.platform_types_and_validation():
for vt in [np.float16, np.float32, np.int8, np.int16, np.int32]:
shape = (16, 1, 1) if (pair[0] == "plan" and
vt == np.int8) else (16,)
if not pair[1](vt, vt, vt, shape, shape, shape):
continue
emu.create_ensemble_modelfile(pair[0],
FLAGS.models_dir,
8,
2,
shape,
shape,
shape,
vt,
vt,
vt,
swap=True)
emu.create_ensemble_modelfile(pair[0],
FLAGS.models_dir,
8,
3,
shape,
shape,
shape,
vt,
vt,
vt,
swap=True)
emu.create_ensemble_modelfile(pair[0],
FLAGS.models_dir,
0,
2,
shape,
shape,
shape,
vt,
vt,
vt,
swap=True)
emu.create_ensemble_modelfile(pair[0],
FLAGS.models_dir,
0,
3,
shape,
shape,
shape,
vt,
vt,
vt,
swap=True)
# Tests with models that accept variable-shape input/output tensors
if FLAGS.variable:
create_models(FLAGS.models_dir, np.float32, np.float32, np.float32,
(-1,), (-1,), (-1,), 16)
create_models(FLAGS.models_dir, np.float32, np.int32, np.int32,
(-1, -1), (-1, -1), (-1, -1), 16)
create_models(FLAGS.models_dir, np.float32, np.int64, np.int64, (8, -1),
(8, -1), (8, -1), 32)
create_models(FLAGS.models_dir, np.float32, np.int32, np.int64,
(-1, 8, -1), (-1, 8, -1), (-1, 8, -1), 32)
create_models(FLAGS.models_dir, np.float32, np.float32, np.int32, (-1,),
(-1,), (-1,), 16)
create_models(FLAGS.models_dir, np.int32, np.int32, np.int32, (-1, -1),
(-1, -1), (-1, -1), 16)
create_models(FLAGS.models_dir, np.int32, np.int32, np.float32,
(-1, 8, -1), (-1, 8, -1), (-1, 8, -1), 32)
create_models(FLAGS.models_dir, np_dtype_string, np_dtype_string,
np_dtype_string, (-1,), (-1,), (-1,), 16)
create_models(FLAGS.models_dir, np_dtype_string, np.int32, np.int32,
(-1, -1), (-1, -1), (-1, -1), 16)
create_models(FLAGS.models_dir, np_dtype_string, np_dtype_string,
np.int32, (8, -1), (8, -1), (8, -1), 32)
create_models(FLAGS.models_dir, np_dtype_string, np.int32,
np_dtype_string, (-1, 8, -1), (-1, 8, -1), (-1, 8, -1),
32)
if FLAGS.ensemble:
# Create utility models used in ensemble
# nop (only creates model config, should add model file before use)
model_dtypes = ["TYPE_BOOL", "TYPE_STRING"]
for s in [8, 16, 32, 64]:
for t in ["INT", "UINT", "FP"]:
if t == "FP" and s == 8:
continue
model_dtypes.append("TYPE_{}{}".format(t, s))
for model_dtype in model_dtypes:
# Use variable size to handle all shape. Note: piping variable size output
# to fixed size model is not safe but doable
for model_shape in [(-1,), (-1, -1), (-1, -1, -1)]:
emu.create_nop_modelconfig(FLAGS.models_dir, model_shape,
model_dtype)
| 41.771318 | 108 | 0.513547 |
bc8068af8abd32fb825028a187f30ff1298286cd | 385 | py | Python | esmvalcore/cmor/_fixes/cmip6/cesm2_waccm_fv2.py | jvegreg/ESMValCore | 03eb1c942bf1dc3be98cb30c3592b42e82a94f16 | [
"Apache-2.0"
] | 26 | 2019-06-07T07:50:07.000Z | 2022-03-22T21:04:01.000Z | esmvalcore/cmor/_fixes/cmip6/cesm2_waccm_fv2.py | jvegreg/ESMValCore | 03eb1c942bf1dc3be98cb30c3592b42e82a94f16 | [
"Apache-2.0"
] | 1,370 | 2019-06-06T09:03:07.000Z | 2022-03-31T04:37:20.000Z | esmvalcore/cmor/_fixes/cmip6/cesm2_waccm_fv2.py | valeriupredoi/ESMValCore | b46b948c47d8579d997b28501f8588f5531aa354 | [
"Apache-2.0"
] | 26 | 2019-07-03T13:08:48.000Z | 2022-03-02T16:08:47.000Z | """Fixes for CESM2-WACCM-FV2 model."""
from .cesm2 import Tas as BaseTas
from .cesm2 import Fgco2 as BaseFgco2
from .cesm2_waccm import Cl as BaseCl
from .cesm2_waccm import Cli as BaseCli
from .cesm2_waccm import Clw as BaseClw
from ..common import SiconcFixScalarCoord
Cl = BaseCl
Cli = BaseCli
Clw = BaseClw
Fgco2 = BaseFgco2
Siconc = SiconcFixScalarCoord
Tas = BaseTas
| 14.807692 | 41 | 0.766234 |
faeeff5f06678ae1703b29b194f06d18b5fc11f6 | 8,116 | py | Python | chinese_checkers/TinyGUI.py | davidschulte/alpha-thesis | a9d6d2f0b91a2c8d6ae8605db1e3e92586cc1866 | [
"MIT"
] | null | null | null | chinese_checkers/TinyGUI.py | davidschulte/alpha-thesis | a9d6d2f0b91a2c8d6ae8605db1e3e92586cc1866 | [
"MIT"
] | null | null | null | chinese_checkers/TinyGUI.py | davidschulte/alpha-thesis | a9d6d2f0b91a2c8d6ae8605db1e3e92586cc1866 | [
"MIT"
] | null | null | null | import pygame
import numpy as np
from chinese_checkers.TinyChineseCheckersGame import ChineseCheckersGame as Game
MOVES = [[0, 1], [-1, 1], [1, -1], [1, 0]]
DIM = 9
Y_OFFSET = 17
X_OFFSET = 17
Y_STEP = 47.2 * 1.5
X_STEP = 54.5 * 1.5
R = 12
RED = (255,0,0)
GREEN = (0, 200, 0 )
LIGHT_GREEN = (0, 100, 0)
BLUE = (0,0,255,255)
YELLOW = (255,255,0)
PINK = (255,105,180)
BLACK = (0,0,0,0)
WHITE = (255, 255, 255)
ENDPOINTS_RED = [[0, 6], [2, 4], [2, 6]]
STARTPOINTS_RED = [[8, 2], [6, 2], [6, 4]]
ENDPOINTS_YELLOW = [[6, 4], [6, 6], [4, 6]]
STARTPOINTS_YELLOW = [[2, 2], [2, 4], [4, 2]]
ENDPOINTS_GREEN = [[6, 0], [6, 2], [4, 2]]
STARTPOINTS_GREEN = [[2, 8], [2, 6], [4, 6]]
AREAS = [ENDPOINTS_RED, STARTPOINTS_RED, ENDPOINTS_YELLOW, STARTPOINTS_YELLOW, ENDPOINTS_GREEN, STARTPOINTS_GREEN]
line_width = 3
# 0 1 2 3 4 5 6 7 8 9 10 11 12
START = np.array([[4, 4, 4, 4, 4, 4, 0, 4, 4], # 0
[4, 4, 4, 4, 4, 0, 0, 4, 4], # 1
[4, 4, 2, 2, 2, 0, 3, 3, 3], # 2
[4, 4, 2, 2, 0, 0, 3, 3, 4], # 3
[4, 4, 2, 0, 0, 0, 3, 4, 4], # 4
[4, 0, 0, 0, 0, 0, 0, 4, 4], # 5
[0, 0, 1, 1, 1, 0, 0, 4, 4], # 6
[4, 4, 1, 1, 4, 4, 4, 4, 4], # 7
[4, 4, 1, 4, 4, 4, 4, 4, 4]]).astype('int8') # 8
# one = myfont.render('1', False, (0, 0, 0))
# two = myfont.render('2', False, (0, 0, 0))
# three = myfont.render('3', False, (0, 0, 0))
# four = myfont.render('4', False, (0, 0, 0))
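# Pygame front end for the tiny Chinese Checkers board: draws the board,
# pieces and legal-move highlights, and converts a human player's mouse
# clicks into game actions.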
class GUI:
def __init__(self, timer):
pygame.init()
self.window = pygame.display.set_mode((524, 600))
self.window.fill(WHITE)
self.timer = timer
self.draw_areas(self.window)
self.draw_lines(self.window)
self.old_board = START
self.game = Game()
self.logic = self.game.get_board()
pygame.font.init()
self.myfont = pygame.font.SysFont('Arial', 15)
def draw_figure(self, surface, row, column, radius, color):
"""
draws piece on board
:param surface: surface to draw on
:param row: row of piece
:param column: columns of piece
:param radius: radius of circle in drawing
:param color: color of piece
"""
y = Y_OFFSET + row * Y_STEP
x = X_OFFSET + column * X_STEP + (row - 6) * X_STEP / 2
pygame.draw.circle(surface, color, (int(x),int(y)), radius)
def coordinates_to_pos(self, row, column):
"""
converts rows and columns to coordinates in the frame
"""
y = Y_OFFSET + row * Y_STEP
x = X_OFFSET + column * X_STEP + (row - 6) * X_STEP / 2
return y, x
def pos_to_board_coordinates(self, pos):
"""
converts coordinates to row and column
if the coordinates do not belong to the board, -1,-1 is returned
"""
(pos_x, pos_y) = pos
for row in range(DIM):
for column in range(DIM):
y, x = self.coordinates_to_pos(row, column)
if (pos_y - y)**2 + (pos_x - x) ** 2 < R ** 2:
return row, column
return -1, -1
def draw_line(self, surface, y1, x1, y2, x2, color):
pygame.draw.line(surface,color, (x1, y1), (x2, y2), line_width)
def draw_lines(self, surface):
"""
draws connection lines between fields
"""
for row in range(DIM):
for column in range(DIM):
for m in range(4):
n_y, n_x = row + MOVES[m][0], column + MOVES[m][1]
if n_y < DIM and n_x < DIM and n_y > -1 and n_x > -1:
if START[row,column] != 4 and START[n_y,n_x] != 4:
y1_pos, x1_pos = self.coordinates_to_pos(row, column)
y2_pos, x2_pos = self.coordinates_to_pos(n_y, n_x)
self.draw_line(surface, y1_pos, x1_pos, y2_pos, x2_pos, BLACK)
def draw_areas(self, surface):
"""
colors the areas of the board
"""
for area in range(6):
color = GREEN
if area < 4:
color = YELLOW
if area < 2:
color = RED
a = AREAS[area]
coordinates = [0] * 3
for p in range(3):
row, column = a[p]
y_c, x_c = self.coordinates_to_pos(row, column)
coordinates[p] = (x_c, y_c)
pygame.draw.polygon(surface, color, coordinates)
def draw_board(self, board, current_player, wait):
"""
draws board
:param board: current board
:param current_player: current player
:param wait: boolean that denotes if there should be a pause between displays
"""
for event in pygame.event.get():
pass
for y in range(DIM):
for x in range(DIM):
if board[y, x] in [0, 1, 2, 3]:
if board[y, x] == 0:
color = WHITE
elif board[y, x] == 1:
color = RED
elif board[y, x] == 2:
color = YELLOW
elif board[y, x] == 3:
color = GREEN
if self.old_board[y,x] == board[y,x]:
self.draw_figure(self.window, y, x, R, BLACK)
else:
self.draw_figure(self.window, y, x, R, PINK)
self.draw_figure(self.window, y, x, R-2, color)
# draw_figure(window, y, x, 2, BLACK)
step_display = self.myfont.render("Player " + str(current_player), False, (0, 0, 0))
pygame.draw.rect(self.window, WHITE, [10, 10, 50, 30])
if current_player >= 0:
self.window.blit(step_display, (10,10))
pygame.display.update()
self.old_board = board
if wait:
pygame.time.wait(self.timer)
# timer -= 1
def draw_possibles(self, possible_board):
"""
draws possible fields to move, when a piece was selected
"""
for y in range(DIM):
for x in range(DIM):
if possible_board[y, x] in [1, 2, 3]:
self.draw_figure(self.window, y, x, R, BLACK)
self.draw_figure(self.window, y, x, R-2, PINK)
pygame.display.update()
def get_action(self, board, player):
"""
:param board: board
:param player: current player
:return: returns action, after a human player chose it
"""
selected = False
possible_board = None
start_y, start_x, end_y, end_x = -1, -1, -1, -1
while True:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONUP:
pos = pygame.mouse.get_pos()
y, x = self.pos_to_board_coordinates(pos)
if y != -1:
if selected:
if possible_board[y, x] in [1, 2, 3]:
end_y, end_x = y, x
action = self.logic.get_action_by_coordinates(start_y, start_x, end_y, end_x, player)
return action
else:
selected = False
self.draw_board(board, player, False)
else:
if board[y, x] == player:
start_y, start_x = y, x
possible_board = self.game.get_possible_board(y, x, board, player)
self.draw_possibles(possible_board)
selected = True
def snapshot(self, board, filename):
"""
saves a screenshot of the game
"""
self.old_board = board
self.draw_board(board, -1, False)
pygame.image.save(self.window, filename)
| 35.441048 | 117 | 0.483859 |
b14a512886a1476bb60d31ae9e4c5a7c58c595cb | 422 | py | Python | src/medicineinventory/forms.py | vandana0608/Pharmacy-Managament | f99bdec11c24027a432858daa19247a21cecc092 | [
"bzip2-1.0.6"
] | null | null | null | src/medicineinventory/forms.py | vandana0608/Pharmacy-Managament | f99bdec11c24027a432858daa19247a21cecc092 | [
"bzip2-1.0.6"
] | null | null | null | src/medicineinventory/forms.py | vandana0608/Pharmacy-Managament | f99bdec11c24027a432858daa19247a21cecc092 | [
"bzip2-1.0.6"
] | null | null | null | # from django import forms
# #from django.forms import inlineformset_factory
# from .models import medicineinventory
# class MedicineInventoryForm(forms.ModelForm):
# class Meta:
# model = medicineinventory
# exclude = ()
# #MedicineInventoryFormSet = inlineformset_factory(medicineinventory,medicine_group,form=MedicineInventoryForm,fields=['medicine_groups','medicine_tax'],can_delete=True)
| 30.142857 | 174 | 0.763033 |
0bbbdb9954ca69ffd0cf92de7a7cbb7577cf8043 | 6,444 | py | Python | object_detection/inference_over_image.py | apacha/Mensural-Detector | d924a651bca5ccb97c7b45861b9ef5ef6e4cb26e | [
"MIT"
] | 9 | 2018-12-21T15:11:43.000Z | 2021-04-28T06:49:30.000Z | object_detection/inference_over_image.py | apacha/Mensural-Detector | d924a651bca5ccb97c7b45861b9ef5ef6e4cb26e | [
"MIT"
] | null | null | null | object_detection/inference_over_image.py | apacha/Mensural-Detector | d924a651bca5ccb97c7b45861b9ef5ef6e4cb26e | [
"MIT"
] | 6 | 2019-01-25T02:53:39.000Z | 2021-04-28T06:49:33.000Z | import numpy as np
import tensorflow as tf
import argparse
from PIL import Image
from object_detection.utils import ops as utils_ops, label_map_util, visualization_utils as vis_util
if tf.__version__ < '1.4.0':
raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')
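# Converts a PIL image into the (height, width, 3) uint8 array expected by the
# detection graph.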
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
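# Runs one forward pass of the frozen detection graph, fetching whichever
# detection tensors it exposes and reframing mask predictions (if any) from
# box coordinates to image coordinates.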
def run_inference_for_single_image(image, graph):
with graph.as_default():
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
return output_dict
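# Loads a frozen GraphDef from disk into a fresh tf.Graph for inference.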
def load_detection_graph(path_to_checkpoint):
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(path_to_checkpoint, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return detection_graph
def load_category_index(path_to_labels, number_of_classes):
# Load label map
label_map = label_map_util.load_labelmap(path_to_labels)
categories = label_map_util.convert_label_map_to_categories(label_map,
max_num_classes=number_of_classes,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
return category_index
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Performs detection over input image given a trained detector.')
parser.add_argument('--inference_graph', dest='inference_graph', type=str, required=True,
help='Path to the frozen inference graph.')
parser.add_argument('--label_map', dest='label_map', type=str, required=True,
                        help='Path to the label map, which is a json-file that maps each category name to a unique number.',
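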
default="mapping.txt")
parser.add_argument('--number_of_classes', dest='number_of_classes', type=int, default=32,
help='Number of classes.')
parser.add_argument('--input_image', dest='input_image', type=str, required=True, help='Path to the input image.')
parser.add_argument('--output_image', dest='output_image', type=str, default='detection.jpg',
help='Path to the output image.')
args = parser.parse_args()
# Path to frozen detection graph. This is the actual model that is used for the object detection.
# PATH_TO_CKPT = '/home/jcalvo/Escritorio/Current/Mensural Detector/mensural-detector/output_inference_graph.pb/frozen_inference_graph.pb'
path_to_frozen_inference_graph = args.inference_graph
path_to_labels = args.label_map
number_of_classes = args.number_of_classes
input_image = args.input_image
output_image = args.output_image
# Read frozen graph
detection_graph = load_detection_graph(path_to_frozen_inference_graph)
category_index = load_category_index(path_to_labels, number_of_classes)
image = Image.open(input_image)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Actual detection.
output_dict = run_inference_for_single_image(image_np, detection_graph)
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=2)
Image.fromarray(image_np).save(output_image)
| 48.818182 | 142 | 0.676133 |
2bc5ae10c9688cdc52ee9e478845d7b52676a4ce | 1,737 | py | Python | src/sima/simo/blueprints/simostaticresultentry.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/simo/blueprints/simostaticresultentry.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/simo/blueprints/simostaticresultentry.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | #
# Generated with SIMOStaticResultEntryBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from sima.sima.blueprints.resultentry import ResultEntryBlueprint
class SIMOStaticResultEntryBlueprint(ResultEntryBlueprint):
""""""
def __init__(self, name="SIMOStaticResultEntry", package_path="sima/simo", description=""):
super().__init__(name,package_path,description)
self.attributes.append(Attribute("name","string","",default=""))
self.attributes.append(Attribute("description","string","",default=""))
self.attributes.append(Attribute("_id","string","",default=""))
self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
self.attributes.append(BlueprintAttribute("properties","sima/sima/Property","",True,Dimension("*")))
self.attributes.append(Attribute("resource","string","",default=""))
self.attributes.append(Attribute("relative","boolean","",default=False))
self.attributes.append(Attribute("changeNumber","integer","",default=0))
self.attributes.append(BlueprintAttribute("results","sima/sima/Result","",True,Dimension("*")))
self.attributes.append(BlueprintAttribute("entries","sima/sima/ResultEntry","",True,Dimension("*")))
self.attributes.append(BlueprintAttribute("lisFile","sima/simo/LisFile","",True))
self.attributes.append(BlueprintAttribute("simoLDAT","sima/sima/Result","",True))
self.attributes.append(BlueprintAttribute("staticResult","sima/simo/StaticResult","",True)) | 64.333333 | 121 | 0.734024 |
7cf57ad493897f31947a33a11de47c64d92f5e48 | 1,152 | py | Python | preprocessing/iaps.py | amysudarat/affective-monitor-model | 4de08704a3d36a0a228d5eeb9a33317be51f18fc | [
"MIT"
] | null | null | null | preprocessing/iaps.py | amysudarat/affective-monitor-model | 4de08704a3d36a0a228d5eeb9a33317be51f18fc | [
"MIT"
] | null | null | null | preprocessing/iaps.py | amysudarat/affective-monitor-model | 4de08704a3d36a0a228d5eeb9a33317be51f18fc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pandas as pd
import preprocessing.illum as pill
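# Thin wrapper around the IAPS info file: loads picture ids, picture indices
# and affect ratings, and converts between picture ids and per-subject sample
# indices.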
class iaps(object):
def __init__(self,filepath):
self.filepath = filepath
self.iaps_df = self.get_iaps_data()
self.iaps_df['pic_idx'] = self.iaps_df['pic_idx'].apply(lambda x:x-1)
def get_iaps_data(self):
filepath = self.filepath+'\\IAPSinfoFile_Final.txt'
iaps_df = pd.read_csv(filepath,header=None)
iaps_df.columns = ['pic_id','pic_idx','testsubject_idx','valence_m','arousal_m','valence_std','arousal_std','file_name']
return iaps_df
def get_pic_id(self,sample_idx):
idx = sample_idx%70
return self.iaps_df.loc[idx]['pic_id']
def get_sample_idx(self,pic_id):
idx = self.iaps_df[self.iaps_df['pic_id']==pic_id]['pic_idx'].values[0]
return [i*70+idx for i in range(51)]
def get_feeling(self,feeling):
"""
        Load the IAPS selected-list CSV and return it as a DataFrame (the 'feeling' argument is currently unused).
"""
filepath = self.filepath+'\\IAPS_selectedList_Final.csv'
feel_df = pd.read_csv(filepath,index_col=0)
return feel_df
| 32 | 128 | 0.637153 |
706e45b08f6ba5d21f2c85ab00a474fa6c3768f5 | 1,121 | py | Python | manage.py | inaki/farm-stand | 92d56fffae742775a8c9ccc91fbce39715caa0ab | [
"MIT"
] | 4 | 2015-08-20T02:43:35.000Z | 2022-03-28T12:37:17.000Z | manage.py | codeforamerica/westsac-farm-stand | 92d56fffae742775a8c9ccc91fbce39715caa0ab | [
"MIT"
] | 19 | 2015-07-31T23:32:09.000Z | 2015-08-31T23:58:40.000Z | manage.py | inaki/farm-stand | 92d56fffae742775a8c9ccc91fbce39715caa0ab | [
"MIT"
] | 4 | 2015-09-17T03:01:01.000Z | 2021-04-16T10:14:22.000Z | #!/usr/bin/venv python
import os
from app import create_app, db
from app.models import User, Role, Interestedpeople, Permission, Product
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User,Permission=Permission, Role=Role, Interestedpeople=Interestedpeople, Product=Product)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def deploy():
"""Run deployment tasks."""
from flask.ext.migrate import upgrade
from app.models import Role, User, Product, Interestedpeople
# migrate database to latest revision
upgrade()
# create user roles
Role.insert_roles()
if __name__ == '__main__':
manager.run()
| 26.069767 | 127 | 0.74041 |
c330bb605f330de4c899db39452af52560b91f32 | 1,987 | py | Python | tests/handlers/test_custom.py | xpayn/mangum | baa514d8631eabd70a2ce2111f3b5d525e5044c9 | [
"MIT"
] | 202 | 2019-01-14T16:59:33.000Z | 2020-05-26T11:46:59.000Z | tests/handlers/test_custom.py | xpayn/mangum | baa514d8631eabd70a2ce2111f3b5d525e5044c9 | [
"MIT"
] | 89 | 2019-01-19T00:31:20.000Z | 2020-05-31T09:58:32.000Z | tests/handlers/test_custom.py | xpayn/mangum | baa514d8631eabd70a2ce2111f3b5d525e5044c9 | [
"MIT"
] | 25 | 2019-02-28T00:10:21.000Z | 2020-05-26T19:52:29.000Z | from mangum.types import (
Scope,
Headers,
LambdaConfig,
LambdaContext,
LambdaEvent,
)
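# Minimal custom handler for the test: infer() decides whether the handler
# claims the Lambda event, scope builds the ASGI scope from it, and __call__
# formats the final Lambda response.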
class CustomHandler:
@classmethod
def infer(
cls, event: LambdaEvent, context: LambdaContext, config: LambdaConfig
) -> bool:
return "my-custom-key" in event
def __init__(
self, event: LambdaEvent, context: LambdaContext, config: LambdaConfig
) -> None:
self.event = event
self.context = context
self.config = config
@property
def body(self) -> bytes:
return b"My request body"
@property
def scope(self) -> Scope:
headers = {}
return {
"type": "http",
"http_version": "1.1",
"method": "GET",
"headers": [[k.encode(), v.encode()] for k, v in headers.items()],
"path": "/",
"raw_path": None,
"root_path": "",
"scheme": "https",
"query_string": b"",
"server": ("mangum", 8080),
"client": ("127.0.0.1", 0),
"asgi": {"version": "3.0", "spec_version": "2.0"},
"aws.event": self.event,
"aws.context": self.context,
}
def __call__(self, *, status: int, headers: Headers, body: bytes) -> dict:
return {"statusCode": status, "headers": {}, "body": body.decode()}
def test_custom_handler():
event = {"my-custom-key": 1}
handler = CustomHandler(event, {}, {"api_gateway_base_path": "/"})
assert type(handler.body) == bytes
assert handler.scope == {
"asgi": {"version": "3.0", "spec_version": "2.0"},
"aws.context": {},
"aws.event": event,
"client": ("127.0.0.1", 0),
"headers": [],
"http_version": "1.1",
"method": "GET",
"path": "/",
"query_string": b"",
"raw_path": None,
"root_path": "",
"scheme": "https",
"server": ("mangum", 8080),
"type": "http",
}
| 27.597222 | 78 | 0.501761 |
9fde8560b25dd323cb82478e2bbed891f00119c0 | 5,887 | py | Python | jirani/migrations/0001_initial.py | albunus/nextdoor | 0be51a73f05c267e76a0e522f05efa88099b7522 | [
"MIT"
] | null | null | null | jirani/migrations/0001_initial.py | albunus/nextdoor | 0be51a73f05c267e76a0e522f05efa88099b7522 | [
"MIT"
] | null | null | null | jirani/migrations/0001_initial.py | albunus/nextdoor | 0be51a73f05c267e76a0e522f05efa88099b7522 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-12-24 02:59
import cloudinary.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('description', models.TextField(blank=True, max_length=10000, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Nextdoor',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('occupants_count', models.IntegerField(default=0)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jirani.location')),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('email', models.EmailField(blank=True, max_length=100, null=True)),
('profile_pic', cloudinary.models.CloudinaryField(max_length=255, verbose_name='image')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jirani.location')),
('neighbourhood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jirani.nextdoor')),
('user', models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('content', models.TextField(blank=True, null=True)),
('image', cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='image')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='jirani.category')),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jirani.location')),
('nextdoor', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='jirani.nextdoor')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('email', models.EmailField(blank=True, max_length=50, null=True)),
('phone', models.CharField(max_length=50)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('neighbourhood', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='jirani.nextdoor')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Business',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('email', models.EmailField(max_length=50)),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('nextdoor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jirani.nextdoor')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 54.009174 | 132 | 0.602854 |
448b9b42e3bb379da0c2add1f80bcf13bdf4388c | 3,901 | py | Python | ssd/modeling/backbone/vgg.py | oudream/hello-ssd1 | 437615e66025cc8819cee33dd4dad687e5d2181b | [
"MIT"
] | null | null | null | ssd/modeling/backbone/vgg.py | oudream/hello-ssd1 | 437615e66025cc8819cee33dd4dad687e5d2181b | [
"MIT"
] | null | null | null | ssd/modeling/backbone/vgg.py | oudream/hello-ssd1 | 437615e66025cc8819cee33dd4dad687e5d2181b | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
import torch
from ssd.layers import L2Norm
from ssd.modeling import registry
from ssd.utils.model_zoo import load_state_dict_from_url
model_urls = {
'vgg': 'https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth',
}
# borrowed from https://github.com/amdegroot/ssd.pytorch/blob/master/ssd.py
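# Builds the VGG-16 feature layers from a config list: integers are conv
# output channels, 'M' is a 2x2 max-pool, 'C' a ceil-mode max-pool; the
# classifier is replaced by the dilated conv6 and 1x1 conv7 layers.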
def add_vgg(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6,
nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]
return layers
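# Adds the extra SSD feature-scaling layers on top of conv7; an 'S' entry
# inserts a stride-2 convolution (taking its channel count from the next
# entry), and two extra layers are appended for the 512 input size.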
def add_extras(cfg, i, size=300):
# Extra layers added to VGG for feature scaling
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
layers += [nn.Conv2d(in_channels, cfg[k + 1], kernel_size=(1, 3)[flag], stride=2, padding=1)]
else:
layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]
flag = not flag
in_channels = v
if size == 512:
layers.append(nn.Conv2d(in_channels, 128, kernel_size=1, stride=1))
layers.append(nn.Conv2d(128, 256, kernel_size=4, stride=1, padding=1))
return layers
vgg_base = {
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512],
'512': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512],
}
extras_base = {
'300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
'512': [256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256],
}
class VGG(nn.Module):
def __init__(self, cfg):
super().__init__()
size = cfg.INPUT.IMAGE_SIZE
vgg_config = vgg_base[str(size)]
extras_config = extras_base[str(size)]
self.vgg = nn.ModuleList(add_vgg(vgg_config))
self.extras = nn.ModuleList(add_extras(extras_config, i=1024, size=size))
self.l2_norm = L2Norm(512, scale=20)
self.reset_parameters()
def reset_parameters(self):
for m in self.extras.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
nn.init.zeros_(m.bias)
def init_from_pretrain(self, state_dict):
self.vgg.load_state_dict(state_dict)
def forward(self, x):
features = []
for i in range(23):
x = self.vgg[i](x)
s = self.l2_norm(x) # Conv4_3 L2 normalization
features.append(s)
# apply vgg up to fc7
for i in range(23, len(self.vgg)):
x = self.vgg[i](x)
features.append(x)
for k, v in enumerate(self.extras):
x = F.relu(v(x), inplace=True)
if k % 2 == 1:
features.append(x)
return tuple(features)
@registry.BACKBONES.register('vgg')
def vgg(cfg, pretrained=True):
model = VGG(cfg)
if pretrained:
model.init_from_pretrain(load_state_dict_from_url(model_urls['vgg']))
# model.init_from_pretrain(torch.load('vgg16_reducedfc.pth', map_location="cpu"))
# model.init_from_pretrain(torch.load('vgg_ssd512_voc0712.pth', map_location="cpu"))
return model
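# Minimal usage sketch (assumes a config object exposing INPUT.IMAGE_SIZE set
# to 300 or 512, as this file expects):
#   backbone = vgg(cfg, pretrained=False)
#   features = backbone(torch.randn(1, 3, 300, 300))  # tuple of feature maps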
| 32.781513 | 109 | 0.588054 |
a1c21a01f823f3b6944ef1e502c36068fe5d42a1 | 6,703 | py | Python | docusign_esign/models/attachment.py | hunk/docusign-python-client | a643c42c1236715e74eef6fc279a1b29da1b5455 | [
"MIT"
] | null | null | null | docusign_esign/models/attachment.py | hunk/docusign-python-client | a643c42c1236715e74eef6fc279a1b29da1b5455 | [
"MIT"
] | null | null | null | docusign_esign/models/attachment.py | hunk/docusign-python-client | a643c42c1236715e74eef6fc279a1b29da1b5455 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Attachment(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, access_control=None, attachment_id=None, attachment_type=None, data=None, label=None, name=None, remote_url=None):
"""
Attachment - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'access_control': 'str',
'attachment_id': 'str',
'attachment_type': 'str',
'data': 'str',
'label': 'str',
'name': 'str',
'remote_url': 'str'
}
self.attribute_map = {
'access_control': 'accessControl',
'attachment_id': 'attachmentId',
'attachment_type': 'attachmentType',
'data': 'data',
'label': 'label',
'name': 'name',
'remote_url': 'remoteUrl'
}
self._access_control = access_control
self._attachment_id = attachment_id
self._attachment_type = attachment_type
self._data = data
self._label = label
self._name = name
self._remote_url = remote_url
@property
def access_control(self):
"""
Gets the access_control of this Attachment.
:return: The access_control of this Attachment.
:rtype: str
"""
return self._access_control
@access_control.setter
def access_control(self, access_control):
"""
Sets the access_control of this Attachment.
:param access_control: The access_control of this Attachment.
:type: str
"""
self._access_control = access_control
@property
def attachment_id(self):
"""
Gets the attachment_id of this Attachment.
:return: The attachment_id of this Attachment.
:rtype: str
"""
return self._attachment_id
@attachment_id.setter
def attachment_id(self, attachment_id):
"""
Sets the attachment_id of this Attachment.
:param attachment_id: The attachment_id of this Attachment.
:type: str
"""
self._attachment_id = attachment_id
@property
def attachment_type(self):
"""
Gets the attachment_type of this Attachment.
Specifies the type of the attachment for the recipient.
:return: The attachment_type of this Attachment.
:rtype: str
"""
return self._attachment_type
@attachment_type.setter
def attachment_type(self, attachment_type):
"""
Sets the attachment_type of this Attachment.
Specifies the type of the attachment for the recipient.
:param attachment_type: The attachment_type of this Attachment.
:type: str
"""
self._attachment_type = attachment_type
@property
def data(self):
"""
Gets the data of this Attachment.
:return: The data of this Attachment.
:rtype: str
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this Attachment.
:param data: The data of this Attachment.
:type: str
"""
self._data = data
@property
def label(self):
"""
Gets the label of this Attachment.
:return: The label of this Attachment.
:rtype: str
"""
return self._label
@label.setter
def label(self, label):
"""
Sets the label of this Attachment.
:param label: The label of this Attachment.
:type: str
"""
self._label = label
@property
def name(self):
"""
Gets the name of this Attachment.
:return: The name of this Attachment.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Attachment.
:param name: The name of this Attachment.
:type: str
"""
self._name = name
@property
def remote_url(self):
"""
Gets the remote_url of this Attachment.
:return: The remote_url of this Attachment.
:rtype: str
"""
return self._remote_url
@remote_url.setter
def remote_url(self, remote_url):
"""
Sets the remote_url of this Attachment.
:param remote_url: The remote_url of this Attachment.
:type: str
"""
self._remote_url = remote_url
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 24.734317 | 137 | 0.548262 |
ee25738de4d820d3a15e5c6e5d3abbad64250da3 | 2,854 | py | Python | pymystrom/switch.py | d0xsch/python-mystrom | 86410f8952104651ef76ad37c84c29740c50551e | [
"MIT"
] | null | null | null | pymystrom/switch.py | d0xsch/python-mystrom | 86410f8952104651ef76ad37c84c29740c50551e | [
"MIT"
] | null | null | null | pymystrom/switch.py | d0xsch/python-mystrom | 86410f8952104651ef76ad37c84c29740c50551e | [
"MIT"
] | null | null | null | """
Copyright (c) 2015-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import requests
from . import exceptions
class MyStromPlug(object):
"""A class for a myStrom switch."""
def __init__(self, host):
"""Initialize the switch."""
self.resource = 'http://{}'.format(host)
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.temperature = 0
def set_relay_on(self):
"""Turn the relay on."""
if not self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '1'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_relay_off(self):
"""Turn the relay off."""
if self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '0'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = False
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def get_status(self):
"""Get the details from the switch."""
try:
request = requests.get(
'{}/report'.format(self.resource), timeout=self.timeout)
self.data = request.json()
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_relay_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['relay']
except TypeError:
self.state = False
return bool(self.state)
def get_consumption(self):
"""Get current power consumption in mWh."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_temperature(self):
"""Get current temperature in celsius."""
try:
request = requests.get(
'{}/temp'.format(self.resource), timeout=self.timeout, allow_redirects=False)
self.temperature = request.json()['compensated']
return self.temperature
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
except ValueError:
raise exceptions.MyStromNotVersionTwoSwitch()
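# Illustrative usage sketch (not part of the original module). The host address
# below is a placeholder and assumes a reachable myStrom switch on the local
# network; only methods defined on MyStromPlug above are used.
if __name__ == '__main__':
    plug = MyStromPlug('192.168.1.50')
    print(plug.get_relay_state())    # True/False depending on the relay
    print(plug.get_consumption())    # current power reading from /report
    plug.set_relay_on()              # no-op if the relay is already on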
| 32.431818 | 93 | 0.568325 |
267cfc564994c38411f2bd7c19af348ceff72538 | 807 | py | Python | app/resources/product_group/models.py | zalando-zmon/zmon-slo-metrics | f2e823b861af2f449d1fd72cced74cf26b3aee94 | [
"Apache-2.0"
] | 8 | 2017-02-21T09:45:01.000Z | 2020-09-18T00:09:42.000Z | app/resources/product_group/models.py | ThorbjoernG/service-level-reporting | d9ec7dca3fee1614bf39cb46af9c0cc8bfbd805e | [
"Apache-2.0"
] | 148 | 2017-02-20T08:52:32.000Z | 2020-03-10T09:43:40.000Z | app/resources/product_group/models.py | lfroment0/service-level-reporting | 29d6d0664762c76eb5aa7000a8c191c32cc2c015 | [
"Apache-2.0"
] | 12 | 2017-02-20T07:24:21.000Z | 2019-09-27T12:32:33.000Z | from datetime import datetime
from app.extensions import db
class ProductGroup(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(120), unique=True, nullable=False, index=True)
department = db.Column(db.String(120), default='')
products = db.relationship('Product', backref=db.backref('product_group', lazy='joined'), lazy='dynamic')
slug = db.Column(db.String(120), unique=True, nullable=False, index=True)
username = db.Column(db.String(120), default='')
created = db.Column(db.DateTime(), default=datetime.utcnow)
updated = db.Column(db.DateTime(), onupdate=datetime.utcnow, default=datetime.utcnow)
def get_owner(self):
return self.name
def __repr__(self):
return '<Product group: {}>'.format(self.name)
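# Illustrative usage sketch (not part of the original model file). It assumes an
# application context in which the Flask-SQLAlchemy `db` extension above has been
# initialised; the field values are placeholders.
def _example_create_product_group():
    group = ProductGroup(name='Example group', slug='example-group', department='Engineering')
    db.session.add(group)
    db.session.commit()
    return group.get_owner()  # returns the group name, here 'Example group'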
| 32.28 | 109 | 0.692689 |
021c448226c618bcb5c2298dd85a4869dd001fff | 2,844 | py | Python | eval.py | bhsimon0810/gaussian-mixture-vae | c37163c96f8c51f36d66addec8e00ad204a2d730 | [
"MIT"
] | null | null | null | eval.py | bhsimon0810/gaussian-mixture-vae | c37163c96f8c51f36d66addec8e00ad204a2d730 | [
"MIT"
] | null | null | null | eval.py | bhsimon0810/gaussian-mixture-vae | c37163c96f8c51f36d66addec8e00ad204a2d730 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
import os
import utils
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report
flags = tf.app.flags
flags.DEFINE_string('data_dir', './datasets', 'Data dir path.')
flags.DEFINE_string('dataset', 'imdb', 'Data dir path.')
flags.DEFINE_string('checkpoint_dir', './ckpt', 'Data dir path.')
flags.DEFINE_integer('batch_size', 64, 'Batch size.')
flags.DEFINE_integer('n_class', 2, 'Size of stochastic vector.')
flags.DEFINE_integer('vocab_size', 10000, 'Vocabulary size.')
FLAGS = flags.FLAGS
glove_url = os.path.join(FLAGS.data_dir, 'glove.6B.100d.txt')
data_dir = os.path.join(FLAGS.data_dir, FLAGS.dataset)
vocab_url = os.path.join(data_dir, 'vocab.new')
word_dict = utils.build_vocab(vocab_url)
test_url = os.path.join(data_dir, 'test.feat')
test_target, test_set, test_count = utils.data_set(test_url)
test_batches = utils.create_batches(len(test_set), FLAGS.batch_size, shuffle=False)
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
sess = tf.compat.v1.Session()
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.compat.v1.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
x_placeholder = graph.get_operation_by_name("input").outputs[0]
y_placeholder = graph.get_operation_by_name("output").outputs[0]
# mask_placeholder = graph.get_operation_by_name("label_mask").outputs[0]
batch_size_placeholder = graph.get_operation_by_name("batch_size").outputs[0]
# Tensors we want to evaluate
y_hat_tensor = graph.get_tensor_by_name("classifier/Softmax:0")
test_preds = []
for idx_batch in test_batches:
target_batch, data_batch, count_batch, num_example = utils.fetch_data(
test_target, test_set, test_count, idx_batch, FLAGS.n_class, FLAGS.vocab_size)
# mask = np.array([1.0] * num_example)
input_feed = {x_placeholder: data_batch, y_placeholder: target_batch, batch_size_placeholder: num_example}
y_hat = sess.run(y_hat_tensor, input_feed)
predictions = np.argmax(y_hat, axis=1)
test_preds = np.concatenate([test_preds, predictions])
print("###################### Classification Report ######################")
print(classification_report(test_target, test_preds))
print("###################### Macro Average ######################")
print(precision_score(test_target, test_preds, average='macro'))
print(recall_score(test_target, test_preds, average='macro'))
print(f1_score(test_target, test_preds, average='macro'))
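# Illustrative invocation sketch (not part of the original script): the flags
# mirror the DEFINE_* declarations above, and the paths are placeholders.
#
#   python eval.py --data_dir ./datasets --dataset imdb --checkpoint_dir ./ckpt --batch_size 64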
| 46.622951 | 119 | 0.691983 |
1f0208cbd5625a47cda1eaebd1f70bd54062df4b | 512 | py | Python | teacher/migrations/0011_action_week.py | PietPtr/UoM | 9b67fb01084ed984b5171cd4d201528c926571b6 | [
"MIT"
] | null | null | null | teacher/migrations/0011_action_week.py | PietPtr/UoM | 9b67fb01084ed984b5171cd4d201528c926571b6 | [
"MIT"
] | null | null | null | teacher/migrations/0011_action_week.py | PietPtr/UoM | 9b67fb01084ed984b5171cd4d201528c926571b6 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.3 on 2020-11-09 20:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('teacher', '0010_auto_20201109_2013'),
]
operations = [
migrations.AddField(
model_name='action',
name='week',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='teacher.week'),
preserve_default=False,
),
]
| 24.380952 | 111 | 0.632813 |
aa84eae5247bf623186a12c41ac36830a2d6e9a9 | 12,947 | py | Python | src/bio2bel/manager/abstract_manager.py | COVID-19-Causal-Reasoning/bio2bel | f2c015c23e9e1f4b996716ec48f61687c5e347fe | [
"MIT"
] | 16 | 2018-05-18T13:25:44.000Z | 2022-03-15T02:32:28.000Z | src/bio2bel/manager/abstract_manager.py | COVID-19-Causal-Reasoning/bio2bel | f2c015c23e9e1f4b996716ec48f61687c5e347fe | [
"MIT"
] | 42 | 2017-09-13T20:16:46.000Z | 2021-05-08T19:24:30.000Z | src/bio2bel/manager/abstract_manager.py | COVID-19-Causal-Reasoning/bio2bel | f2c015c23e9e1f4b996716ec48f61687c5e347fe | [
"MIT"
] | 5 | 2020-03-14T17:08:12.000Z | 2021-04-13T20:19:19.000Z | # -*- coding: utf-8 -*-
"""Provides abstractions over the management of SQLAlchemy connections and sessions."""
import logging
import os
import sys
from abc import ABCMeta, abstractmethod
from functools import wraps
from typing import List, Mapping, Type
import click
from more_click import verbose_option
from sqlalchemy.ext.declarative.api import DeclarativeMeta
from .cli_manager import CliMixin
from .connection_manager import ConnectionManager
from ..utils import _get_managers, clear_cache, get_data_dir
__all__ = [
'AbstractManager',
'get_bio2bel_manager_classes',
]
log = logging.getLogger(__name__)
class AbstractManagerMeta(ABCMeta):
"""Crazy metaclass to hack in a decorator to the populate function."""
def __new__(mcs, name, bases, namespace, **kwargs): # noqa: N804
cls = super().__new__(mcs, name, bases, namespace, **kwargs)
cls._populate_original = cls.populate
@wraps(cls._populate_original)
def populate_wrapped(self, *populate_args, **populate_kwargs):
"""Populate the database."""
try:
cls._populate_original(self, *populate_args, **populate_kwargs)
except Exception:
self._store_populate_failed()
raise
else:
# Hack in the action storage
self._store_populate()
cls.populate = populate_wrapped
return cls
class AbstractManager(ConnectionManager, CliMixin, metaclass=AbstractManagerMeta):
"""This is a base class for implementing your own Bio2BEL manager.
It already includes functions to handle configuration, construction of a connection to a database using SQLAlchemy,
creation of the tables defined by your own :func:`sqlalchemy.ext.declarative.declarative_base`, and has hooks to
override that populate and make simple queries to the database. Since :class:`AbstractManager` inherits from
:class:`abc.ABC` and is therefore an abstract class, there are a few class variables, functions, and properties
that need to be overridden.
**Overriding the Module Name**
First, the class-level variable ``module_name`` must be set to a string corresponding to the name of the data
source.
.. code-block:: python
from bio2bel import AbstractManager
class Manager(AbstractManager):
module_name = 'mirtarbase' # note: use lower case module names
In general, this should also correspond to the same value as ``MODULE_NAME`` set in ``constants.py`` and can also
be set with an assignment to this value
.. code-block:: python
from bio2bel import AbstractManager
from .constants import MODULE_NAME
class Manager(AbstractManager):
module_name = MODULE_NAME
**Setting the Declarative Base**
Building on the previous example, the (private) abstract property :data:`bio2bel.AbstractManager._base` must be
overridden to return the value from your :func:`sqlalchemy.ext.declarative.declarative_base`. We chose to make this
an instance-level property instead of a class-level variable so each manager could have its own information about
connections to the database.
As a minimal example:
.. code-block:: python
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
from bio2bel import AbstractManager
Base: DeclarativeMeta = declarative_base()
class Manager(AbstractManager):
module_name = 'mirtarbase' # note: use lower case module names
@property
def _base(self) -> DeclarativeMeta:
return Base
In general, the models should be defined in a module called ``models.py`` so the ``Base`` can also be imported.
.. code-block:: python
from sqlalchemy.ext.declarative import DeclarativeMeta
from bio2bel import AbstractManager
from .constants import MODULE_NAME
from .models import Base
class Manager(AbstractManager):
module_name = MODULE_NAME
@property
def _base(self) -> DeclarativeMeta:
return Base
**Populating the Database**
Deciding how to populate the database using your SQLAlchemy models is incredibly creative and can't be given a good
example without checking real code. See the previously mentioned `implementation of a Manager
<https://github.com/bio2bel/mirtarbase/blob/master/src/bio2bel_mirtarbase/manager.py>`_.
.. code-block:: python
from sqlalchemy.ext.declarative import DeclarativeMeta
from bio2bel import AbstractManager
from .constants import MODULE_NAME
from .models import Base
class Manager(AbstractManager):
module_name = MODULE_NAME
@property
def _base(self) -> DeclarativeMeta:
return Base
def populate(self) -> None:
...
**Checking the Database is Populated**
A method for checking if the database has been populated already must be implemented as well. The easiest way to
implement this is to check that there's a non-zero count of whatever the most important model in the database is.
.. code-block:: python
from sqlalchemy.ext.declarative import DeclarativeMeta
from bio2bel import AbstractManager
from .constants import MODULE_NAME
from .models import Base
class Manager(AbstractManager):
module_name = MODULE_NAME
@property
def _base(self) -> DeclarativeMeta:
return Base
def populate(self) -> None:
...
def is_populated(self) -> bool:
return 0 < self.session.query(MyImportantModel).count()
There are several mixins that can be optionally inherited:
1. :py:class:`bio2bel.manager.flask_manager.FlaskMixin`: the Flask Mixin creates a Flask-Admin web application.
2. :py:class:`bio2bel.manager.namespace_manager.BELNamespaceManagerMixin`: the BEL Namespace Manager Mixin exports
a BEL namespace and interact with PyBEL.
3. :py:class:`bio2bel.manager.bel_manager.BELManagerMixin`: the BEL Manager Mixin exports a BEL script
and interact with PyBEL.
"""
@property
@abstractmethod
def _base(self) -> DeclarativeMeta:
"""Return the declarative base.
It is usually sufficient to return an instance that is module-level.
How to build an instance of :class:`sqlalchemy.ext.declarative.api.DeclarativeMeta` by using
:func:`sqlalchemy.ext.declarative.declarative_base`:
>>> from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
>>> Base: DeclarativeMeta = declarative_base()
Then just override this abstract property like:
>>> @property
>>> def _base(self) -> DeclarativeMeta:
>>> return Base
Note that this property could effectively also be a static method.
"""
def __init__(self, *args, **kwargs): # noqa: D107
super().__init__(*args, **kwargs)
self.create_all()
@abstractmethod
def is_populated(self) -> bool:
"""Check if the database is already populated."""
@abstractmethod
def populate(self, *args, **kwargs) -> None:
"""Populate the database."""
@abstractmethod
def summarize(self) -> Mapping[str, int]:
"""Summarize the database."""
@property
def _metadata(self):
"""Return the metadata object associated with this manager's declarative base."""
return self._base.metadata
def create_all(self, check_first: bool = True):
"""Create the empty database (tables).
:param bool check_first: Defaults to True, don't issue CREATEs for tables already present
in the target database. Defers to :meth:`sqlalchemy.sql.schema.MetaData.create_all`
"""
self._metadata.create_all(self.engine, checkfirst=check_first)
def drop_all(self, check_first: bool = True):
"""Drop all tables from the database.
:param bool check_first: Defaults to True, only issue DROPs for tables confirmed to be
present in the target database. Defers to :meth:`sqlalchemy.sql.schema.MetaData.drop_all`
"""
self._metadata.drop_all(self.engine, checkfirst=check_first)
self._store_drop()
def _get_query(self, model):
"""Get a query for the given model using this manager's session.
:param model: A SQLAlchemy model class
:return: a SQLAlchemy query
"""
return self.session.query(model)
def _count_model(self, model) -> int:
"""Count the number of the given model in the database.
:param model: A SQLAlchemy model class
"""
return self._get_query(model).count()
def _list_model(self, model) -> List:
"""Get all instances of the given model in the database.
:param model: A SQLAlchemy model class
"""
return self._get_query(model).all()
@staticmethod
def _cli_add_populate(main: click.Group) -> click.Group:
"""Add the populate command."""
return add_cli_populate(main)
@staticmethod
def _cli_add_drop(main: click.Group) -> click.Group:
"""Add the drop command."""
return add_cli_drop(main)
@staticmethod
def _cli_add_cache(main: click.Group) -> click.Group:
"""Add the cache command."""
return add_cli_cache(main)
@staticmethod
def _cli_add_summarize(main: click.Group) -> click.Group:
"""Add the summarize command."""
return add_cli_summarize(main)
@classmethod
def get_cli(cls) -> click.Group:
"""Get the :mod:`click` main function to use as a command line interface."""
main = super().get_cli()
cls._cli_add_populate(main)
cls._cli_add_drop(main)
cls._cli_add_cache(main)
cls._cli_add_summarize(main)
return main
def add_cli_populate(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``populate`` command to main :mod:`click` function."""
@main.command()
@click.option('-r', '--reset', is_flag=True, help='Nuke database first')
@click.option('-f', '--force', is_flag=True, help='Force overwrite if already populated')
@verbose_option
@click.pass_obj
def populate(manager: AbstractManager, reset, force):
"""Populate the database."""
if reset:
click.echo('Deleting the previous instance of the database')
manager.drop_all()
click.echo('Creating new models')
manager.create_all()
if manager.is_populated() and not force:
click.echo('Database already populated. Use --force to overwrite')
sys.exit(0)
manager.populate()
return main
def add_cli_drop(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``drop`` command to main :mod:`click` function."""
@main.command()
@verbose_option
@click.confirmation_option(prompt='Are you sure you want to drop the db?')
@click.pass_obj
def drop(manager):
"""Drop the database."""
manager.drop_all()
return main
def add_cli_cache(main: click.Group) -> click.Group: # noqa: D202
"""Add several commands to main :mod:`click` function for handling the cache."""
@main.group()
def cache():
"""Manage cached data."""
@cache.command()
@verbose_option
@click.pass_obj
def locate(manager):
"""Print the location of the data directory."""
data_dir = get_data_dir(manager.module_name)
click.echo(data_dir)
@cache.command()
@verbose_option
@click.pass_obj
def ls(manager):
"""List files in the cache."""
data_dir = get_data_dir(manager.module_name)
for path in os.listdir(data_dir):
click.echo(path)
@cache.command()
@verbose_option
@click.pass_obj
def clear(manager):
"""Clear all files from the cache."""
clear_cache(manager.module_name)
return main
def add_cli_summarize(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``summarize`` command to main :mod:`click` function."""
@main.command()
@verbose_option
@click.pass_obj
def summarize(manager: AbstractManager):
"""Summarize the contents of the database."""
if not manager.is_populated():
click.secho(f'{manager.module_name} has not been populated', fg='red')
sys.exit(1)
for name, count in sorted(manager.summarize().items()):
click.echo(f'{name.capitalize()}: {count}')
return main
def get_bio2bel_manager_classes() -> Mapping[str, Type[AbstractManager]]:
"""Get all Bio2BEL manager classes."""
return dict(_get_managers('bio2bel'))
| 32.448622 | 119 | 0.660848 |
6100770744996b27acefbbc01e199ba1dfb3aa1f | 1,996 | py | Python | backend/api/migrations/0009_annotations_relations_20210421_1445.py | alairice/doccano | 27eff5caec1ec6ad31f1e74bd1b73b1dd43228dc | [
"MIT"
] | 2,082 | 2018-05-09T07:16:21.000Z | 2019-12-01T16:41:50.000Z | backend/api/migrations/0009_annotations_relations_20210421_1445.py | alairice/doccano | 27eff5caec1ec6ad31f1e74bd1b73b1dd43228dc | [
"MIT"
] | 365 | 2018-07-31T13:49:05.000Z | 2019-11-29T11:25:17.000Z | backend/api/migrations/0009_annotations_relations_20210421_1445.py | alairice/doccano | 27eff5caec1ec6ad31f1e74bd1b73b1dd43228dc | [
"MIT"
] | 476 | 2018-08-17T06:43:57.000Z | 2019-12-01T09:47:08.000Z | # Generated by Django 3.1.6 on 2021-03-02 10:13
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("api", "0008_auto_20210302_1013"),
]
operations = [
migrations.CreateModel( # id_autogen, annotation_id_1, annotation_id_2, type, author, timestamp
name="RelationTypes",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("name", models.TextField(max_length=50)),
("color", models.TextField(max_length=20)),
(
"project",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name="relation_types", to="api.Project"
),
),
],
),
migrations.CreateModel( # id_autogen, annotation_id_1, annotation_id_2, type, author, timestamp
name="AnnotationRelations",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("annotation_id_1", models.IntegerField()),
("annotation_id_2", models.IntegerField()),
("type", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="api.RelationTypes")),
("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
("timestamp", models.DateTimeField(auto_now_add=True)),
(
"project",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="annotation_relations",
to="api.Project",
),
),
],
),
]
| 40.734694 | 118 | 0.553607 |
165b0b51a97f6e6abd2bbaeef7e6c5f5c583eb99 | 1,210 | py | Python | source/tests/tester_text_files.py | dibala21/Ergocycle | a1562d722b4348743028f64cd5cb83906cf44207 | [
"MIT"
] | null | null | null | source/tests/tester_text_files.py | dibala21/Ergocycle | a1562d722b4348743028f64cd5cb83906cf44207 | [
"MIT"
] | null | null | null | source/tests/tester_text_files.py | dibala21/Ergocycle | a1562d722b4348743028f64cd5cb83906cf44207 | [
"MIT"
] | null | null | null | import numpy as np
import datetime
#def create_file(file_name, param_matrix):
#f = open(file_name, "w+")
#for row in range(param_matrix):
#f.write("amplitude: ", param_matrix[row][0])
#f.write("")
#matrice_inventee = [[100, 50, 20], [101, 51, 21]]
#nouveau_patient = create_file()
#fichier = create_file(test_user1, C:\Users\frede\Desktop\enregistrements
file_object = open("test2", "w+")
matrice = np.array([[10, 20, 30], [40, 50, 60]])
for row in matrice:
for element in row:
valeur=str(element)
#if element
file_object.write(" Amplitude (mA): "+ valeur)
file_object.close
# note: the call above is missing its parentheses, so the file is never actually
# closed here; the writes further down still succeed only because of that.
#for i in range(10):
# file_object.write("This is line %d\r\n" % (i+1))
#amplitude : self.start_parameters[1,:]
#frequency : self.start_parameters[2,:]
#length impulsion: self.start_parameters[3,:]
file_object1 = open("InstructionWindow")
matrix = np.array([[" ","electrode 1", "electrode 2","electrode 3","electrode 4","electrode 5","electrode 6","electrode 7","electrode 8"],["Amplitude (mA)", self.start_parameters[1,:]],["Frequency (Hz)", self.start_parameters[2,:]], ["Impulsion length (ms)", self.start_paramters[3,:]]])
file_object.write(str(datetime.datetime.now()))  # only the module is imported, so the class must be referenced explicitly
file_object.write(str(matrix))  # file.write() expects a string, not a numpy array
| 36.666667 | 287 | 0.684298 |
3c8bf7f31e604e1eb0dcec59e14be35573da1667 | 19,945 | py | Python | redis/commands/bf/commands.py | hartwork/redis-py | 291baa93b8712d104ce50a61f52e23b68e2b7a99 | [
"MIT"
] | null | null | null | redis/commands/bf/commands.py | hartwork/redis-py | 291baa93b8712d104ce50a61f52e23b68e2b7a99 | [
"MIT"
] | null | null | null | redis/commands/bf/commands.py | hartwork/redis-py | 291baa93b8712d104ce50a61f52e23b68e2b7a99 | [
"MIT"
] | null | null | null | from redis.client import NEVER_DECODE
from redis.exceptions import ModuleError
from redis.utils import HIREDIS_AVAILABLE
BF_RESERVE = "BF.RESERVE"
BF_ADD = "BF.ADD"
BF_MADD = "BF.MADD"
BF_INSERT = "BF.INSERT"
BF_EXISTS = "BF.EXISTS"
BF_MEXISTS = "BF.MEXISTS"
BF_SCANDUMP = "BF.SCANDUMP"
BF_LOADCHUNK = "BF.LOADCHUNK"
BF_INFO = "BF.INFO"
CF_RESERVE = "CF.RESERVE"
CF_ADD = "CF.ADD"
CF_ADDNX = "CF.ADDNX"
CF_INSERT = "CF.INSERT"
CF_INSERTNX = "CF.INSERTNX"
CF_EXISTS = "CF.EXISTS"
CF_DEL = "CF.DEL"
CF_COUNT = "CF.COUNT"
CF_SCANDUMP = "CF.SCANDUMP"
CF_LOADCHUNK = "CF.LOADCHUNK"
CF_INFO = "CF.INFO"
CMS_INITBYDIM = "CMS.INITBYDIM"
CMS_INITBYPROB = "CMS.INITBYPROB"
CMS_INCRBY = "CMS.INCRBY"
CMS_QUERY = "CMS.QUERY"
CMS_MERGE = "CMS.MERGE"
CMS_INFO = "CMS.INFO"
TOPK_RESERVE = "TOPK.RESERVE"
TOPK_ADD = "TOPK.ADD"
TOPK_INCRBY = "TOPK.INCRBY"
TOPK_QUERY = "TOPK.QUERY"
TOPK_COUNT = "TOPK.COUNT"
TOPK_LIST = "TOPK.LIST"
TOPK_INFO = "TOPK.INFO"
TDIGEST_CREATE = "TDIGEST.CREATE"
TDIGEST_RESET = "TDIGEST.RESET"
TDIGEST_ADD = "TDIGEST.ADD"
TDIGEST_MERGE = "TDIGEST.MERGE"
TDIGEST_CDF = "TDIGEST.CDF"
TDIGEST_QUANTILE = "TDIGEST.QUANTILE"
TDIGEST_MIN = "TDIGEST.MIN"
TDIGEST_MAX = "TDIGEST.MAX"
TDIGEST_INFO = "TDIGEST.INFO"
class BFCommands:
"""RedisBloom commands."""
# region Bloom Filter Functions
def create(self, key, errorRate, capacity, expansion=None, noScale=None):
"""
Create a new Bloom Filter `key` with desired probability of false positives
`errorRate` expected entries to be inserted as `capacity`.
Default expansion value is 2. By default, filter is auto-scaling.
For more information see `BF.RESERVE <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfreserve>`_.
""" # noqa
params = [key, errorRate, capacity]
self.appendExpansion(params, expansion)
self.appendNoScale(params, noScale)
return self.execute_command(BF_RESERVE, *params)
def add(self, key, item):
"""
Add to a Bloom Filter `key` an `item`.
For more information see `BF.ADD <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfadd>`_.
""" # noqa
params = [key, item]
return self.execute_command(BF_ADD, *params)
def madd(self, key, *items):
"""
Add to a Bloom Filter `key` multiple `items`.
For more information see `BF.MADD <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfmadd>`_.
""" # noqa
params = [key]
params += items
return self.execute_command(BF_MADD, *params)
def insert(
self,
key,
items,
capacity=None,
error=None,
noCreate=None,
expansion=None,
noScale=None,
):
"""
Add to a Bloom Filter `key` multiple `items`.
If `nocreate` remain `None` and `key` does not exist, a new Bloom Filter
`key` will be created with desired probability of false positives `errorRate`
and expected entries to be inserted as `size`.
For more information see `BF.INSERT <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfinsert>`_.
""" # noqa
params = [key]
self.appendCapacity(params, capacity)
self.appendError(params, error)
self.appendExpansion(params, expansion)
self.appendNoCreate(params, noCreate)
self.appendNoScale(params, noScale)
self.appendItems(params, items)
return self.execute_command(BF_INSERT, *params)
def exists(self, key, item):
"""
Check whether an `item` exists in Bloom Filter `key`.
For more information see `BF.EXISTS <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfexists>`_.
""" # noqa
params = [key, item]
return self.execute_command(BF_EXISTS, *params)
def mexists(self, key, *items):
"""
Check whether `items` exist in Bloom Filter `key`.
For more information see `BF.MEXISTS <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfmexists>`_.
""" # noqa
params = [key]
params += items
return self.execute_command(BF_MEXISTS, *params)
def scandump(self, key, iter):
"""
Begin an incremental save of the bloom filter `key`.
This is useful for large bloom filters which cannot fit into the normal SAVE and RESTORE model.
The first time this command is called, the value of `iter` should be 0.
This command will return successive (iter, data) pairs until (0, NULL) to indicate completion.
For more information see `BF.SCANDUMP <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfscandump>`_.
""" # noqa
if HIREDIS_AVAILABLE:
raise ModuleError("This command cannot be used when hiredis is available.")
params = [key, iter]
options = {}
options[NEVER_DECODE] = []
return self.execute_command(BF_SCANDUMP, *params, **options)
def loadchunk(self, key, iter, data):
"""
Restore a filter previously saved using SCANDUMP.
See the SCANDUMP command for example usage.
This command will overwrite any bloom filter stored under key.
Ensure that the bloom filter will not be modified between invocations.
For more information see `BF.LOADCHUNK <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfloadchunk>`_.
""" # noqa
params = [key, iter, data]
return self.execute_command(BF_LOADCHUNK, *params)
def info(self, key):
"""
Return capacity, size, number of filters, number of items inserted, and expansion rate.
For more information see `BF.INFO <https://oss.redis.com/redisbloom/master/Bloom_Commands/#bfinfo>`_.
""" # noqa
return self.execute_command(BF_INFO, key)
class CFCommands:
# region Cuckoo Filter Functions
def create(
self, key, capacity, expansion=None, bucket_size=None, max_iterations=None
):
"""
Create a new Cuckoo Filter `key` an initial `capacity` items.
For more information see `CF.RESERVE <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfreserve>`_.
""" # noqa
params = [key, capacity]
self.appendExpansion(params, expansion)
self.appendBucketSize(params, bucket_size)
self.appendMaxIterations(params, max_iterations)
return self.execute_command(CF_RESERVE, *params)
def add(self, key, item):
"""
Add an `item` to a Cuckoo Filter `key`.
For more information see `CF.ADD <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfadd>`_.
""" # noqa
params = [key, item]
return self.execute_command(CF_ADD, *params)
def addnx(self, key, item):
"""
Add an `item` to a Cuckoo Filter `key` only if item does not yet exist.
Command might be slower that `add`.
For more information see `CF.ADDNX <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfaddnx>`_.
""" # noqa
params = [key, item]
return self.execute_command(CF_ADDNX, *params)
def insert(self, key, items, capacity=None, nocreate=None):
"""
Add multiple `items` to a Cuckoo Filter `key`, allowing the filter
to be created with a custom `capacity` if it does not yet exist.
`items` must be provided as a list.
For more information see `CF.INSERT <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfinsert>`_.
""" # noqa
params = [key]
self.appendCapacity(params, capacity)
self.appendNoCreate(params, nocreate)
self.appendItems(params, items)
return self.execute_command(CF_INSERT, *params)
def insertnx(self, key, items, capacity=None, nocreate=None):
"""
Add multiple `items` to a Cuckoo Filter `key` only if they do not exist yet,
allowing the filter to be created with a custom `capacity` if it does not yet exist.
`items` must be provided as a list.
For more information see `CF.INSERTNX <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfinsertnx>`_.
""" # noqa
params = [key]
self.appendCapacity(params, capacity)
self.appendNoCreate(params, nocreate)
self.appendItems(params, items)
return self.execute_command(CF_INSERTNX, *params)
def exists(self, key, item):
"""
Check whether an `item` exists in Cuckoo Filter `key`.
For more information see `CF.EXISTS <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfexists>`_.
""" # noqa
params = [key, item]
return self.execute_command(CF_EXISTS, *params)
def delete(self, key, item):
"""
Delete `item` from `key`.
For more information see `CF.DEL <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfdel>`_.
""" # noqa
params = [key, item]
return self.execute_command(CF_DEL, *params)
def count(self, key, item):
"""
Return the number of times an `item` may be in the `key`.
For more information see `CF.COUNT <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfcount>`_.
""" # noqa
params = [key, item]
return self.execute_command(CF_COUNT, *params)
def scandump(self, key, iter):
"""
Begin an incremental save of the Cuckoo filter `key`.
This is useful for large Cuckoo filters which cannot fit into the normal
SAVE and RESTORE model.
The first time this command is called, the value of `iter` should be 0.
This command will return successive (iter, data) pairs until
(0, NULL) to indicate completion.
For more information see `CF.SCANDUMP <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfscandump>`_.
""" # noqa
params = [key, iter]
return self.execute_command(CF_SCANDUMP, *params)
def loadchunk(self, key, iter, data):
"""
Restore a filter previously saved using SCANDUMP. See the SCANDUMP command for example usage.
This command will overwrite any Cuckoo filter stored under key.
Ensure that the Cuckoo filter will not be modified between invocations.
For more information see `CF.LOADCHUNK <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfloadchunk>`_.
""" # noqa
params = [key, iter, data]
return self.execute_command(CF_LOADCHUNK, *params)
def info(self, key):
"""
Return size, number of buckets, number of filter, number of items inserted,
number of items deleted, bucket size, expansion rate, and max iteration.
For more information see `CF.INFO <https://oss.redis.com/redisbloom/master/Cuckoo_Commands/#cfinfo>`_.
""" # noqa
return self.execute_command(CF_INFO, key)
class TOPKCommands:
def reserve(self, key, k, width, depth, decay):
"""
Create a new Top-K Filter `key` with desired probability of false
positives `errorRate` expected entries to be inserted as `size`.
For more information see `TOPK.RESERVE <https://oss.redis.com/redisbloom/master/TopK_Commands/#topkreserve>`_.
""" # noqa
params = [key, k, width, depth, decay]
return self.execute_command(TOPK_RESERVE, *params)
def add(self, key, *items):
"""
Add one `item` or more to a Top-K Filter `key`.
For more information see `TOPK.ADD <https://oss.redis.com/redisbloom/master/TopK_Commands/#topkadd>`_.
""" # noqa
params = [key]
params += items
return self.execute_command(TOPK_ADD, *params)
def incrby(self, key, items, increments):
"""
Add/increase `items` to a Top-K Sketch `key` by ''increments''.
Both `items` and `increments` are lists.
For more information see `TOPK.INCRBY <https://oss.redis.com/redisbloom/master/TopK_Commands/#topkincrby>`_.
Example:
>>> topkincrby('A', ['foo'], [1])
""" # noqa
params = [key]
self.appendItemsAndIncrements(params, items, increments)
return self.execute_command(TOPK_INCRBY, *params)
def query(self, key, *items):
"""
Check whether one `item` or more is a Top-K item at `key`.
For more information see `TOPK.QUERY <https://oss.redis.com/redisbloom/master/TopK_Commands/#topkquery>`_.
""" # noqa
params = [key]
params += items
return self.execute_command(TOPK_QUERY, *params)
def count(self, key, *items):
"""
Return count for one `item` or more from `key`.
For more information see `TOPK.COUNT <https://oss.redis.com/redisbloom/master/TopK_Commands/#topkcount>`_.
""" # noqa
params = [key]
params += items
return self.execute_command(TOPK_COUNT, *params)
def list(self, key, withcount=False):
"""
Return full list of items in Top-K list of `key`.
If `withcount` set to True, return full list of items
with probabilistic count in Top-K list of `key`.
For more information see `TOPK.LIST <https://oss.redis.com/redisbloom/master/TopK_Commands/#topklist>`_.
""" # noqa
params = [key]
if withcount:
params.append("WITHCOUNT")
return self.execute_command(TOPK_LIST, *params)
def info(self, key):
"""
Return k, width, depth and decay values of `key`.
For more information see `TOPK.INFO <https://oss.redis.com/redisbloom/master/TopK_Commands/#topkinfo>`_.
""" # noqa
return self.execute_command(TOPK_INFO, key)
class TDigestCommands:
def create(self, key, compression):
"""
Allocate the memory and initialize the t-digest.
For more information see `TDIGEST.CREATE <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestcreate>`_.
""" # noqa
params = [key, compression]
return self.execute_command(TDIGEST_CREATE, *params)
def reset(self, key):
"""
Reset the sketch `key` to zero - empty out the sketch and re-initialize it.
For more information see `TDIGEST.RESET <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestreset>`_.
""" # noqa
return self.execute_command(TDIGEST_RESET, key)
def add(self, key, values, weights):
"""
Add one or more samples (value with weight) to a sketch `key`.
Both `values` and `weights` are lists.
For more information see `TDIGEST.ADD <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestadd>`_.
Example:
>>> tdigestadd('A', [1500.0], [1.0])
""" # noqa
params = [key]
self.appendValuesAndWeights(params, values, weights)
return self.execute_command(TDIGEST_ADD, *params)
def merge(self, toKey, fromKey):
"""
Merge all of the values from 'fromKey' to 'toKey' sketch.
For more information see `TDIGEST.MERGE <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestmerge>`_.
""" # noqa
params = [toKey, fromKey]
return self.execute_command(TDIGEST_MERGE, *params)
def min(self, key):
"""
Return minimum value from the sketch `key`. Will return DBL_MAX if the sketch is empty.
For more information see `TDIGEST.MIN <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestmin>`_.
""" # noqa
return self.execute_command(TDIGEST_MIN, key)
def max(self, key):
"""
Return maximum value from the sketch `key`. Will return DBL_MIN if the sketch is empty.
For more information see `TDIGEST.MAX <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestmax>`_.
""" # noqa
return self.execute_command(TDIGEST_MAX, key)
def quantile(self, key, quantile):
"""
Return double value estimate of the cutoff such that a specified fraction of the data
added to this TDigest would be less than or equal to the cutoff.
For more information see `TDIGEST.QUANTILE <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestquantile>`_.
""" # noqa
params = [key, quantile]
return self.execute_command(TDIGEST_QUANTILE, *params)
def cdf(self, key, value):
"""
Return double fraction of all points added which are <= value.
For more information see `TDIGEST.CDF <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestcdf>`_.
""" # noqa
params = [key, value]
return self.execute_command(TDIGEST_CDF, *params)
def info(self, key):
"""
Return Compression, Capacity, Merged Nodes, Unmerged Nodes, Merged Weight, Unmerged Weight
and Total Compressions.
For more information see `TDIGEST.INFO <https://oss.redis.com/redisbloom/master/TDigest_Commands/#tdigestinfo>`_.
""" # noqa
return self.execute_command(TDIGEST_INFO, key)
class CMSCommands:
# region Count-Min Sketch Functions
def initbydim(self, key, width, depth):
"""
Initialize a Count-Min Sketch `key` to dimensions (`width`, `depth`) specified by user.
For more information see `CMS.INITBYDIM <https://oss.redis.com/redisbloom/master/CountMinSketch_Commands/#cmsinitbydim>`_.
""" # noqa
params = [key, width, depth]
return self.execute_command(CMS_INITBYDIM, *params)
def initbyprob(self, key, error, probability):
"""
Initialize a Count-Min Sketch `key` to characteristics (`error`, `probability`) specified by user.
For more information see `CMS.INITBYPROB <https://oss.redis.com/redisbloom/master/CountMinSketch_Commands/#cmsinitbyprob>`_.
""" # noqa
params = [key, error, probability]
return self.execute_command(CMS_INITBYPROB, *params)
def incrby(self, key, items, increments):
"""
Add/increase `items` to a Count-Min Sketch `key` by ''increments''.
Both `items` and `increments` are lists.
For more information see `CMS.INCRBY <https://oss.redis.com/redisbloom/master/CountMinSketch_Commands/#cmsincrby>`_.
Example:
>>> cmsincrby('A', ['foo'], [1])
""" # noqa
params = [key]
self.appendItemsAndIncrements(params, items, increments)
return self.execute_command(CMS_INCRBY, *params)
def query(self, key, *items):
"""
Return count for an `item` from `key`. Multiple items can be queried with one call.
For more information see `CMS.QUERY <https://oss.redis.com/redisbloom/master/CountMinSketch_Commands/#cmsquery>`_.
""" # noqa
params = [key]
params += items
return self.execute_command(CMS_QUERY, *params)
def merge(self, destKey, numKeys, srcKeys, weights=[]):
"""
Merge `numKeys` of sketches into `destKey`. Sketches specified in `srcKeys`.
All sketches must have identical width and depth.
`Weights` can be used to multiply certain sketches. Default weight is 1.
Both `srcKeys` and `weights` are lists.
For more information see `CMS.MERGE <https://oss.redis.com/redisbloom/master/CountMinSketch_Commands/#cmsmerge>`_.
""" # noqa
params = [destKey, numKeys]
params += srcKeys
self.appendWeights(params, weights)
return self.execute_command(CMS_MERGE, *params)
def info(self, key):
"""
Return width, depth and total count of the sketch.
For more information see `CMS.INFO <https://oss.redis.com/redisbloom/master/CountMinSketch_Commands/#cmsinfo>`_.
""" # noqa
return self.execute_command(CMS_INFO, key)
| 40.292929 | 132 | 0.644873 |
8b076b3d6cd00cf6639273bb8917984ce1b59d67 | 2,817 | py | Python | src/filetools/scanner.py | ownport/filemeta | 0e96abdd71a93c10d1f752b1c3532018297990dd | [
"MIT"
] | 1 | 2018-05-07T06:07:41.000Z | 2018-05-07T06:07:41.000Z | src/filetools/scanner.py | ownport/filemeta | 0e96abdd71a93c10d1f752b1c3532018297990dd | [
"MIT"
] | 7 | 2017-09-17T19:18:12.000Z | 2021-03-21T08:52:19.000Z | src/filetools/scanner.py | ownport/filemeta | 0e96abdd71a93c10d1f752b1c3532018297990dd | [
"MIT"
] | null | null | null |
import os
import json
import logging
import pathlib
from collections import Counter
from filetools.formats import sqlite
from filetools.formats import jsonline
from filetools.utils import get_meta
from filetools.utils import scan_files
from filetools.utils import progress_bar
from filetools.libs.tabulate import tabulate
logger = logging.getLogger(__name__)
class Scanner:
def __init__(self, path:str, output_type:str, output_file:str, ignore_tags:list) -> None:
if not os.path.exists(path) or not os.path.isdir(path):
raise ValueError('The directory does not exist or not a directory, {}'.format(path))
self._path = path
if not output_type or output_type not in ('jsonline', 'sqlite3'):
raise ValueError(f'The output type must be jsonline or sqlite3, founded: {output_type}')
if output_type == 'jsonline':
self._metastore = jsonline.Metastore(output_file)
else:
self._metastore = sqlite.Metastore(output_file)
self._ignore_tags = ignore_tags
def scan_files(self):
''' run scanner for getting files metadata
'''
processed_files = 0
total_files = self.stats().get('total files')
try:
progress_bar(processed_files, total_files)
for filepath in scan_files(self._path):
processed_files += 1
meta = get_meta(filepath, ignore_tags=self._ignore_tags)
meta['tags'] = json.dumps(meta['tags'])
self._metastore.put(meta)
if processed_files % 100 == 0:
progress_bar(processed_files, total_files)
self._metastore.commit()
self._metastore.commit()
print()
print(tabulate((
('Processed files', processed_files),
('Total files', total_files)
),tablefmt='github')
)
except KeyboardInterrupt:
print("Interrupted by user")
def stats(self):
        ''' scan the directory to gather statistics
        returns the following metrics:
- total files
- total directories
- total size
'''
try:
metrics = Counter()
for root, _, files in os.walk(self._path):
metrics['total files'] += len(files)
metrics['total directories'] += 1
for filename in files:
metrics['total size'] += pathlib.Path(os.path.join(root, filename)).stat().st_size
return dict(metrics)
except KeyboardInterrupt:
print("Interrupted by user")
def close(self):
''' complete work with scanner
'''
self._metastore.close()
self._metastore = None
| 32.755814 | 102 | 0.595669 |
cea4a127e9553e55e3b6bd16fca4a77547759de7 | 1,800 | py | Python | server.py | pawni/gpuobserver | 26a588cd4f572fc5b7492a514878fdc454af628e | [
"MIT"
] | 30 | 2020-01-08T11:14:12.000Z | 2021-07-23T15:36:30.000Z | server.py | pawni/gpuobserver | 26a588cd4f572fc5b7492a514878fdc454af628e | [
"MIT"
] | 3 | 2020-06-08T15:06:15.000Z | 2021-04-18T05:26:26.000Z | server.py | pawni/gpuobserver | 26a588cd4f572fc5b7492a514878fdc454af628e | [
"MIT"
] | 3 | 2020-01-23T16:11:19.000Z | 2020-10-25T21:09:09.000Z | from flask import Flask, render_template, request
from flask.logging import default_handler
import requests
import logging
import time
import config
import gpu_helper
from statistics import get_stats_df, get_time_str
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
formatter = logging.Formatter(
fmt='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
app = Flask(__name__)
app.logger.setLevel(logging.INFO)
default_handler.setFormatter(formatter)
@app.route('/')
def index():
return render_template('index.html', stats_time=get_time_str())
@app.route('/gpu_usage')
def gpu_usage():
show_all = (request.args.get('show_all', '0') == '1')
infos = {server: gpu_helper.get_remote_info(server) for server in config.servers}
return render_template('usage.html', infos=infos, show_all=show_all)
@app.route('/statistics')
def statistics():
df = get_stats_df()
columns=['Name', 'GPU Time', 'Used Power [Wh]', 'Generated CO2 [kg]', 'Trees * year to offset', 'Avg. Util [%]']
return df.to_html(index=False, table_id='stats_table', classes='tablesorter', columns=columns)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--port', type=int, default=5000, help="port to run server on")
parser.add_argument('--host', default='0.0.0.0', help="which host to listen on")
parser.add_argument('--config', help="json config file")
args = parser.parse_args()
if args.config is not None:
config.update_config(args.config)
gpu_helper.setup()
app.run(debug=True, host=args.host, port=args.port)
| 30.508475 | 116 | 0.691111 |
f9417a0750643c6f74fe88dc61c58db8c9ebe996 | 695 | py | Python | tools/collect_labels.py | frankShih/cnn_captcha | cde8b45ffd4eaac0fe878def1a7dfe70a61cb05a | [
"Apache-2.0"
] | null | null | null | tools/collect_labels.py | frankShih/cnn_captcha | cde8b45ffd4eaac0fe878def1a7dfe70a61cb05a | [
"Apache-2.0"
] | null | null | null | tools/collect_labels.py | frankShih/cnn_captcha | cde8b45ffd4eaac0fe878def1a7dfe70a61cb05a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Count the labels of the samples and write them into the labels.json file
"""
import os
import json
image_dir = "../sample/origin"
image_list = os.listdir(image_dir)
labels = set()
for img in image_list:
split_result = img.split("_")
if len(split_result) == 2:
label, name = split_result
if label:
for word in label:
labels.add(word)
else:
pass
print("Total tags{} species".format(len(labels)))
with open("./labels.json", "w") as f:
f.write(json.dumps("".join(list(labels)), ensure_ascii=False))
print("The label list is written to the file labels.json successfully") | 23.965517 | 72 | 0.630216 |
01ac173fb69054feed3ee1bd2db94eb13fc8852b | 3,404 | py | Python | metrics/frechet_inception_distance.py | TeeboneTing/stylegan2 | 2b0873987e84155d44ae6c88339285fc15c17837 | [
"BSD-Source-Code"
] | null | null | null | metrics/frechet_inception_distance.py | TeeboneTing/stylegan2 | 2b0873987e84155d44ae6c88339285fc15c17837 | [
"BSD-Source-Code"
] | null | null | null | metrics/frechet_inception_distance.py | TeeboneTing/stylegan2 | 2b0873987e84155d44ae6c88339285fc15c17837 | [
"BSD-Source-Code"
] | null | null | null | # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Frechet Inception Distance (FID)."""
import os
import numpy as np
import scipy
import tensorflow as tf
import dnnlib.tflib as tflib
from metrics import metric_base
from training import misc
#----------------------------------------------------------------------------
class FID(metric_base.MetricBase):
def __init__(self, num_images, minibatch_per_gpu, **kwargs):
super().__init__(**kwargs)
self.num_images = num_images
self.minibatch_per_gpu = minibatch_per_gpu
def _evaluate(self, Gs, Gs_kwargs, num_gpus):
minibatch_size = num_gpus * self.minibatch_per_gpu
#inception = misc.load_pkl('http://d36zk2xti64re0.cloudfront.net/stylegan1/networks/metrics/inception_v3_features.pkl')
inception = misc.load_pkl('misc/inception_v3_features.pkl')
activations = np.empty([self.num_images, inception.output_shape[1]], dtype=np.float32)
# Calculate statistics for reals.
cache_file = self._get_cache_file_for_reals(num_images=self.num_images)
os.makedirs(os.path.dirname(cache_file), exist_ok=True)
if os.path.isfile(cache_file):
mu_real, sigma_real = misc.load_pkl(cache_file)
else:
for idx, images in enumerate(self._iterate_reals(minibatch_size=minibatch_size)):
begin = idx * minibatch_size
end = min(begin + minibatch_size, self.num_images)
activations[begin:end] = inception.run(images[:end-begin], num_gpus=num_gpus, assume_frozen=True)
if end == self.num_images:
break
mu_real = np.mean(activations, axis=0)
sigma_real = np.cov(activations, rowvar=False)
misc.save_pkl((mu_real, sigma_real), cache_file)
# Construct TensorFlow graph.
result_expr = []
for gpu_idx in range(num_gpus):
with tf.device('/gpu:%d' % gpu_idx):
Gs_clone = Gs.clone()
inception_clone = inception.clone()
latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
labels = self._get_random_labels_tf(self.minibatch_per_gpu)
images = Gs_clone.get_output_for(latents, labels, **Gs_kwargs)
images = tflib.convert_images_to_uint8(images)
result_expr.append(inception_clone.get_output_for(images))
# Calculate statistics for fakes.
for begin in range(0, self.num_images, minibatch_size):
self._report_progress(begin, self.num_images)
end = min(begin + minibatch_size, self.num_images)
activations[begin:end] = np.concatenate(tflib.run(result_expr), axis=0)[:end-begin]
mu_fake = np.mean(activations, axis=0)
sigma_fake = np.cov(activations, rowvar=False)
# Calculate FID.
m = np.square(mu_fake - mu_real).sum()
s, _ = scipy.linalg.sqrtm(np.dot(sigma_fake, sigma_real), disp=False) # pylint: disable=no-member
dist = m + np.trace(sigma_fake + sigma_real - 2*s)
self._report_result(np.real(dist))
#----------------------------------------------------------------------------
| 45.386667 | 127 | 0.633079 |
d9298208457f7b5364431dd66f52aca62b1aaedb | 2,565 | py | Python | baldi/disassemble.py | equation314/hitcon-ctf-2018-writeup | 5d82f8c3c85492cacc39663ddc2a357767ca94cb | [
"MIT"
] | 3 | 2018-10-23T01:41:53.000Z | 2019-04-09T07:39:45.000Z | baldi/disassemble.py | equation314/hitcon-ctf-2018-writeup | 5d82f8c3c85492cacc39663ddc2a357767ca94cb | [
"MIT"
] | 1 | 2018-10-26T06:47:45.000Z | 2018-10-26T13:51:11.000Z | baldi/disassemble.py | equation314/hitcon-ctf-2018-writeup | 5d82f8c3c85492cacc39663ddc2a357767ca94cb | [
"MIT"
] | 1 | 2018-10-26T06:45:39.000Z | 2018-10-26T06:45:39.000Z | from pwn import *
from capstone import *
from leb128 import *
import re
def format_riscv(code, arch):
off = 32
res = []
for line in code.split('\n'):
s = line[off:].lower()
s = s.replace(',', ', ').replace('-', ' - ').replace('+', ' + ').replace(', - ', ', -')
p = s.find('#')
if p != -1:
s = s[:p]
arr = re.split(' +', s.strip())
s = ' '.join(arr)
res.append(s)
res = '\n'.join(res)
return res
def get_cs_params(arch):
if arch == 'i386':
return CS_ARCH_X86, CS_MODE_32
elif arch == 'x86_64':
return CS_ARCH_X86, CS_MODE_64
elif arch == 'arm':
return CS_ARCH_ARM, CS_MODE_ARM
elif arch == 'aarch64':
return CS_ARCH_ARM64, CS_MODE_ARM
elif arch == 'mips':
return CS_ARCH_MIPS, CS_MODE_MIPS32 | CS_MODE_BIG_ENDIAN
elif arch == 'powerpc':
return CS_ARCH_PPC, CS_MODE_32 | CS_MODE_BIG_ENDIAN
def disassemble_wasm(code):
code2op = {
'\x41': 'i32.const',
'\x67': 'i32.clz' ,
'\x68': 'i32.ctz' ,
'\x69': 'i32.popcn',
'\x6a': 'i32.add' ,
'\x6b': 'i32.sub' ,
'\x6c': 'i32.mul' ,
'\x6d': 'i32.div_s',
'\x6e': 'i32.div_u',
'\x6f': 'i32.rem_s',
'\x70': 'i32.rem_u',
'\x71': 'i32.and' ,
'\x72': 'i32.or' ,
'\x73': 'i32.xor' ,
'\x74': 'i32.shl' ,
'\x75': 'i32.shr_s',
'\x76': 'i32.shr_u',
'\x77': 'i32.rotl' ,
'\x78': 'i32.rotr' ,
'\x0f': 'return' ,
'\x0b': 'end' ,
}
i, n = 0, len(code)
res = []
while i < n:
if code[i] not in code2op:
print(' [UNKNOWN] %02x' % ord(code[i]))
op = code2op[code[i]]
if op == 'i32.const':
x, l = leb128s_decode(bytearray(code[i+1:i+5]))
i += l
op += ' ' + str(x)
i += 1
res.append(op)
return '\n'.join(res)
def disassemble(code, arch, show_addr=False):
if arch == 'riscv':
code = disasm(code, arch=arch)
code = format_riscv(code, arch)
return code
elif arch == 'wasm':
code = disassemble_wasm(code)
return code
cs_arch, cs_mode = get_cs_params(arch)
md = Cs(cs_arch, cs_mode)
res = []
for i in md.disasm(code, 0x0):
line = ('%s %s' % (i.mnemonic, i.op_str)).strip()
if show_addr:
line = '0x%08x[0x%02x]: %s'% (i.address, i.size, line)
res.append(line)
return '\n'.join(res)
| 26.71875 | 96 | 0.478363 |
39630c17b04fe1cd31cb2e68dfd7bf296c4e39f5 | 1,005 | py | Python | tests/commands/test_show.py | pecigonzalo/opta | 0259f128ad3cfc4a96fe1f578833de28b2f19602 | [
"Apache-2.0"
] | null | null | null | tests/commands/test_show.py | pecigonzalo/opta | 0259f128ad3cfc4a96fe1f578833de28b2f19602 | [
"Apache-2.0"
] | null | null | null | tests/commands/test_show.py | pecigonzalo/opta | 0259f128ad3cfc4a96fe1f578833de28b2f19602 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from click.testing import CliRunner
from pytest_mock import MockFixture
from opta.commands.show import config
from opta.layer import StructuredConfig
def test_show_config(mocker: MockFixture) -> None:
mock_bucket = "test-bucket"
mock_config = "test-config"
mock_yaml_config = "test-yaml-config"
structured_config: StructuredConfig = {
"opta_version": "dev",
"date": datetime.utcnow().isoformat(),
"original_spec": mock_yaml_config,
"defaults": {},
}
mock_logger = mocker.patch("opta.commands.show.logger")
mocker.patch(
"opta.commands.show.AWS.get_all_remote_configs",
return_value={mock_bucket: {mock_config: structured_config}},
)
runner = CliRunner()
result = runner.invoke(config, ["--cloud", "aws"])
assert result.exit_code == 0
mock_logger.info.assert_called_once_with(
f"# Bucket Name: {mock_bucket}\n# Config Name: {mock_config}\n{mock_yaml_config}\n"
)
| 31.40625 | 91 | 0.692537 |
d7095b61aba8fe68f100ea690f2adea78df22617 | 350 | py | Python | persistence.py | Chromadream/AKP_REDDIT | ad2bb7594f4845da1cddd6ad78c7b440277f4045 | [
"MIT"
] | null | null | null | persistence.py | Chromadream/AKP_REDDIT | ad2bb7594f4845da1cddd6ad78c7b440277f4045 | [
"MIT"
] | 4 | 2019-01-08T02:39:33.000Z | 2021-06-01T23:13:53.000Z | persistence.py | Chromadream/AKP_REDDIT | ad2bb7594f4845da1cddd6ad78c7b440277f4045 | [
"MIT"
] | 1 | 2019-01-08T02:14:33.000Z | 2019-01-08T02:14:33.000Z | def list_urls(filename):
persistence_file = open(filename,'r')
urllist = [url.rstrip() for url in persistence_file.readlines()]
persistence_file.close()
return urllist
def append_url(filename,url):
persistence_file = open(filename,'a')
persistence_file.write("\n")
persistence_file.write(url)
persistence_file.close() | 31.818182 | 68 | 0.72 |
31352d3423b28c3622b33cbe729bc2be6e8265de | 98,239 | py | Python | io_scene_vrm/importer/blend_model.py | saturday06/VRM_IMPORTER_for_Blender | 42562eead251c82cf25f63451de6598b1dfa6140 | [
"MIT"
] | 105 | 2020-08-31T13:03:27.000Z | 2021-02-05T12:33:53.000Z | io_scene_vrm/importer/blend_model.py | saturday06/VRM_IMPORTER_for_Blender | 42562eead251c82cf25f63451de6598b1dfa6140 | [
"MIT"
] | 44 | 2020-09-05T20:38:57.000Z | 2021-02-02T13:00:26.000Z | io_scene_vrm/importer/blend_model.py | saturday06/VRM_IMPORTER_for_Blender | 42562eead251c82cf25f63451de6598b1dfa6140 | [
"MIT"
] | 12 | 2020-10-02T14:10:31.000Z | 2021-02-01T10:51:52.000Z | """
Copyright (c) 2018 iCyP
Released under the MIT license
https://opensource.org/licenses/mit-license.php
"""
import base64
import collections
import contextlib
import copy
import itertools
import json
import math
import os.path
import secrets
import shutil
import string
import struct
import sys
import tempfile
from math import radians, sqrt
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple
import bpy
import mathutils
from mathutils import Matrix, Vector
from .. import deep, editor, exporter, vrm_types
from ..gl_constants import GlConstants
from ..shader import shader_node_group_import
from .py_model import (
PyMaterial,
PyMaterialGltf,
PyMaterialMtoon,
PyMaterialTransparentZWrite,
PyMesh,
PyModel,
parse_glb,
remove_unsafe_path_chars,
)
class RetryUsingLegacyImporter(Exception):
pass
class BlendModel:
def __init__(
self,
context: bpy.types.Context,
py_model: PyModel,
extract_textures_into_folder: bool,
make_new_texture_folder: bool,
legacy_importer: bool,
use_experimental_vrm_component_ui: bool,
) -> None:
self.use_experimental_vrm_component_ui = use_experimental_vrm_component_ui
self.meshes: Dict[int, bpy.types.Object] = {}
self.extract_textures_into_folder = extract_textures_into_folder
self.make_new_texture_folder = make_new_texture_folder
self.legacy_importer = legacy_importer
self.import_id = "BlenderVrmAddonImport" + (
"".join(secrets.choice(string.digits) for _ in range(10))
)
self.temp_object_name_count = 0
self.context = context
self.py_model = py_model
self.images: Dict[int, bpy.types.Image] = {}
self.armature: Optional[bpy.types.Object] = None
self.bones: Dict[int, bpy.types.Bone] = {}
self.gltf_materials: Dict[int, bpy.types.Material] = {}
self.vrm_materials: Dict[int, bpy.types.Material] = {}
self.primitive_obj_dict: Optional[Dict[Optional[int], List[float]]] = None
self.mesh_joined_objects = None
self.vrm0_extension: Optional[Dict[str, Any]] = None
self.vrm_extension: Optional[Dict[str, Any]] = None
self.vrm_model_build()
def vrm_model_build(self) -> None:
wm = bpy.context.window_manager
def prog(z: int) -> int:
wm.progress_update(z)
return z + 1
wm.progress_begin(0, 11)
try:
i = 1
affected_object = self.scene_init()
i = prog(i)
self.parse_vrm_extension()
i = prog(i)
if self.legacy_importer:
self.texture_load()
i = prog(i)
self.make_armature()
else:
self.summon()
if self.extract_textures_into_folder:
i = prog(i)
self.extract_textures()
i = prog(i)
self.use_fake_user_for_thumbnail()
i = prog(i)
self.connect_bones()
i = prog(i)
self.make_material()
i = prog(i)
if self.legacy_importer:
self.make_primitive_mesh_objects(wm, i)
                # i=prog(i) is done inside the function above
self.json_dump()
i = prog(i)
self.attach_vrm_attributes()
i = prog(i)
self.cleaning_data()
if self.legacy_importer:
i = prog(i)
self.set_bone_roll()
i = prog(i)
self.put_spring_bone_info()
i = prog(i)
self.finishing(affected_object)
finally:
wm.progress_end()
if (2, 90) <= bpy.app.version < (2, 91):
# https://developer.blender.org/T79182
bpy.context.window.cursor_modal_set("HAND")
bpy.context.window.cursor_modal_restore()
@staticmethod
def axis_glb_to_blender(vec3: Sequence[float]) -> List[float]:
return [vec3[i] * t for i, t in zip([0, 2, 1], [-1, 1, 1])]
def parse_vrm_extension(self) -> None:
json_dict = self.py_model.json
vrm = deep.get(json_dict, ["extensions", "VRMC_vrm"])
if isinstance(vrm, dict):
self.vrm_extension = vrm
return
vrm0 = deep.get(json_dict, ["extensions", "VRM"])
if not isinstance(vrm0, dict):
vrm0 = None
self.vrm0_extension = vrm0
def summon(self) -> None:
with open(self.py_model.filepath, "rb") as f:
json_dict, body_binary = parse_glb(f.read())
for key in ["nodes", "materials", "meshes"]:
if key not in json_dict or not isinstance(json_dict[key], list):
continue
for index, value in enumerate(json_dict[key]):
if not isinstance(value, dict):
continue
if "extras" not in value or not isinstance(value["extras"], dict):
value["extras"] = {}
value["extras"].update({self.import_id + key.capitalize(): index})
if (
key == "nodes"
and "mesh" in value
and isinstance(value["mesh"], int)
):
value["extras"].update({self.import_id + "Meshes": value["mesh"]})
image_name_prefix = self.import_id + "Image"
if isinstance(json_dict.get("images"), list):
for image_index, image in enumerate(json_dict["images"]):
if not isinstance(image, dict):
continue
if not isinstance(image.get("name"), str) or not image["name"]:
image["name"] = f"Image{image_index}"
image["name"] = (
image_name_prefix + str(image_index) + "_" + image["name"]
)
if isinstance(json_dict.get("meshes"), list):
for mesh in json_dict["meshes"]:
if (
isinstance(mesh.get("extras"), dict)
and isinstance(mesh["extras"].get("targetNames"), list)
) or not isinstance(mesh["primitives"], list):
continue
for primitive in mesh["primitives"]:
if (
not isinstance(primitive, dict)
or not isinstance(primitive.get("extras"), dict)
or not isinstance(primitive["extras"].get("targetNames"), list)
):
continue
if mesh.get("extras") is None:
mesh["extras"] = {}
mesh["extras"]["targetNames"] = primitive["extras"]["targetNames"]
break
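        # Workaround: the stock glTF importer skips images that no material references.
        # For every texture, append a tiny placeholder material/mesh/node/scene that uses
        # it as an emissiveTexture, so all images (including ones used only by the VRM
        # extension, e.g. MToon textures or the thumbnail) get imported. The temporary
        # objects are named via temp_object_name() and deleted again further below.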
if (
isinstance(json_dict.get("textures"), list)
and len(json_dict["textures"]) > 0
):
primitives = []
for texture_index, _ in enumerate(json_dict["textures"]):
if not isinstance(json_dict.get("buffers"), list):
json_dict["buffers"] = []
position_buffer_index = len(json_dict["buffers"])
position_buffer_bytes = struct.pack(
"<9f", 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0
)
json_dict["buffers"].append(
{
"uri": "data:application/gltf-buffer;base64,"
+ base64.b64encode(position_buffer_bytes).decode("ascii"),
"byteLength": len(position_buffer_bytes),
}
)
texcoord_buffer_index = len(json_dict["buffers"])
texcoord_buffer_bytes = struct.pack("<6f", 0.0, 0.0, 1.0, 0.0, 0.0, 1.0)
json_dict["buffers"].append(
{
"uri": "data:application/gltf-buffer;base64,"
+ base64.b64encode(texcoord_buffer_bytes).decode("ascii"),
"byteLength": len(texcoord_buffer_bytes),
}
)
if not isinstance(json_dict.get("bufferViews"), list):
json_dict["bufferViews"] = []
position_buffer_view_index = len(json_dict["bufferViews"])
json_dict["bufferViews"].append(
{
"buffer": position_buffer_index,
"byteOffset": 0,
"byteLength": len(position_buffer_bytes),
}
)
texcoord_buffer_view_index = len(json_dict["bufferViews"])
json_dict["bufferViews"].append(
{
"buffer": texcoord_buffer_index,
"byteOffset": 0,
"byteLength": 24,
}
)
if not isinstance(json_dict.get("accessors"), list):
json_dict["accessors"] = []
position_accessors_index = len(json_dict["accessors"])
json_dict["accessors"].append(
{
"bufferView": position_buffer_view_index,
"byteOffset": 0,
"type": "VEC3",
"componentType": GlConstants.FLOAT,
"count": 3,
"min": [0, 0, 0],
"max": [1, 1, 0],
}
)
texcoord_accessors_index = len(json_dict["accessors"])
json_dict["accessors"].append(
{
"bufferView": texcoord_buffer_view_index,
"byteOffset": 0,
"type": "VEC2",
"componentType": GlConstants.FLOAT,
"count": 3,
}
)
if not isinstance(json_dict.get("materials"), list):
json_dict["materials"] = []
tex_material_index = len(json_dict["materials"])
json_dict["materials"].append(
{
"name": self.temp_object_name(),
"emissiveTexture": {"index": texture_index},
}
)
primitives.append(
{
"attributes": {
"POSITION": position_accessors_index,
"TEXCOORD_0": texcoord_accessors_index,
},
"material": tex_material_index,
}
)
if not isinstance(json_dict.get("meshes"), list):
json_dict["meshes"] = []
tex_mesh_index = len(json_dict["meshes"])
json_dict["meshes"].append(
{"name": self.temp_object_name(), "primitives": primitives}
)
if not isinstance(json_dict.get("nodes"), list):
json_dict["nodes"] = []
tex_node_index = len(json_dict["nodes"])
json_dict["nodes"].append(
{"name": self.temp_object_name(), "mesh": tex_mesh_index}
)
if not isinstance(json_dict.get("scenes"), list):
json_dict["scenes"] = []
json_dict["scenes"].append(
{"name": self.temp_object_name(), "nodes": [tex_node_index]}
)
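        # Keep "orphan" nodes alive as bones: nodes reachable from a scene but not
        # referenced by any mesh or skin are collected, and a temporary skinned
        # placeholder mesh is appended whose skin lists them as joints, so that the
        # glTF importer turns them into armature bones. It is removed after import.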
if isinstance(json_dict.get("scenes"), list) and isinstance(
json_dict.get("nodes"), list
):
nodes = json_dict["nodes"]
skins = json_dict.get("skins", [])
for scene in json_dict["scenes"]:
if not isinstance(scene.get("nodes"), list):
continue
all_node_indices = list(scene["nodes"])
referenced_node_indices = list(scene["nodes"])
search_node_indices = list(scene["nodes"])
while search_node_indices:
search_node_index = search_node_indices.pop()
if not isinstance(search_node_index, int):
continue
all_node_indices.append(search_node_index)
if search_node_index < 0 or len(nodes) <= search_node_index:
continue
node = nodes[search_node_index]
if isinstance(node.get("mesh"), int):
referenced_node_indices.append(search_node_index)
if isinstance(node.get("skin"), int):
referenced_node_indices.append(search_node_index)
if node["skin"] < 0 or len(skins) <= node["skin"]:
continue
skin = skins[node["skin"]]
if isinstance(skin.get("skeleton"), int):
referenced_node_indices.append(skin["skeleton"])
if isinstance(skin.get("joints"), list):
referenced_node_indices.extend(skin["joints"])
if isinstance(node.get("children"), list):
search_node_indices.extend(node["children"])
retain_node_indices = list(dict.fromkeys(all_node_indices)) # distinct
for referenced_node_index in referenced_node_indices:
if referenced_node_index in retain_node_indices:
retain_node_indices.remove(referenced_node_index)
if not retain_node_indices:
continue
if not isinstance(json_dict.get("buffers"), list):
json_dict["buffers"] = []
position_buffer_index = len(json_dict["buffers"])
position_buffer_bytes = struct.pack(
"<9f", 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0
)
json_dict["buffers"].append(
{
"uri": "data:application/gltf-buffer;base64,"
+ base64.b64encode(position_buffer_bytes).decode("ascii"),
"byteLength": len(position_buffer_bytes),
}
)
joints_buffer_index = len(json_dict["buffers"])
joints_buffer_bytes = struct.pack(
"<12H", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
)
json_dict["buffers"].append(
{
"uri": "data:application/gltf-buffer;base64,"
+ base64.b64encode(joints_buffer_bytes).decode("ascii"),
"byteLength": len(joints_buffer_bytes),
}
)
weights_buffer_index = len(json_dict["buffers"])
weights_buffer_bytes = struct.pack(
"<12f", 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0
)
json_dict["buffers"].append(
{
"uri": "data:application/gltf-buffer;base64,"
+ base64.b64encode(weights_buffer_bytes).decode("ascii"),
"byteLength": len(weights_buffer_bytes),
}
)
if not isinstance(json_dict.get("bufferViews"), list):
json_dict["bufferViews"] = []
position_buffer_view_index = len(json_dict["bufferViews"])
json_dict["bufferViews"].append(
{
"buffer": position_buffer_index,
"byteOffset": 0,
"byteLength": len(position_buffer_bytes),
}
)
joints_buffer_view_index = len(json_dict["bufferViews"])
json_dict["bufferViews"].append(
{
"buffer": joints_buffer_index,
"byteOffset": 0,
"byteLength": len(joints_buffer_bytes),
}
)
weights_buffer_view_index = len(json_dict["bufferViews"])
json_dict["bufferViews"].append(
{
"buffer": weights_buffer_index,
"byteOffset": 0,
"byteLength": len(weights_buffer_bytes),
}
)
if not isinstance(json_dict.get("accessors"), list):
json_dict["accessors"] = []
position_accessors_index = len(json_dict["accessors"])
json_dict["accessors"].append(
{
"bufferView": position_buffer_view_index,
"byteOffset": 0,
"type": "VEC3",
"componentType": GlConstants.FLOAT,
"count": 3,
"min": [0, 0, 0],
"max": [1, 1, 0],
}
)
joints_accessors_index = len(json_dict["accessors"])
json_dict["accessors"].append(
{
"bufferView": joints_buffer_view_index,
"byteOffset": 0,
"type": "VEC4",
"componentType": GlConstants.UNSIGNED_SHORT,
"count": 3,
}
)
weights_accessors_index = len(json_dict["accessors"])
json_dict["accessors"].append(
{
"bufferView": weights_buffer_view_index,
"byteOffset": 0,
"type": "VEC4",
"componentType": GlConstants.FLOAT,
"count": 3,
}
)
primitives = [
{
"attributes": {
"POSITION": position_accessors_index,
"JOINTS_0": joints_accessors_index,
"WEIGHTS_0": weights_accessors_index,
}
}
]
if not isinstance(json_dict.get("meshes"), list):
json_dict["meshes"] = []
skin_mesh_index = len(json_dict["meshes"])
json_dict["meshes"].append(
{"name": self.temp_object_name(), "primitives": primitives}
)
if not isinstance(json_dict.get("skins"), list):
json_dict["skins"] = []
skin_index = len(json_dict["skins"])
json_dict["skins"].append({"joints": list(retain_node_indices)})
if not isinstance(json_dict.get("nodes"), list):
json_dict["nodes"] = []
skin_node_index = len(json_dict["nodes"])
json_dict["nodes"].append(
{
"name": self.temp_object_name(),
"mesh": skin_mesh_index,
"skin": skin_index,
}
)
scene["nodes"].append(skin_node_index)
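        # Run Blender's bundled glTF importer on the re-packed glb. If the first attempt
        # fails (some VRM files ship broken animation data), retry once with the
        # "animations" key stripped; if that also fails, fall back to the legacy importer
        # by raising RetryUsingLegacyImporter.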
full_vrm_import_success = False
with tempfile.NamedTemporaryFile(delete=False) as indexed_vrm_file:
indexed_vrm_file.write(exporter.glb_obj.pack_glb(json_dict, body_binary))
indexed_vrm_file.flush()
try:
bpy.ops.import_scene.gltf(
filepath=indexed_vrm_file.name,
import_pack_images=True,
bone_heuristic="FORTUNE",
)
full_vrm_import_success = True
except RuntimeError:
self.cleanup()
if not full_vrm_import_success:
            # Some VRM files ship broken animations.
# https://github.com/saturday06/VRM_Addon_for_Blender/issues/58
if "animations" in json_dict:
del json_dict["animations"]
with tempfile.NamedTemporaryFile(delete=False) as indexed_vrm_file:
indexed_vrm_file.write(
exporter.glb_obj.pack_glb(json_dict, body_binary)
)
indexed_vrm_file.flush()
try:
bpy.ops.import_scene.gltf(
filepath=indexed_vrm_file.name,
import_pack_images=True,
bone_heuristic="FORTUNE",
)
except RuntimeError as e:
self.cleanup()
raise RetryUsingLegacyImporter() from e
spec_version: Optional[str] = None
hips_bone_node_index: Optional[int] = None
if self.vrm_extension is not None:
spec_version = "1.0_draft"
hips_index = deep.get(
self.vrm_extension, ["humanoid", "humanBones", "hips", "node"]
)
if isinstance(hips_index, int):
hips_bone_node_index = hips_index
elif self.vrm0_extension is not None:
spec_version = "0.0"
human_bones = deep.get(self.vrm0_extension, ["humanoid", "humanBones"], [])
if isinstance(human_bones, list):
for human_bone in human_bones:
if (
isinstance(human_bone, dict)
and human_bone.get("bone") == "hips"
and isinstance(human_bone.get("node"), int)
):
hips_bone_node_index = human_bone["node"]
break
extras_node_index_key = self.import_id + "Nodes"
if hips_bone_node_index is not None:
for obj in bpy.context.selectable_objects:
data = obj.data
if not isinstance(data, bpy.types.Armature):
continue
for bone in data.bones:
bone_node_index = bone.get(extras_node_index_key)
if not isinstance(bone_node_index, int):
continue
if 0 <= bone_node_index < len(self.py_model.json["nodes"]):
node = self.py_model.json["nodes"][bone_node_index]
node["name"] = bone.name
del bone[extras_node_index_key]
self.bones[bone_node_index] = bone
if (
self.armature is not None
or bone_node_index != hips_bone_node_index
):
continue
if spec_version == "0.0" and obj.rotation_mode == "QUATERNION":
obj.rotation_quaternion.rotate(
mathutils.Euler((0.0, 0.0, math.pi), "XYZ")
)
obj.select_set(True)
previous_active = bpy.context.view_layer.objects.active
try:
bpy.context.view_layer.objects.active = obj
bpy.ops.object.transform_apply(rotation=True)
finally:
bpy.context.view_layer.objects.active = previous_active
self.armature = obj
extras_mesh_index_key = self.import_id + "Meshes"
for obj in bpy.context.selectable_objects:
data = obj.data
if not isinstance(data, bpy.types.Mesh):
continue
mesh_index = obj.data.get(extras_mesh_index_key)
if not isinstance(mesh_index, int):
mesh_index = obj.get(extras_mesh_index_key)
if not isinstance(mesh_index, int):
continue
del obj[extras_mesh_index_key]
self.meshes[mesh_index] = obj
else:
del obj.data[extras_mesh_index_key]
self.meshes[mesh_index] = obj
extras_material_index_key = self.import_id + "Materials"
for material in bpy.data.materials:
material_index = material.get(extras_material_index_key)
if not isinstance(material_index, int):
continue
del material[extras_material_index_key]
self.gltf_materials[material_index] = material
for image in list(bpy.data.images):
if not image.name.startswith(image_name_prefix):
continue
image_index = int(
"".join(image.name.split(image_name_prefix)[1:]).split("_", maxsplit=1)[
0
]
)
if 0 <= image_index < len(json_dict["images"]):
                # image.name may get truncated automatically during import, so restore it from the json value
indexed_image_name = json_dict["images"][image_index].get("name")
if indexed_image_name:
image.name = "_".join(indexed_image_name.split("_")[1:])
else:
image.name = f"Image{image_index}"
else:
image.name = "_".join(image.name.split("_")[1:])
image.unpack(method="WRITE_ORIGINAL")
image_path = os.path.join(
os.path.dirname(image.filepath_from_user()),
remove_unsafe_path_chars(image.name)
+ os.path.splitext(bpy.path.basename(image.filepath_from_user()))[1],
)
with contextlib.suppress(IOError, shutil.SameFileError):
shutil.copyfile(image.filepath_from_user(), image_path)
image.filepath = image_path
image.reload()
image.pack()
self.images[image_index] = image
if bpy.context.object is not None and bpy.context.object.mode == "EDIT":
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.select_all(action="DESELECT")
for obj in list(bpy.data.objects):
if self.is_temp_object_name(obj.name):
obj.select_set(True)
bpy.ops.object.delete()
for material in list(bpy.data.materials):
if self.is_temp_object_name(material.name) and material.users == 0:
print(material.name)
bpy.data.materials.remove(material)
armature = self.armature
if armature is None:
raise Exception("Failed to read VRM Humanoid")
def cleanup(self) -> None:
if (
self.context.view_layer.objects.active is not None
and self.context.view_layer.objects.active.mode != "OBJECT"
):
bpy.ops.object.mode_set(mode="OBJECT")
meshes_key = self.import_id + "Meshes"
nodes_key = self.import_id + "Nodes"
remove_objs = []
for obj in list(self.context.scene.collection.objects):
if isinstance(obj.data, bpy.types.Armature):
for bone in obj.data.bones:
if nodes_key in bone:
remove_objs.append(obj)
break
continue
if isinstance(obj.data, bpy.types.Mesh) and (
nodes_key in obj.data
or meshes_key in obj.data
or self.is_temp_object_name(obj.data.name)
):
remove_objs.append(obj)
continue
if (
nodes_key in obj
or meshes_key in obj
or self.is_temp_object_name(obj.name)
):
remove_objs.append(obj)
bpy.ops.object.select_all(action="DESELECT")
for obj in remove_objs:
obj.select_set(True)
bpy.ops.object.delete()
retry = True
while retry:
retry = False
for obj in bpy.data.objects:
if obj in remove_objs and not obj.users:
retry = True
bpy.data.objects.remove(obj, do_unlink=True)
def temp_object_name(self) -> str:
self.temp_object_name_count += 1
return f"{self.import_id}Temp_{self.temp_object_name_count}_"
def is_temp_object_name(self, name: str) -> bool:
return name.startswith(f"{self.import_id}Temp_")
def connect_bones(self) -> None:
armature = self.armature
if armature is None:
raise Exception("armature is None")
# Blender_VRMAutoIKSetup (MIT License)
# https://booth.pm/ja/items/1697977
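        # VRoid Studio exports a few bones (hands and lower legs) whose heads do not
        # touch their parent's tail. Snap the parent tails onto those heads first, then
        # let connect_parent_tail_and_child_head_if_same_position() mark matching
        # parent/child pairs as connected.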
previous_active = bpy.context.view_layer.objects.active
try:
            bpy.context.view_layer.objects.active = self.armature  # make the armature active
            bpy.ops.object.mode_set(mode="EDIT")  # enter edit mode
            disconnected_bone_names = []  # list of bones that are not connected
vrm0_extension = self.vrm0_extension
if vrm0_extension is not None and str(
vrm0_extension.get("exporterVersion")
).startswith("VRoidStudio-"):
disconnected_bone_names = [
"J_Bip_R_Hand",
"J_Bip_L_Hand",
"J_Bip_L_LowerLeg",
"J_Bip_R_LowerLeg",
]
            bpy.ops.armature.select_all(action="SELECT")  # select all bones
            for bone in bpy.context.selected_bones:  # iterate over the selected bones
                for (
                    disconnected_bone_name
                ) in disconnected_bone_names:  # iterate over the disconnected bone names
                    # process bones whose name appears in the list
                    if bone.name == disconnected_bone_name:
                        # store the bone being processed in disconnected_bone
                        disconnected_bone = armature.data.edit_bones[
                            disconnected_bone_name
                        ]
                        # align the parent bone's tail with this bone's head
                        disconnected_bone.parent.tail = disconnected_bone.head
editor.make_armature.connect_parent_tail_and_child_head_if_same_position(
armature.data
)
bpy.ops.object.mode_set(mode="OBJECT")
finally:
bpy.context.view_layer.objects.active = previous_active
def scene_init(self) -> bpy.types.Object:
        # If the active_object is hidden, bpy.ops.object.mode_set.poll() fails with an error; work around that here and undo it later
affected_object = None
if self.context.active_object is not None:
if (
hasattr(self.context.active_object, "hide_viewport")
and self.context.active_object.hide_viewport
):
self.context.active_object.hide_viewport = False
affected_object = self.context.active_object
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.select_all(action="DESELECT")
return affected_object
def finishing(self, affected_object: bpy.types.Object) -> None:
        # undo the changes made in scene_init
if affected_object is not None:
affected_object.hide_viewport = True
for obj in bpy.context.selected_objects:
obj.select_set(False)
# image_path_to Texture
def texture_load(self) -> None:
for (image_index, image_props) in enumerate(self.py_model.image_properties):
img = bpy.data.images.load(image_props.filepath)
if not self.extract_textures_into_folder:
# https://github.com/KhronosGroup/glTF-Blender-IO/blob/blender-v2.82-release/addons/io_scene_gltf2/blender/imp/gltf2_blender_image.py#L100
img.pack()
self.images[image_index] = img
def use_fake_user_for_thumbnail(self) -> None:
        # The VRM spec says the thumbnail is an image index, but the UniVRM implementation treats it as a texture index
# https://github.com/vrm-c/UniVRM/blob/v0.67.0/Assets/VRM/Runtime/IO/VRMImporterContext.cs#L308
json_texture_index = deep.get(self.vrm0_extension, ["meta", "texture"], -1)
if not isinstance(json_texture_index, int):
raise Exception('json["extensions"]["VRM"]["meta"]["texture"] is not int')
json_textures = self.py_model.json.get("textures", [])
if not isinstance(json_textures, list):
raise Exception('json["textures"] is not list')
if json_texture_index not in (-1, None) and (
"textures" in self.py_model.json and len(json_textures) > json_texture_index
):
image_index = json_textures[json_texture_index].get("source")
if image_index in self.images:
self.images[image_index].use_fake_user = True
def make_armature(self) -> None:
# build bones as armature
armature_data = bpy.data.armatures.new("Armature")
self.armature = bpy.data.objects.new(armature_data.name, armature_data)
self.context.scene.collection.objects.link(self.armature)
bpy.context.view_layer.objects.active = self.armature
bpy.ops.object.mode_set(mode="EDIT")
self.bones = {}
armature_edit_bones: Dict[int, bpy.types.Bone] = {}
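        # bone_chain() below walks the glTF node tree via the bone_nodes work list and
        # fills in each EditBone: the head is the parent's head plus the node's local
        # translation, and the tail is a heuristic, since glTF only stores joints and
        # has no bone lengths.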
# region bone recursive func
def bone_chain(node_id: int, parent_node_id: int) -> None:
            if node_id == -1:  # when the node itself is the root of the root
return
py_bone = self.py_model.nodes_dict[node_id]
            if py_bone.blend_bone:  # if a bone that was already assigned shows up, move it to its parent's position
if parent_node_id == -1 or py_bone.blend_bone.parent is not None:
return
py_bone.blend_bone.parent = self.bones[parent_node_id]
li = [py_bone.blend_bone]
while li:
bo = li.pop()
bo.translate(self.bones[parent_node_id].head)
for ch in bo.children:
li.append(ch)
return
if py_bone.mesh_id is not None and py_bone.children is None:
                return  # nodes that have a mesh attribute and no children do not generate a bone
b = armature_edit_bones[node_id]
py_bone.name = b.name
py_bone.blend_bone = b
if parent_node_id == -1:
parent_pos = [0, 0, 0]
else:
parent_pos = self.bones[parent_node_id].head
b.head = tuple(
Vector(parent_pos) + Vector(self.axis_glb_to_blender(py_bone.position))
)
            # region temporary tail pos (glTF has no bones; it only defines joints)
def vector_length(bone_vector: List[float]) -> float:
return sqrt(
pow(bone_vector[0], 2)
+ pow(bone_vector[1], 2)
+ pow(bone_vector[2], 2)
)
            # glTF only defines joints, with no bone lengths or directions, so point and stretch/shrink the bones in a plausible-looking direction
if py_bone.children is None:
                if parent_node_id == -1:  # standalone root: just point it upward
b.tail = [b.head[0], b.head[1] + 0.05, b.head[2]]
                else:  # normalize length to 0.03; leaf bone: keep a little distance from the parent
                    # avoid division by zero (and on a whim)
length = max(
0.01,
vector_length(self.axis_glb_to_blender(py_bone.position)) * 30,
)
pos_diff = [
self.axis_glb_to_blender(py_bone.position)[i] / length
for i in range(3)
]
if vector_length(pos_diff) <= 0.001:
                        # if the bone is 1mm or shorter, extend it upward; a zero-length bone disappears when returning to OBJECT mode, so point it up
pos_diff[1] += 0.01
b.tail = [b.head[i] + pos_diff[i] for i in range(3)]
            else:  # look toward the midpoint of the children's directions
mean_relate_pos = Vector([0.0, 0.0, 0.0])
for child_id in py_bone.children:
mean_relate_pos += Vector(
self.axis_glb_to_blender(
self.py_model.nodes_dict[child_id].position
)
)
children_len = len(py_bone.children)
if children_len > 0:
mean_relate_pos = mean_relate_pos / children_len
if (
vector_length(mean_relate_pos) <= 0.001
                ):  # if the bone is 1mm or shorter, extend it 10cm upward
mean_relate_pos[1] += 0.1
b.tail = tuple(Vector(b.head) + mean_relate_pos)
# endregion tail pos
self.bones[node_id] = b
if parent_node_id != -1:
b.parent = self.bones[parent_node_id]
if py_bone.children is not None:
for x in py_bone.children:
bone_nodes.append((x, node_id))
# endregion bone recursive func
root_node_set = list(dict.fromkeys(self.py_model.skins_root_node_list))
root_nodes = (
root_node_set
if root_node_set
else [
node
for scene in self.py_model.json["scenes"]
for node in scene["nodes"]
]
)
# generate edit_bones sorted by node_id for deterministic vrm output
def find_connected_node_ids(parent_node_ids: Sequence[int]) -> Set[int]:
node_ids = set(parent_node_ids)
for parent_node_id in parent_node_ids:
py_bone = self.py_model.nodes_dict[parent_node_id]
if py_bone.children is not None:
node_ids |= find_connected_node_ids(py_bone.children)
return node_ids
for node_id in sorted(find_connected_node_ids(root_nodes)):
bone_name = self.py_model.nodes_dict[node_id].name
armature_edit_bones[node_id] = self.armature.data.edit_bones.new(bone_name)
bone_nodes = [(root_node, -1) for root_node in root_nodes]
while bone_nodes:
bone_chain(*bone_nodes.pop())
        # called once the bones are built
self.context.scene.view_layers.update()
bpy.ops.object.mode_set(mode="OBJECT")
def extract_textures(self) -> None:
dir_path = os.path.abspath(self.py_model.filepath) + ".textures"
if self.make_new_texture_folder:
for i in range(100001):
checking_dir_path = dir_path if i == 0 else f"{dir_path}.{i}"
if not os.path.exists(checking_dir_path):
os.mkdir(checking_dir_path)
dir_path = checking_dir_path
break
for image_index, image in self.images.items():
image_name = image.name
image_type = image.file_format.lower()
if image_name == "":
image_name = "texture_" + str(image_index)
print(f"no name image is named {image_name}")
elif len(image_name) >= 100:
new_image_name = "texture_too_long_name_" + str(image_index)
print(f"too long name image: {image_name} is named {new_image_name}")
image_name = new_image_name
image_name = remove_unsafe_path_chars(image_name)
image_path = os.path.join(dir_path, image_name)
if not image_name.lower().endswith("." + image_type.lower()):
image_path += "." + image_type
if not os.path.exists(image_path):
image.unpack(method="WRITE_ORIGINAL")
with contextlib.suppress(IOError, shutil.SameFileError):
shutil.copyfile(image.filepath_from_user(), image_path)
image.filepath = image_path
image.reload()
else:
written_flag = False
for i in range(100000):
root, ext = os.path.splitext(image_name)
second_image_name = root + "_" + str(i) + ext
image_path = os.path.join(dir_path, second_image_name)
if not os.path.exists(image_path):
image.unpack(method="WRITE_ORIGINAL")
shutil.copyfile(image.filepath_from_user(), image_path)
image.filepath = image_path
image.reload()
written_flag = True
break
if not written_flag:
print(
"There are more than 100000 images with the same name in the folder."
+ f" Failed to write file: {image_name}"
)
# region material
def make_material(self) -> None:
        # rough implementation, needs adjustment
for index, mat in enumerate(self.py_model.materials):
if (
bpy.app.version >= (2, 83)
and isinstance(mat, PyMaterialGltf)
and not mat.vrm_addon_for_blender_legacy_gltf_material
):
continue
b_mat = bpy.data.materials.new(mat.name)
b_mat["shader_name"] = mat.shader_name
if isinstance(mat, PyMaterialGltf):
self.build_material_from_gltf(b_mat, mat)
elif isinstance(mat, PyMaterialMtoon):
self.build_material_from_mtoon(b_mat, mat)
elif isinstance(mat, PyMaterialTransparentZWrite):
self.build_material_from_transparent_z_write(b_mat, mat)
else:
print(f"unknown material {mat.name}")
self.node_placer(b_mat.node_tree.nodes["Material Output"])
self.vrm_materials[index] = b_mat
gltf_material_original_names = {
vrm_material_index: self.gltf_materials[vrm_material_index].name
for vrm_material_index in self.vrm_materials
if vrm_material_index in self.gltf_materials
}
for mesh in self.meshes.values():
for material_index, material in enumerate(mesh.data.materials):
for vrm_material_index, vrm_material in self.vrm_materials.items():
material_original_name = gltf_material_original_names.get(
vrm_material_index
)
if (
material_original_name is None
or material != self.gltf_materials.get(vrm_material_index)
):
continue
material.name = "glTF_VRM_overridden_" + material_original_name
vrm_material.name = material_original_name
mesh.data.materials[material_index] = vrm_material
break
# region material_util func
def set_material_transparent(
self,
b_mat: bpy.types.Material,
pymat: PyMaterial,
transparent_mode: str,
) -> None:
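        # Mapping of VRM render modes to Blender: "OPAQUE" keeps the defaults, "CUTOUT"
        # uses alpha clip with the material's cutoff value, and everything else
        # (Z_TRANSPARENCY / TransparentZWrite) uses hashed alpha blending and shadows.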
if transparent_mode == "OPAQUE":
pass
elif transparent_mode == "CUTOUT":
b_mat.blend_method = "CLIP"
if isinstance(pymat, PyMaterialMtoon): # TODO: TransparentZWrite?
b_mat.alpha_threshold = pymat.float_props_dic.get("_Cutoff", 0.5)
else:
b_mat.alpha_threshold = getattr(pymat, "alphaCutoff", 0.5)
b_mat.shadow_method = "CLIP"
else: # Z_TRANSPARENCY or Z()_zwrite
if "transparent_shadow_method" in dir(b_mat): # old blender 2.80 beta
b_mat.blend_method = "HASHED"
b_mat.transparent_shadow_method = "HASHED"
else:
b_mat.blend_method = "HASHED"
b_mat.shadow_method = "HASHED"
def material_init(self, b_mat: bpy.types.Material) -> None:
b_mat.use_nodes = True
for node in b_mat.node_tree.nodes:
if node.type != "OUTPUT_MATERIAL":
b_mat.node_tree.nodes.remove(node)
def connect_value_node(
self,
material: bpy.types.ShaderNode,
value: float,
socket_to_connect: bpy.types.NodeSocketFloat,
) -> bpy.types.ShaderNodeValue:
value_node = material.node_tree.nodes.new("ShaderNodeValue")
value_node.label = socket_to_connect.name
value_node.outputs[0].default_value = value
material.node_tree.links.new(socket_to_connect, value_node.outputs[0])
return value_node
def connect_rgb_node(
self,
material: bpy.types.ShaderNode,
color: Optional[Sequence[float]],
socket_to_connect: bpy.types.NodeSocketColor,
default_color: Optional[List[float]] = None,
) -> bpy.types.ShaderNodeRGB:
rgb_node = material.node_tree.nodes.new("ShaderNodeRGB")
rgb_node.label = socket_to_connect.name
rgb_node.outputs[0].default_value = (
color if color else (default_color if default_color else [1, 1, 1, 1])
)
material.node_tree.links.new(socket_to_connect, rgb_node.outputs[0])
return rgb_node
def connect_texture_node(
self,
material: bpy.types.ShaderNode,
tex_index: int,
color_socket_to_connect: Optional[bpy.types.NodeSocketColor] = None,
alpha_socket_to_connect: Optional[bpy.types.NodeSocketFloat] = None,
) -> bpy.types.ShaderNodeTexImage:
tex = self.py_model.json["textures"][tex_index]
image_index = tex["source"]
sampler = (
self.py_model.json["samplers"][tex["sampler"]]
if "samplers" in self.py_model.json
else [{"wrapS": GlConstants.REPEAT, "magFilter": GlConstants.LINEAR}]
)
image_node = material.node_tree.nodes.new("ShaderNodeTexImage")
if image_index in self.images:
image_node.image = self.images[image_index]
if color_socket_to_connect is not None:
image_node.label = color_socket_to_connect.name
elif alpha_socket_to_connect is not None:
image_node.label = alpha_socket_to_connect.name
else:
image_node.label = "what_is_this_node"
# blender is ('Linear', 'Closest', 'Cubic', 'Smart') glTF is Linear, Closest
filter_type = (
sampler["magFilter"] if "magFilter" in sampler else GlConstants.LINEAR
)
if filter_type == GlConstants.NEAREST:
image_node.interpolation = "Closest"
else:
image_node.interpolation = "Linear"
# blender is ('REPEAT', 'EXTEND', 'CLIP') glTF is CLAMP_TO_EDGE,MIRRORED_REPEAT,REPEAT
wrap_type = sampler["wrapS"] if "wrapS" in sampler else GlConstants.REPEAT
if wrap_type in (GlConstants.REPEAT, GlConstants.MIRRORED_REPEAT):
image_node.extension = "REPEAT"
else:
image_node.extension = "EXTEND"
if None not in (color_socket_to_connect, tex_index):
material.node_tree.links.new(
color_socket_to_connect, image_node.outputs["Color"]
)
if None not in (alpha_socket_to_connect, tex_index):
material.node_tree.links.new(
alpha_socket_to_connect, image_node.outputs["Alpha"]
)
return image_node
def connect_with_color_multiply_node(
self,
material: bpy.types.ShaderNode,
color: List[float],
tex_index: int,
socket_to_connect: bpy.types.NodeSocketColor,
) -> bpy.types.ShaderNodeMixRGB:
multiply_node = material.node_tree.nodes.new("ShaderNodeMixRGB")
multiply_node.blend_type = "MULTIPLY"
self.connect_rgb_node(material, color, multiply_node.inputs[1])
self.connect_texture_node(material, tex_index, multiply_node.inputs[2])
material.node_tree.links.new(socket_to_connect, multiply_node.outputs[0])
return multiply_node
def node_group_create(
self, material: bpy.types.ShaderNode, shader_node_group_name: str
) -> bpy.types.ShaderNodeGroup:
node_group = material.node_tree.nodes.new("ShaderNodeGroup")
node_group.node_tree = bpy.data.node_groups[shader_node_group_name]
return node_group
def node_placer(self, parent_node: bpy.types.ShaderNode) -> None:
bottom_pos = [parent_node.location[0] - 200, parent_node.location[1]]
for child_node in [
link.from_node for socket in parent_node.inputs for link in socket.links
]:
if child_node.type != "GROUP":
child_node.hide = True
child_node.location = bottom_pos
bottom_pos[1] -= 40
for _ in [
link.from_node for socket in child_node.inputs for link in socket.links
]:
self.node_placer(child_node)
# endregion material_util func
def build_principle_from_gltf_mat(
self, b_mat: bpy.types.Material, pymat: PyMaterialGltf
) -> None:
self.material_init(b_mat)
principled_node = b_mat.node_tree.nodes.new("ShaderNodeBsdfPrincipled")
b_mat.node_tree.links.new(
b_mat.node_tree.nodes["Material Output"].inputs["Surface"],
principled_node.outputs["BSDF"],
)
# self.connect_with_color_multiply_node(
# b_mat, pymat.base_color, pymat.color_texture_index, principled_node.inputs["Base Color"]
# )
if pymat.color_texture_index is not None:
self.connect_texture_node(
b_mat,
pymat.color_texture_index,
principled_node.inputs["Base Color"],
principled_node.inputs["Alpha"],
)
# self.connect_value_node(b_mat, pymat.metallic_factor,sg.inputs["metallic"])
# self.connect_value_node(b_mat, pymat.roughness_factor,sg.inputs["roughness"])
# self.connect_value_node(b_mat, pymat.metallic_factor,sg.inputs["metallic"])
# self.connect_value_node(b_mat, pymat.roughness_factor,sg.inputs["roughness"])
if pymat.normal_texture_index is not None:
self.connect_texture_node(
b_mat, pymat.normal_texture_index, principled_node.inputs["Normal"]
)
transparent_exchange_dic = {
"OPAQUE": "OPAQUE",
"MASK": "CUTOUT",
"Z_TRANSPARENCY": "Z_TRANSPARENCY",
}
self.set_material_transparent(
b_mat, pymat, transparent_exchange_dic[pymat.alpha_mode]
)
b_mat.use_backface_culling = not pymat.double_sided
def build_material_from_gltf(
self, b_mat: bpy.types.Material, pymat: PyMaterialGltf
) -> None:
self.material_init(b_mat)
gltf_node_name = "GLTF"
shader_node_group_import(gltf_node_name)
sg = self.node_group_create(b_mat, gltf_node_name)
b_mat.node_tree.links.new(
b_mat.node_tree.nodes["Material Output"].inputs["Surface"],
sg.outputs["BSDF"],
)
self.connect_rgb_node(b_mat, pymat.base_color, sg.inputs["base_Color"])
if pymat.color_texture_index is not None:
self.connect_texture_node(
b_mat, pymat.color_texture_index, sg.inputs["color_texture"]
)
self.connect_value_node(b_mat, pymat.metallic_factor, sg.inputs["metallic"])
self.connect_value_node(b_mat, pymat.roughness_factor, sg.inputs["roughness"])
if pymat.metallic_roughness_texture_index is not None:
self.connect_texture_node(
b_mat,
pymat.metallic_roughness_texture_index,
sg.inputs["metallic_roughness_texture"],
)
if isinstance(pymat.emissive_factor, collections.Iterable):
self.connect_rgb_node(
b_mat, [*pymat.emissive_factor, 1], sg.inputs["emissive_color"]
)
if pymat.emissive_texture_index is not None:
self.connect_texture_node(
b_mat, pymat.emissive_texture_index, sg.inputs["emissive_texture"]
)
if pymat.normal_texture_index is not None:
self.connect_texture_node(
b_mat, pymat.normal_texture_index, sg.inputs["normal"]
)
if pymat.occlusion_texture_index is not None:
self.connect_texture_node(
b_mat, pymat.occlusion_texture_index, sg.inputs["occlusion_texture"]
)
self.connect_value_node(b_mat, pymat.shadeless, sg.inputs["unlit"])
transparent_exchange_dic = {
"OPAQUE": "OPAQUE",
"MASK": "CUTOUT",
"Z_TRANSPARENCY": "Z_TRANSPARENCY",
}
self.set_material_transparent(
b_mat, pymat, transparent_exchange_dic[pymat.alpha_mode]
)
b_mat.use_backface_culling = not pymat.double_sided
def build_material_from_mtoon(
self, b_mat: bpy.types.Material, pymat: PyMaterialMtoon
) -> None:
self.material_init(b_mat)
shader_node_group_name = "MToon_unversioned"
sphere_add_vector_node_group_name = "matcap_vector"
shader_node_group_import(shader_node_group_name)
shader_node_group_import(sphere_add_vector_node_group_name)
sg = self.node_group_create(b_mat, shader_node_group_name)
b_mat.node_tree.links.new(
b_mat.node_tree.nodes["Material Output"].inputs["Surface"],
sg.outputs["Emission"],
)
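        # The MToon parameters from the VRM materialProperties are wired into the
        # imported "MToon_unversioned" node group: float props become Value nodes, color
        # props become RGB nodes, and textures become Image Texture nodes driven by a
        # shared UV Map + Mapping node (which applies the _MainTex offset/tiling).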
float_prop_exchange_dic = vrm_types.MaterialMtoon.float_props_exchange_dic
for k, v in pymat.float_props_dic.items():
if k == "_CullMode":
if v == 2: # 0: no cull 1:front cull 2:back cull
b_mat.use_backface_culling = True
elif v == 0:
b_mat.use_backface_culling = False
if k in [
key for key, val in float_prop_exchange_dic.items() if val is not None
]:
if v is not None:
self.connect_value_node(
b_mat, v, sg.inputs[float_prop_exchange_dic[k]]
)
else:
b_mat[k] = v
for k, v in pymat.keyword_dic.items():
b_mat[k] = v
uv_offset_tiling_value: Sequence[float] = [0, 0, 1, 1]
vector_props_dic = vrm_types.MaterialMtoon.vector_props_exchange_dic
for k, vec in pymat.vector_props_dic.items():
if k in ["_Color", "_ShadeColor", "_EmissionColor", "_OutlineColor"]:
self.connect_rgb_node(b_mat, vec, sg.inputs[vector_props_dic[k]])
elif k == "_RimColor":
self.connect_rgb_node(
b_mat,
vec,
sg.inputs[vector_props_dic[k]],
default_color=[0, 0, 0, 1],
)
elif k == "_MainTex" and vec is not None:
uv_offset_tiling_value = vec
else:
b_mat[k] = vec
uv_map_node = b_mat.node_tree.nodes.new("ShaderNodeUVMap")
uv_offset_tiling_node = b_mat.node_tree.nodes.new("ShaderNodeMapping")
if bpy.app.version < (2, 81):
uv_offset_tiling_node.translation[0] = uv_offset_tiling_value[0]
uv_offset_tiling_node.translation[1] = uv_offset_tiling_value[1]
uv_offset_tiling_node.scale[0] = uv_offset_tiling_value[2]
uv_offset_tiling_node.scale[1] = uv_offset_tiling_value[3]
else:
uv_offset_tiling_node.inputs["Location"].default_value[
0
] = uv_offset_tiling_value[0]
uv_offset_tiling_node.inputs["Location"].default_value[
1
] = uv_offset_tiling_value[1]
uv_offset_tiling_node.inputs["Scale"].default_value[
0
] = uv_offset_tiling_value[2]
uv_offset_tiling_node.inputs["Scale"].default_value[
1
] = uv_offset_tiling_value[3]
b_mat.node_tree.links.new(
uv_offset_tiling_node.inputs[0], uv_map_node.outputs[0]
)
def connect_uv_map_to_texture(texture_node: bpy.types.ShaderNode) -> None:
b_mat.node_tree.links.new(
texture_node.inputs[0], uv_offset_tiling_node.outputs[0]
)
tex_dic = vrm_types.MaterialMtoon.texture_kind_exchange_dic
for tex_name, tex_index in pymat.texture_index_dic.items():
if tex_index is None:
continue
image_index = self.py_model.json["textures"][tex_index]["source"]
if image_index not in self.images:
continue
if tex_name not in tex_dic.keys():
if "unknown_texture" not in b_mat:
b_mat["unknown_texture"] = {}
b_mat["unknown_texture"].update(
{tex_name: self.py_model.json["textures"][tex_index]["name"]}
)
print(f"unknown texture {tex_name}")
elif tex_name == "_MainTex":
main_tex_node = self.connect_texture_node(
b_mat,
tex_index,
sg.inputs[tex_dic[tex_name]],
sg.inputs[tex_dic[tex_name] + "Alpha"],
)
connect_uv_map_to_texture(main_tex_node)
elif tex_name == "_BumpMap":
# If .blend file already has VRM that is imported by older version,
# 'sg' has old 'MToon_unversioned', which has 'inputs["NomalmapTexture"]'. # noqa: SC100
# But 'tex_dic' holds name that is corrected, and it causes KeyError to reference 'sg' with it
color_socket_name = "NomalmapTexture"
if tex_dic[tex_name] in sg.inputs:
color_socket_name = tex_dic[tex_name]
normalmap_node = self.connect_texture_node(
b_mat,
tex_index,
color_socket_to_connect=sg.inputs[color_socket_name],
)
try:
normalmap_node.image.colorspace_settings.name = "Non-Color"
                except TypeError:  # when "Non-Color" is not available
                    normalmap_node.image.colorspace_settings.name = (
                        "Linear"  # compatibility code for the 2.80 beta
)
connect_uv_map_to_texture(normalmap_node)
elif tex_name == "_ReceiveShadowTexture":
rs_tex_node = self.connect_texture_node(
b_mat,
tex_index,
alpha_socket_to_connect=sg.inputs[tex_dic[tex_name] + "_alpha"],
)
connect_uv_map_to_texture(rs_tex_node)
elif tex_name == "_SphereAdd":
tex_node = self.connect_texture_node(
b_mat,
tex_index,
color_socket_to_connect=sg.inputs[tex_dic[tex_name]],
)
b_mat.node_tree.links.new(
tex_node.inputs["Vector"],
self.node_group_create(
b_mat, sphere_add_vector_node_group_name
).outputs["Vector"],
)
else:
if tex_dic.get(tex_name) is not None: # Shade,Emissive,Rim,UVanimMask
other_tex_node = self.connect_texture_node(
b_mat,
tex_index,
color_socket_to_connect=sg.inputs[tex_dic[tex_name]],
)
connect_uv_map_to_texture(other_tex_node)
else:
print(f"{tex_name} is unknown texture")
transparent_mode_float = pymat.float_props_dic["_BlendMode"]
transparent_mode = "OPAQUE"
if transparent_mode_float is None:
pass
elif math.fabs(transparent_mode_float - 1) < 0.001:
transparent_mode = "CUTOUT"
elif math.fabs(transparent_mode_float - 2) < 0.001:
transparent_mode = "Z_TRANSPARENCY"
elif math.fabs(transparent_mode_float - 3) < 0.001:
transparent_mode = "Z_TRANSPARENCY"
            # treat Trans_Zwrite (3) the same as 2
self.set_material_transparent(b_mat, pymat, transparent_mode)
def build_material_from_transparent_z_write(
self, b_mat: bpy.types.Material, pymat: PyMaterialTransparentZWrite
) -> None:
self.material_init(b_mat)
z_write_transparent_sg = "TRANSPARENT_ZWRITE"
shader_node_group_import(z_write_transparent_sg)
sg = self.node_group_create(b_mat, z_write_transparent_sg)
b_mat.node_tree.links.new(
b_mat.node_tree.nodes["Material Output"].inputs["Surface"],
sg.outputs["Emission"],
)
for k, float_value in pymat.float_props_dic.items():
b_mat[k] = float_value
for k, vec_value in pymat.vector_props_dic.items():
b_mat[k] = vec_value
for tex_name, tex_index_value in pymat.texture_index_dic.items():
if tex_name == "_MainTex" and tex_index_value is not None:
self.connect_texture_node(
b_mat,
tex_index_value,
sg.inputs["Main_Texture"],
sg.inputs["Main_Alpha"],
)
self.set_material_transparent(b_mat, pymat, "Z_TRANSPARENCY")
# endregion material
def make_primitive_mesh_objects(
self, wm: bpy.types.WindowManager, progress: int
) -> None:
armature = self.armature
if armature is None:
raise Exception("armature is None")
self.primitive_obj_dict = {
pymesh[0].object_id: [] for pymesh in self.py_model.meshes
}
morph_cache_dict: Dict[
Tuple[int, int], List[List[float]]
] = {} # key:tuple(POSITION,targets.POSITION),value:points_data
# mesh_obj_build
mesh_progress = 0.0
mesh_progress_unit = 1 / max(1, len(self.py_model.meshes))
for pymesh in self.py_model.meshes:
b_mesh = bpy.data.meshes.new(pymesh[0].name)
face_index = [tri for prim in pymesh for tri in prim.face_indices]
if pymesh[0].POSITION is None:
continue
pos = list(map(self.axis_glb_to_blender, pymesh[0].POSITION))
b_mesh.from_pydata(pos, [], face_index)
b_mesh.update()
obj = bpy.data.objects.new(pymesh[0].name, b_mesh)
obj.parent = self.armature
self.meshes[pymesh[0].object_id] = obj
# region obj setting
# origin 0:Vtype_Node 1:mesh 2:skin
origin = None
for key_is_node_id, node in self.py_model.origin_nodes_dict.items():
if node[1] != pymesh[0].object_id:
continue
                # move to the origin bone's location
obj.location = self.axis_glb_to_blender(node[0].position)
if len(node) == 3:
origin = node
continue
                # len == 2 roughly means there is no skin
parent_node_id = None
for node_id, py_node in self.py_model.nodes_dict.items():
if py_node.children is None:
continue
if key_is_node_id in py_node.children:
parent_node_id = node_id
obj.parent_type = "BONE"
if parent_node_id is not None:
obj.parent_bone = armature.data.bones[
self.py_model.nodes_dict[parent_node_id].name
].name
if (
obj.parent_bone is None
or obj.parent_bone not in armature.data.bones
):
continue
                # parenting attaches at the bone's tail, so move back from there to the mesh node's position
obj.matrix_world = Matrix.Translation(
[
armature.matrix_world.to_translation()[i]
+ armature.data.bones[
obj.parent_bone
].matrix_local.to_translation()[i]
+ self.axis_glb_to_blender(node[0].position)[i]
for i in range(3)
]
)
scene = self.context.scene
scene.collection.objects.link(obj)
# endregion obj setting
            # region create vertex groups
            if origin is not None:
                # TODO: this breaks if a rename happened, e.g. due to bone name issues
                nodes_index_list = self.py_model.skins_joints_list[origin[2]]
                # TODO: this breaks if a rename happened, e.g. due to bone name issues
                # build a dict used to fill the vertex groups with weights, one vertex at a time
                for prim in pymesh:
                    if prim.JOINTS_0 is not None and prim.WEIGHTS_0 is not None:
                        # initialize the value of each key (bone name) in use with an empty list
                        # (doing the whole thing in one comprehension would be too ugly, so we don't)
vg_dict: Dict[str, List[Tuple[int, float]]] = {
self.py_model.nodes_dict[
nodes_index_list[joint_id]
].name: []
for joint_id in [
joint_id
for joint_ids in prim.JOINTS_0
for joint_id in joint_ids
]
}
for v_index, (joint_ids, weights) in enumerate(
zip(prim.JOINTS_0, prim.WEIGHTS_0)
):
                            # region handle VRoid storing joints like [18, 18, 0, 0]
normalized_joint_ids = list(dict.fromkeys(joint_ids))
# for deterministic export
def sort_by_vg_dict_key(
sort_data: Tuple[
int,
List[int],
List[int],
Dict[str, List[Tuple[int, float]]],
]
) -> int:
(
sort_joint_id,
sort_joint_ids,
sort_nodes_index_list,
sort_vg_dict,
) = sort_data
name = self.py_model.nodes_dict[
sort_nodes_index_list[sort_joint_id]
].name
keys = list(sort_vg_dict.keys())
if name in keys:
return keys.index(name)
return len(keys) + sort_joint_ids.index(sort_joint_id)
get_first_element: Callable[
[Tuple[int, Any, Any, Any]], int
] = lambda input: input[0]
sorted_joint_ids = map(
get_first_element,
sorted(
zip(
normalized_joint_ids,
itertools.repeat(joint_ids),
itertools.repeat(nodes_index_list),
itertools.repeat(vg_dict),
),
key=sort_by_vg_dict_key,
),
)
normalized_joint_dic: Dict[int, float] = {
joint_id: 0 for joint_id in sorted_joint_ids
}
for i, k in enumerate(joint_ids):
normalized_joint_dic[k] += weights[i]
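                            # Example: joint_ids (18, 18, 0, 0) with weights
                            # (0.5, 0.25, 0.25, 0.0) collapse to {18: 0.75, 0: 0.25}.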
                            # endregion handle VRoid storing joints like [18, 18, 0, 0]
for joint_id, weight in normalized_joint_dic.items():
node_id = nodes_index_list[joint_id]
                                # TODO: this breaks if a rename happened, e.g. due to bone name issues
vg_dict[self.py_model.nodes_dict[node_id].name].append(
(v_index, weight)
)
                        vg_list = []  # list of vertex groups
for vg_key in vg_dict.keys():
if vg_key not in obj.vertex_groups:
vg_list.append(obj.vertex_groups.new(name=vg_key))
                        # write the weights from the dict into the vertex groups
for vg in vg_list:
joint_id_and_weights = vg_dict[vg.name]
for (joint_id, weight) in joint_id_and_weights:
if weight != 0.0:
                                    # vertices can only be added as a list
vg.add([joint_id], weight, "REPLACE")
obj.modifiers.new("amt", "ARMATURE").object = self.armature
            # endregion create vertex groups
# region uv
flatten_vrm_mesh_vert_index = [
ind
for prim in pymesh
for ind in itertools.chain.from_iterable(prim.face_indices)
]
for prim in pymesh:
texcoord_num = 0
uv_flag = True
while uv_flag:
channel_name = "TEXCOORD_" + str(texcoord_num)
if hasattr(prim, channel_name):
if channel_name not in b_mesh.uv_layers:
b_mesh.uv_layers.new(name=channel_name)
blender_uv_data = b_mesh.uv_layers[channel_name].data
vrm_texcoord = getattr(prim, channel_name)
for node_id, v_index in enumerate(flatten_vrm_mesh_vert_index):
blender_uv_data[node_id].uv = vrm_texcoord[v_index]
                            # to blender axis (flip vertically)
blender_uv_data[node_id].uv[1] = (
blender_uv_data[node_id].uv[1] * -1 + 1
)
texcoord_num += 1
else:
uv_flag = False
break
# endregion uv
# region Normal #TODO
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.select_all(action="DESELECT")
self.context.view_layer.objects.active = obj
obj.select_set(True)
            bpy.ops.object.shade_smooth()  # this is needed
b_mesh.create_normals_split()
# bpy.ops.mesh.customdata_custom_splitnormals_add()
for prim in pymesh:
if prim.NORMAL is None:
continue
normalized_normal = prim.NORMAL
if (
prim.vert_normal_normalized is None
or not prim.vert_normal_normalized
):
normalized_normal = [
Vector(n)
if abs(Vector(n).magnitude - 1.0) < sys.float_info.epsilon
else Vector(n).normalized()
for n in prim.NORMAL
]
prim.vert_normal_normalized = True
prim.NORMAL = normalized_normal
b_mesh.normals_split_custom_set_from_vertices(
list(map(self.axis_glb_to_blender, normalized_normal))
)
b_mesh.use_auto_smooth = True
# endregion Normal
            # region apply materials
face_length = 0
for prim in pymesh:
if (
prim.material_index is None
or prim.material_index not in self.vrm_materials
):
continue
if (
self.vrm_materials[prim.material_index].name
not in obj.data.materials
):
obj.data.materials.append(self.vrm_materials[prim.material_index])
mat_index = 0
for i, mat in enumerate(obj.material_slots):
if (
mat.material.name
== self.vrm_materials[prim.material_index].name
):
mat_index = i
tris = len(prim.face_indices)
for n in range(tris):
b_mesh.polygons[face_length + n].material_index = mat_index
face_length = face_length + tris
            # endregion apply materials
# region vertex_color
            # For some reason only this one is stored per face corner, so loose verts and edges cannot be colored
            # Also, 2.79 vertex colors had no alpha (4th component), so full support was impossible there,
            # but 2.80 appears to use 4 components
            # TODO: test (concern: vertex merging during cleaning may corrupt this data)
for prim in pymesh:
vcolor_count = 0
vc_flag = True
while vc_flag:
vc_color_name = f"COLOR_{vcolor_count}"
if hasattr(prim, vc_color_name):
vc = None
if vc_color_name in b_mesh.vertex_colors:
vc = b_mesh.vertex_colors[vc_color_name]
else:
vc = b_mesh.vertex_colors.new(name=vc_color_name)
for v_index, _ in enumerate(vc.data):
vc.data[v_index].color = getattr(prim, vc_color_name)[
flatten_vrm_mesh_vert_index[v_index]
]
vcolor_count += 1
else:
vc_flag = False
break
# endregion vertex_color
# region shape_key
# shapekey_data_factory with cache
def absolutize_morph_positions(
base_points: List[List[float]],
morph_target_pos_and_index: List[Any],
prim: PyMesh,
) -> List[List[float]]:
shape_key_positions = []
morph_target_pos = morph_target_pos_and_index[0]
morph_target_index = morph_target_pos_and_index[1]
if prim.POSITION_accessor is None:
return []
                # if this was converted before, reuse the cached result
if (
prim.POSITION_accessor,
morph_target_index,
) in morph_cache_dict:
return morph_cache_dict[
(prim.POSITION_accessor, morph_target_index)
]
for base_pos, morph_pos in zip(base_points, morph_target_pos):
shape_key_positions.append(
self.axis_glb_to_blender(
[base_pos[i] + morph_pos[i] for i in range(3)]
)
)
morph_cache_dict[
(prim.POSITION_accessor, morph_target_index)
] = shape_key_positions
return shape_key_positions
# shapeKeys
for prim in pymesh:
if (
prim.morph_target_point_list_and_accessor_index_dict is None
or b_mesh is None
):
continue
if b_mesh.shape_keys is None:
obj.shape_key_add(name="Basis")
for (
morph_name,
morph_pos_and_index,
) in prim.morph_target_point_list_and_accessor_index_dict.items():
if (
b_mesh.shape_keys is None
or morph_name not in b_mesh.shape_keys.key_blocks
):
obj.shape_key_add(name=morph_name)
if b_mesh.shape_keys is None:
continue
keyblock = b_mesh.shape_keys.key_blocks[morph_name]
if prim.POSITION is not None:
shape_data = absolutize_morph_positions(
prim.POSITION, morph_pos_and_index, prim
)
else:
shape_data = []
for i, co in enumerate(shape_data):
keyblock.data[i].co = co
# endregion shape_key
# progress update
mesh_progress += mesh_progress_unit
wm.progress_update(progress + mesh_progress)
wm.progress_update(progress + 1)
def attach_vrm_attributes(self) -> None:
armature = self.armature
if armature is None:
raise Exception("armature is None")
vrm0_extension = self.vrm0_extension
if vrm0_extension is None:
return
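        # The humanoid mapping is stored as custom properties: each mapped bone gets a
        # "humanBone" property holding its glTF node index, and the armature data gets
        # one property per VRM bone name (e.g. "hips") holding the Blender bone name,
        # which set_bone_roll() reads back later.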
humanbones_relations = deep.get(vrm0_extension, ["humanoid", "humanBones"], [])
if not isinstance(humanbones_relations, list):
raise Exception("extensions.VRM.humanoid.humanBones is not list")
for (i, humanbone) in enumerate(humanbones_relations):
if not isinstance(humanbone, dict):
raise Exception(f"extensions.VRM.humanoid.humanBones[{i}] is not dict")
node_index = humanbone["node"]
if not isinstance(node_index, int):
raise Exception(
f'json extensions.VRM.humanoid.humanBones[{i}]["node"] is not int but {node_index}'
)
node_name = self.py_model.json["nodes"][node_index]["name"]
if node_name not in armature.data.bones:
continue
armature.data.bones[node_name]["humanBone"] = node_index
armature.data[humanbone["bone"]] = armature.data.bones[node_name].name
vrm_meta = vrm0_extension.get("meta", {})
if not isinstance(vrm_meta, dict):
raise Exception("json extensions.VRM.meta is not dict")
for metatag, metainfo in vrm_meta.items():
if metatag == "texture":
if (
"textures" in self.py_model.json
# extensions.VRM.meta.texture could be -1
# https://github.com/vrm-c/UniVRM/issues/91#issuecomment-454284964
and 0 <= metainfo < len(self.py_model.json["textures"])
):
image_index = self.py_model.json["textures"][metainfo]["source"]
if image_index in self.images:
armature[metatag] = self.images[image_index].name
else:
armature[metatag] = metainfo
def json_dump(self) -> None:
if self.vrm_extension is not None:
return
vrm0_extension = self.vrm0_extension
if not isinstance(vrm0_extension, dict):
raise Exception("json extensions VRM is not dict")
textblock = bpy.data.texts.new(name="raw.json")
textblock.write(json.dumps(self.py_model.json, indent=4))
def write_textblock_and_assign_to_armature(block_name: str, value: str) -> None:
text_block = bpy.data.texts.new(name=f"{block_name}.json")
text_block.write(json.dumps(value, indent=4))
armature = self.armature
if armature is None:
raise Exception("armature is None")
armature[f"{block_name}"] = text_block.name
# region humanoid_parameter
humanoid_params = copy.deepcopy(vrm0_extension["humanoid"])
del humanoid_params["humanBones"]
write_textblock_and_assign_to_armature("humanoid_params", humanoid_params)
self.load_humanoid_params(self.armature, humanoid_params)
# endregion humanoid_parameter
# region first_person
first_person_params = copy.deepcopy(vrm0_extension["firstPerson"])
first_person_bone = deep.get(first_person_params, ["firstPersonBone"], -1)
if isinstance(first_person_bone, int) and 0 <= first_person_bone < len(
self.py_model.json["nodes"]
):
first_person_params["firstPersonBone"] = self.py_model.json["nodes"][
first_person_bone
]["name"]
if isinstance(first_person_params.get("meshAnnotations"), list):
            # TODO: VRM 1.0 uses the index of the node that has the mesh
for mesh_annotation in first_person_params["meshAnnotations"]:
mesh = mesh_annotation["mesh"]
if isinstance(mesh, int) and 0 <= mesh < len(
self.py_model.json["meshes"]
):
mesh_annotation["mesh"] = self.py_model.json["meshes"][mesh]["name"]
write_textblock_and_assign_to_armature(
"firstPerson_params", first_person_params
)
self.load_first_person_params(self.armature, first_person_params)
# endregion first_person
# region blendshape_master
blendshape_groups = copy.deepcopy(
vrm0_extension["blendShapeMaster"]["blendShapeGroups"]
)
        # convert mesh ids to names
        # convert weights from 0-100 to 0-1
        # convert shape_index values to names
        # TODO: VRM 1.0 uses the index of the node that has the mesh
        # materialValues should work as-is...
legacy_vrm0 = False
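        # Each bind is rewritten from glTF indices to Blender names; with illustrative
        # values, {"mesh": 0, "index": 3, "weight": 100} becomes something like
        # {"mesh": "Face", "index": "Fcl_ALL_Joy", "weight": 1.0}. If a primitive has no
        # extras.targetNames (legacy VRM 0.x files), the whole list is dropped instead.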
for blendshape_group in blendshape_groups:
for bind_dic in blendshape_group.get("binds", []):
try:
bind_dic["index"] = self.py_model.json["meshes"][bind_dic["mesh"]][
"primitives"
][0]["extras"]["targetNames"][bind_dic["index"]]
except KeyError:
legacy_vrm0 = True
break
if 0 <= bind_dic["mesh"] < len(self.meshes):
bind_dic["mesh"] = self.meshes[bind_dic["mesh"]].data.name
else:
bind_dic["mesh"] = None
bind_dic["weight"] = bind_dic["weight"] / 100
if legacy_vrm0:
break
if legacy_vrm0:
blendshape_groups = []
write_textblock_and_assign_to_armature("blendshape_group", blendshape_groups)
self.load_blendshape_group(self.armature, blendshape_groups)
# endregion blendshape_master
# region springbone
spring_bonegroup_list = copy.deepcopy(
vrm0_extension["secondaryAnimation"]["boneGroups"]
)
collider_groups_list = vrm0_extension["secondaryAnimation"]["colliderGroups"]
        # managing node_ids is cumbersome, so replace them with names
        # same for collider_groups
for bone_group in spring_bonegroup_list:
center_node_id = bone_group.get("center")
if isinstance(center_node_id, int) and 0 <= center_node_id < len(
self.py_model.json["nodes"]
):
bone_group["center"] = self.py_model.json["nodes"][center_node_id][
"name"
]
bone_group["bones"] = [
self.py_model.json["nodes"][node_id]["name"]
for node_id in bone_group["bones"]
]
bone_group["colliderGroups"] = [
self.py_model.json["nodes"][
collider_groups_list[collider_gp_id]["node"]
]["name"]
for collider_gp_id in bone_group["colliderGroups"]
]
write_textblock_and_assign_to_armature("spring_bone", spring_bonegroup_list)
self.load_spring_bones(self.armature, spring_bonegroup_list)
# endregion springbone
def load_humanoid_params(
self, armature: bpy.types.Armature, humanoid_params: Dict[str, Any]
) -> None:
if not self.use_experimental_vrm_component_ui:
return
props = armature.vrm_props.humanoid_params
props.arm_stretch = humanoid_params["armStretch"]
props.leg_stretch = humanoid_params["legStretch"]
props.upper_arm_twist = humanoid_params["upperArmTwist"]
props.lower_arm_twist = humanoid_params["lowerArmTwist"]
props.upper_leg_twist = humanoid_params["upperLegTwist"]
props.lower_leg_twist = humanoid_params["lowerLegTwist"]
props.feet_spacing = humanoid_params["feetSpacing"]
props.has_translation_dof = humanoid_params["hasTranslationDoF"]
def load_first_person_params(
self, armature: bpy.types.Armature, first_person_params: Dict[str, Any]
) -> None:
if not self.use_experimental_vrm_component_ui:
return
props = armature.vrm_props.first_person_params
props.first_person_bone = first_person_params["firstPersonBone"]
first_person_bone_offset = first_person_params["firstPersonBoneOffset"]
# Axis confusing
props.first_person_bone_offset = (
first_person_bone_offset["x"],
first_person_bone_offset["z"],
first_person_bone_offset["y"],
)
props.mesh_annotations.clear()
for mesh_annotation in first_person_params["meshAnnotations"]:
item = props.mesh_annotations.add()
item.mesh = mesh_annotation["mesh"]
item.first_person_flag = mesh_annotation["firstPersonFlag"]
props.look_at_type_name = first_person_params["lookAtTypeName"]
look_at_horizontal_inner = first_person_params["lookAtHorizontalInner"]
props.look_at_horizontal_inner.curve = look_at_horizontal_inner["curve"]
props.look_at_horizontal_inner.x_range = look_at_horizontal_inner["xRange"]
props.look_at_horizontal_inner.y_range = look_at_horizontal_inner["yRange"]
look_at_horizontal_outer = first_person_params["lookAtHorizontalOuter"]
props.look_at_horizontal_outer.curve = look_at_horizontal_outer["curve"]
props.look_at_horizontal_outer.x_range = look_at_horizontal_outer["xRange"]
props.look_at_horizontal_outer.y_range = look_at_horizontal_outer["yRange"]
look_at_vertical_down = first_person_params["lookAtVerticalDown"]
props.look_at_vertical_down.curve = look_at_vertical_down["curve"]
props.look_at_vertical_down.x_range = look_at_vertical_down["xRange"]
props.look_at_vertical_down.y_range = look_at_vertical_down["yRange"]
look_at_vertical_up = first_person_params["lookAtVerticalUp"]
props.look_at_vertical_up.curve = look_at_vertical_up["curve"]
props.look_at_vertical_up.x_range = look_at_vertical_up["xRange"]
props.look_at_vertical_up.y_range = look_at_vertical_up["yRange"]
def load_blendshape_group(
self, armature: bpy.types.Armature, blendshape_group: List[Dict[str, Any]]
) -> None:
if not self.use_experimental_vrm_component_ui:
return
props = armature.vrm_props.blendshape_group
props.clear()
for blendshape in blendshape_group:
item = props.add()
item.name = blendshape["name"]
item.preset_name = blendshape["presetName"]
item.binds.clear()
binds = blendshape["binds"]
for bind in binds:
added = item.binds.add()
added.mesh = bind["mesh"]
added.index = bind["index"]
added.weight = bind["weight"]
# "materialValues": [],
if "isBinary" in blendshape:
item.is_binary = blendshape["isBinary"]
def load_spring_bones(
self, armature: bpy.types.Armature, spring_bonegroup_list: List[Dict[str, Any]]
) -> None:
if not self.use_experimental_vrm_component_ui:
return
props = armature.vrm_props.spring_bones
props.clear()
for spring_bonegroup in spring_bonegroup_list:
item = props.add()
item.comment = spring_bonegroup["comment"]
item.stiffiness = spring_bonegroup["stiffiness"] # noqa: SC200
item.gravity_power = spring_bonegroup["gravityPower"]
gravity_dir = spring_bonegroup["gravityDir"]
            # Axis order differs between VRM (Y-up) and Blender (Z-up); swap Y and Z here
item.gravity_dir = (gravity_dir["x"], gravity_dir["z"], gravity_dir["y"])
item.drag_force = spring_bonegroup["dragForce"]
no_reference = -1
if spring_bonegroup["center"] != no_reference:
item.center = spring_bonegroup["center"]
item.hit_radius = spring_bonegroup["hitRadius"]
item.bones.clear()
for bone in spring_bonegroup["bones"]:
added = item.bones.add()
added.name = bone
item.collider_groups.clear()
for bone in spring_bonegroup["colliderGroups"]:
added = item.collider_groups.add()
added.name = bone
def cleaning_data(self) -> None:
# collection setting
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.select_all(action="DESELECT")
for obj in self.meshes.values():
self.context.view_layer.objects.active = obj
obj.select_set(True)
bpy.ops.object.shade_smooth()
bpy.ops.object.select_all(action="DESELECT")
def set_bone_roll(self) -> None:
armature = self.armature
if armature is None:
raise Exception("armature is None")
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.select_all(action="DESELECT")
armature.select_set(True)
self.context.view_layer.objects.active = self.armature
bpy.ops.object.mode_set(mode="EDIT")
hb = vrm_types.HumanBones
stop_bone_names = {*armature.data.values()[:]}
def set_children_roll(bone_name: str, roll: float) -> None:
armature = self.armature
if armature is None:
raise Exception("armature is None")
if bone_name in armature.data and armature.data[bone_name] != "":
bone = armature.data.edit_bones[armature.data[bone_name]]
bone.roll = radians(roll)
roll_list = [*bone.children]
while roll_list:
bone = roll_list.pop()
if bone.name in stop_bone_names:
continue
bone.roll = radians(roll)
roll_list.extend(bone.children)
return
for b in hb.center_req + hb.center_def:
if b == "hips":
set_children_roll(b, 90)
else:
set_children_roll(b, -90)
for deg, bs in zip(
[0, 180],
[hb.left_arm_req + hb.left_arm_def, hb.right_arm_req + hb.right_arm_def],
):
for b in bs:
set_children_roll(b, deg)
for b in (
hb.left_leg_req + hb.right_leg_req + hb.left_leg_def + hb.right_leg_def
):
set_children_roll(b, 90)
bpy.ops.object.mode_set(mode="OBJECT")
def put_spring_bone_info(self) -> None:
armature = self.armature
if armature is None:
raise Exception("armature is None")
vrm0_extension = self.vrm0_extension
if vrm0_extension is None:
return
secondary_animation_json = vrm0_extension.get("secondaryAnimation")
if secondary_animation_json is None:
print("no secondary animation object")
return
spring_rootbone_groups_json = secondary_animation_json["boneGroups"]
collider_groups_json = secondary_animation_json["colliderGroups"]
nodes_json = self.py_model.json["nodes"]
for bone_group in spring_rootbone_groups_json:
for bone_id in bone_group["bones"]:
node_name = nodes_json[bone_id]["name"]
if node_name not in armature.data.bones:
continue
bone = armature.data.bones[node_name]
for key, val in bone_group.items():
if key == "bones":
continue
bone[key] = val
collider_objs = []
bpy.context.view_layer.depsgraph.update()
bpy.context.scene.view_layers.update()
for collider_group in collider_groups_json:
collider_base_node = self.py_model.json["nodes"][collider_group["node"]]
node_name = collider_base_node["name"]
for i, collider in enumerate(collider_group["colliders"]):
if node_name not in armature.data.bones:
continue
collider_name = f"{node_name}_collider_{i}"
obj = bpy.data.objects.new(name=collider_name, object_data=None)
obj.parent = self.armature
obj.parent_type = "BONE"
obj.parent_bone = node_name
offset = [
collider["offset"]["x"],
collider["offset"]["y"],
collider["offset"]["z"],
                ]  # the offset values cannot be accessed by index directly, so copy them out by key
offset = [
offset[axis] * inv for axis, inv in zip([0, 2, 1], [-1, -1, 1])
                ]  # TODO: the Y-axis flip matches UniVRM's serialization
                # The empty is parented to the bone's tail, so move it back to a position measured from the bone's root
obj.matrix_world = Matrix.Translation(
[
armature.matrix_world.to_translation()[i]
+ armature.data.bones[node_name].matrix_local.to_translation()[
i
]
+ offset[i]
for i in range(3)
]
)
obj.empty_display_size = collider["radius"]
obj.empty_display_type = "SPHERE"
collider_objs.append(obj)
if collider_objs:
coll = bpy.data.collections.new("Colliders")
self.context.scene.collection.children.link(coll)
for collider_obj in collider_objs:
coll.objects.link(collider_obj)
def make_pole_target(
self, rl: str, upper_leg_name: str, lower_leg_name: str, foot_name: str
) -> None:
armature = self.armature
if armature is None:
raise Exception("armature is None")
bpy.ops.object.mode_set(mode="EDIT")
edit_bones = armature.data.edit_bones
ik_foot = armature.data.edit_bones.new(f"IK_LEG_TARGET_{rl}")
ik_foot.head = [f + o for f, o in zip(edit_bones[foot_name].head[:], [0, 0, 0])]
ik_foot.tail = [
f + o for f, o in zip(edit_bones[foot_name].head[:], [0, -0.2, 0])
]
pole = armature.data.edit_bones.new(f"leg_pole_{rl}")
pole.parent = ik_foot
pole.head = [
f + o for f, o in zip(edit_bones[lower_leg_name].head[:], [0, -0.1, 0])
]
pole.tail = [
f + o for f, o in zip(edit_bones[lower_leg_name].head[:], [0, -0.2, 0])
]
pole_name = copy.copy(pole.name)
ik_foot_name = copy.copy(ik_foot.name)
bpy.context.view_layer.depsgraph.update()
bpy.context.scene.view_layers.update()
bpy.ops.object.mode_set(mode="POSE")
ikc = armature.pose.bones[lower_leg_name].constraints.new("IK")
ikc.target = armature
ikc.subtarget = armature.pose.bones[ik_foot_name].name
def chain_solver(armature: bpy.types.Armature, child: str, parent: str) -> int:
current_bone = armature.pose.bones[child]
for i in range(10):
if current_bone.name == parent:
return i + 1
current_bone = current_bone.parent
return 11
ikc.chain_count = chain_solver(armature, lower_leg_name, upper_leg_name)
ikc.pole_target = self.armature
ikc.pole_subtarget = pole_name
bpy.context.view_layer.depsgraph.update()
bpy.context.scene.view_layers.update()
def blendfy(self) -> None:
armature = self.armature
if armature is None:
raise Exception("armature is None")
bpy.context.view_layer.objects.active = self.armature
bpy.ops.object.mode_set(mode="EDIT")
edit_bones = armature.data.edit_bones # noqa: F841
right_upper_leg_name = armature.data["rightUpperLeg"]
right_lower_leg_name = armature.data["rightLowerLeg"]
right_foot_name = armature.data["rightFoot"]
left_upper_leg_name = armature.data["leftUpperLeg"]
left_lower_leg_name = armature.data["leftLowerLeg"]
left_foot_name = armature.data["leftFoot"]
self.make_pole_target(
"R", right_upper_leg_name, right_lower_leg_name, right_foot_name
)
self.make_pole_target(
"L", left_upper_leg_name, left_lower_leg_name, left_foot_name
)
bpy.ops.object.mode_set(mode="OBJECT")
bpy.context.view_layer.depsgraph.update()
bpy.context.scene.view_layers.update()
editor.vrm_helper.Bones_rename(bpy.context)
# DeprecationWarning
class ICYP_OT_select_helper(bpy.types.Operator): # type: ignore[misc] # noqa: N801
bl_idname = "mesh.icyp_select_helper"
bl_label = "VRM importer internal only func"
bl_description = "VRM importer internal only"
bl_options = {"REGISTER", "UNDO"}
bpy.types.Scene.icyp_select_helper_select_list = []
def execute(self, context: bpy.types.Context) -> Set[str]:
bpy.ops.object.mode_set(mode="OBJECT")
for vid in bpy.types.Scene.icyp_select_helper_select_list:
bpy.context.active_object.data.vertices[vid].select = True
bpy.ops.object.mode_set(mode="EDIT")
bpy.types.Scene.icyp_select_helper_select_list = []
return {"FINISHED"}
| 43.391784 | 154 | 0.541557 |
cec2334b2146c706e526f103db86924a69b28a48 | 1,664 | py | Python | pandas/challenge_6.py | bobrokerson/kaggle | 96c13e85476e2fe0fdb2af74075f82510db90573 | [
"MIT"
] | null | null | null | pandas/challenge_6.py | bobrokerson/kaggle | 96c13e85476e2fe0fdb2af74075f82510db90573 | [
"MIT"
] | null | null | null | pandas/challenge_6.py | bobrokerson/kaggle | 96c13e85476e2fe0fdb2af74075f82510db90573 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 11 23:08:36 2022
@author: jimmy
"""
""" 31. Write a Pandas program to compute the Euclidean distance between two given series"""
import pandas as pd
from scipy.spatial import distance
import numpy as np
dataset = pd.Series([1,2,3,4,5,6,7,8,9,10])
dataset_1 = pd.Series([11,8,7,5,6,5,3,4,7,1])
print(f"{dataset}, \n{dataset_1}")
distance_1 = distance.euclidean(dataset, dataset_1)  # use a separate name so the scipy `distance` module is not shadowed
distance_2 = np.linalg.norm(dataset - dataset_1)
print(f"Euclidean distance between the two series: {round(distance_1, 3)} \nnp.linalg.norm option: {distance_2.round(3)}")
""" 32.Write a Pandas program to find the positions of the values neighboured by smaller values on both sides in a given series"""
dataset_2 = pd.Series([1,8,7,5,6,5,3,4,7,1])
temp = np.diff(np.sign(np.diff(dataset_2)))
result = np.where(temp == -2)[0] + 1
print(f"Positions of the values surrounded by smaller values on both sides: {result}")
""" 33. Write a Pandas program to replace missing white spaces in a given string with the least frequent character"""
str1 = 'abc def abcdef icd'
print(str1)
ser = pd.Series(list(str1))
element_freq = ser.value_counts()
print(element_freq)
least_freq_char = element_freq.dropna().index[-1]  # value_counts() sorts descending, so the last index is the least frequent character
result = "".join(ser.replace(' ', least_freq_char))
print(result)
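# Editor's note (worked example, not in the original script): in str1 above every
# character occurs at least twice except 'i', so the spaces are replaced by 'i'
# and this prints 'abcidefiabcdefiicd'.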
""" 34. Write a Pandas program to compute the autocorrelations of a given numeric series."""
dataset_4 = pd.Series(np.arange(15) + np.random.normal(1, 10, 15))
print(F"Original series: {dataset_4}")
autocorrelations = [dataset_4.autocorr(i).round(2) for i in range(11)]
print("\nAutocorrelations of the said series:")
print(autocorrelations[1:])
| 32 | 130 | 0.722957 |
cb50f8501c5c8b6559047537ee7c9a7f41ff6679 | 3,411 | py | Python | utils/data_utils.py | SunnyMarkLiu/Datacastle_Travel_Services_Predict | 0823a8aaab4e42a7ee5067171901c6f597bc5d7e | [
"MIT"
] | 69 | 2018-02-08T09:38:12.000Z | 2019-10-14T00:11:04.000Z | utils/data_utils.py | QianguoSun/Datacastle_Travel_Services_Predict | 0823a8aaab4e42a7ee5067171901c6f597bc5d7e | [
"MIT"
] | null | null | null | utils/data_utils.py | QianguoSun/Datacastle_Travel_Services_Predict | 0823a8aaab4e42a7ee5067171901c6f597bc5d7e | [
"MIT"
] | 33 | 2018-02-08T15:32:33.000Z | 2019-07-20T08:52:26.000Z | #!/usr/local/miniconda2/bin/python
# _*_ coding: utf-8 _*_
"""
@author: MarkLiu
@time : 17-8-6 3:12 PM
"""
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
sys.path.append(module_path)
from os import listdir
from os.path import isfile, join
import cPickle
import numpy as np
import pandas as pd
from conf.configure import Configure
def load_features(features_name):
train_path = Configure.features_path + 'train_' + features_name + '.pkl'
test_path = Configure.features_path + 'test_' + features_name + '.pkl'
with open(train_path, "rb") as f:
train = cPickle.load(f)
with open(test_path, "rb") as f:
test = cPickle.load(f)
return train, test
def save_features(train, test, features_name):
if train is not None:
train_path = Configure.features_path + 'train_' + features_name + '.pkl'
with open(train_path, "wb") as f:
cPickle.dump(train, f, -1)
if test is not None:
test_path = Configure.features_path + 'test_' + features_name + '.pkl'
with open(test_path, "wb") as f:
cPickle.dump(test, f, -1)
def is_feature_created(feature_name):
feature_files = [f for f in listdir(Configure.features_path) if isfile(join(Configure.features_path, f))]
    exist_feature = sum(feature_name in feature_file for feature_file in feature_files) > 0
    return exist_feature
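# Usage sketch (editor's addition, not part of the original module). The feature
# name below is only an example; DataFrames are pickled under
# Configure.features_path as train_<name>.pkl / test_<name>.pkl:
#     save_features(train_df, test_df, 'order_history')
#     train_df, test_df = load_features('order_history')
#     is_feature_created('order_history')  # True once the pickle files exist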
def load_action_sequence_label_for_nn():
""" 加载训练神经网络的数据集 """
train = pd.read_csv(Configure.base_path + 'train/orderFuture_train.csv', encoding='utf8')
test = pd.read_csv(Configure.base_path + 'test/orderFuture_test.csv', encoding='utf8')
action_train = pd.read_csv(Configure.base_path + 'train/action_train.csv')
action_test = pd.read_csv(Configure.base_path + 'test/action_test.csv')
    # training-set text sequences
train_seqs = []
train_y = []
action_grouped = dict(list(action_train.groupby('userid')))
for uid in train['userid'].values:
train_seqs.append(' '.join(action_grouped[uid]['actionType'].astype(str).values.tolist()))
train_y.append(train[train['userid'] == uid]['orderType'].values[0])
test_seqs = []
action_grouped = dict(list(action_test.groupby('userid')))
for uid in test['userid'].values:
test_seqs.append(' '.join(action_grouped[uid]['actionType'].astype(str).values.tolist()))
return train_seqs, train_y, test_seqs
class DataWrapper(object):
def __init__(self, x, y=None, istrain=False, is_shuffle=True):
self.x = x
self.y = y
self.pointer = 0
self.total_count = self.x.shape[0]
self.istrain = istrain
self.is_shuffle = is_shuffle
def shuffle(self):
shuffled_index = np.arange(0, self.total_count)
np.random.seed(10)
np.random.shuffle(shuffled_index)
self.x = self.x[shuffled_index]
if self.istrain:
self.y = self.y[shuffled_index]
def load_all_data(self):
return self.next_batch(self.x.shape[0])
def next_batch(self, batch_size):
end = self.pointer + batch_size
batch_x = self.x[self.pointer: end]
batch_y = None
if self.istrain:
batch_y = self.y[self.pointer: end]
self.pointer = end
if self.pointer >= self.total_count:
self.pointer = 0
if self.is_shuffle:
self.shuffle()
return batch_x, batch_y
| 30.455357 | 109 | 0.653474 |
214d0aea4e91c7993f674b6df7cd6725cf3382cb | 49,282 | py | Python | text2cc/quiz.py | dlehman83/text2cc | 303798993590bceaeb5238a6cce82893c37cdfc7 | [
"BSD-3-Clause"
] | 1 | 2021-02-12T09:34:07.000Z | 2021-02-12T09:34:07.000Z | text2cc/quiz.py | dlehman83/text2cc | 303798993590bceaeb5238a6cce82893c37cdfc7 | [
"BSD-3-Clause"
] | null | null | null | text2cc/quiz.py | dlehman83/text2cc | 303798993590bceaeb5238a6cce82893c37cdfc7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2021, Dana Lehman
# Copyright (c) 2020, Geoffrey M. Poore
# All rights reserved.
#
# Licensed under the BSD 3-Clause License:
# http://opensource.org/licenses/BSD-3-Clause
#
'''
Parse text into a Quiz object that contains a list of Question objects, each
of which contains a list of Choice objects.
'''
import hashlib
import io
import itertools
import locale
import pathlib
import platform
import re
import shutil
import subprocess
import tempfile
from typing import Dict, List, Optional, Set, Union
from .config import Config
from .err import Text2qtiError
from .markdown import Image, Markdown
# regex patterns for parsing quiz content
start_patterns = {
'question': r'\d+\.',
'mctf_correct_choice': r'\*[a-zA-Z]\)',
'mctf_incorrect_choice': r'[a-zA-Z]\)',
'multans_correct_choice': r'\[\*\]',
'multans_incorrect_choice': r'\[ ?\]',
'shortans_correct_choice': r'\*',
'feedback': r'\.\.\.',
'correct_feedback': r'\+',
'incorrect_feedback': r'\-',
'essay': r'___+',
'upload': r'\^\^\^+',
'numerical': r'=',
'question_title': r'[Tt]itle:',
'question_points': r'[Pp]oints:',
'text_title': r'[Tt]ext [Tt]itle:',
'text': r'[Tt]ext:',
'quiz_title': r'[Qq]uiz [Tt]itle:',
'quiz_description': r'[Qq]uiz [Dd]escription:',
'start_group': r'GROUP',
'end_group': r'END_GROUP',
'group_pick': r'[Pp]ick:',
'group_points_per_question': r'[Pp]oints per question:',
#'start_code': r'```+\s*\S.*',
#'end_code': r'```+',
'quiz_shuffle_answers': r'[Ss]huffle answers:',
'quiz_show_correct_answers': r'[Ss]how correct answers:',
'quiz_one_question_at_a_time': r'[Oo]ne question at a time:',
'quiz_cant_go_back': r'''[Cc]an't go back:''',
}
# comments are currently handled separately from content
comment_patterns = {
'start_multiline_comment': r'COMMENT',
'end_multiline_comment': r'END_COMMENT',
'line_comment': r'%',
}
# whether regex needs to check after pattern for content on the same line
no_content = set(['essay', 'upload', 'start_group', 'end_group'])
# whether parser needs to check for multi-line content
single_line = set(['question_points', 'group_pick', 'group_points_per_question',
'numerical', 'shortans_correct_choice',
'quiz_shuffle_answers', 'quiz_show_correct_answers',
'quiz_one_question_at_a_time', 'quiz_cant_go_back'])
multi_line = set([x for x in start_patterns
if x not in no_content and x not in single_line])
# whether parser needs to check for multi-paragraph content
multi_para = set([x for x in multi_line if 'title' not in x])
start_re = re.compile('|'.join(r'(?P<{0}>{1}[ \t]+(?=\S))'.format(name, pattern)
if name not in no_content else
r'(?P<{0}>{1}\s*)$'.format(name, pattern)
for name, pattern in start_patterns.items()))
start_missing_content_re = re.compile('|'.join(r'(?P<{0}>{1}[ \t]*$)'.format(name, pattern)
for name, pattern in start_patterns.items()
if name not in no_content))
start_missing_whitespace_re = re.compile('|'.join(r'(?P<{0}>{1}(?=\S))'.format(name, pattern)
for name, pattern in start_patterns.items()
if name not in no_content))
int_re = re.compile('(?:0|[+-]?[1-9](?:[0-9]+|_[0-9]+)*)$')
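# Illustration (editor's addition): the patterns above recognize plain-text quiz
# syntax along these lines (quiz/question titles and points are optional; the
# sample wording is made up):
#
#     Quiz title: Sample quiz
#     Title: Question 1
#     Points: 2
#     1.  What is 2 + 2?
#     ... General feedback, given for any response.
#     +   Feedback shown for a correct response.
#     -   Feedback shown for an incorrect response.
#     a)  3
#     *b) 4
#
# Short-answer ("* answer"), multiple-answers ("[*]"/"[ ]"), essay ("____"),
# file-upload ("^^^^") and numerical ("= 42") responses use the other patterns.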
class TextRegion(object):
'''
A text region between questions.
'''
def __init__(self, *, index: int, md: Markdown):
self.title_raw: Optional[str] = None
self.title_xml = ''
self.text_raw: Optional[str] = None
self.text_html_xml = ''
self.md = md
self._index = index
def _set_id(self):
h = hashlib.blake2b()
h.update(f'{self._index}'.encode('utf8'))
h.update(h.digest())
h.update(self.title_xml.encode('utf8'))
h.update(h.digest())
h.update(self.text_html_xml.encode('utf8'))
self.id = h.hexdigest()[:64]
def set_title(self, text: str):
if self.title_raw is not None:
raise Text2qtiError('Text title has already been set')
if self.text_raw is not None:
raise Text2qtiError('Must set text title before text itself')
self.title_raw = text
self.title_xml = self.md.xml_escape(text)
self._set_id()
def set_text(self, text: str):
if self.text_raw is not None:
raise Text2qtiError('Text has already been set')
self.text_raw = text
self.text_html_xml = self.md.md_to_html_xml(text)
self._set_id()
class Choice(object):
'''
A choice for a question plus optional feedback.
The id is based on a hash of both the question and the choice itself.
The presence of feedback does not affect the id.
'''
def __init__(self, text: str, *,
correct: bool, shortans=False,
question_hash_digest: bytes, md: Markdown):
self.choice_raw = text
if shortans:
self.choice_xml = md.xml_escape(text)
else:
self.choice_html_xml = md.md_to_html_xml(text)
self.correct = correct
self.shortans = shortans
self.feedback_raw: Optional[str] = None
self.feedback_html_xml: Optional[str] = None
# ID is based on hash of choice XML as well as question XML. This
# gives different IDs for identical choices in different questions.
if shortans:
self.id = hashlib.blake2b(self.choice_xml.encode('utf8'), key=question_hash_digest).hexdigest()[:64]
else:
self.id = hashlib.blake2b(self.choice_html_xml.encode('utf8'), key=question_hash_digest).hexdigest()[:64]
self.md = md
def append_feedback(self, text: str):
if self.feedback_raw is not None:
raise Text2qtiError('Feedback can only be specified once')
self.feedback_raw = text
self.feedback_html_xml = self.md.md_to_html_xml(text)
class Question(object):
'''
A question, along with a list of possible choices and optional feedback of
various types.
'''
def __init__(self, text: str, *, title: Optional[str], points: Optional[str], md: Markdown):
# Question type is set once it is known. For true/false or multiple
# choice, this is done during .finalize(), once all choices are
# available. For essay, this is done as soon as essay response is
# specified.
self.type: Optional[str] = None
if title is None:
self.title_raw: Optional[str] = None
self.title_xml = 'Question'
else:
self.title_raw: Optional[str] = title
self.title_xml = md.xml_escape(title)
self.question_raw = text
self.question_html_xml = md.md_to_html_xml(text)
self.choices: List[Choice] = []
# The set for detecting duplicate choices uses the XML version of the
# choices, to avoid the issue of multiple Markdown representations of
# the same XML.
self._choice_set: Set[str] = set()
self.numerical_min: Optional[Union[int, float]] = None
self.numerical_min_html_xml: Optional[str] = None
self.numerical_exact: Optional[Union[int, float]] = None
self.numerical_exact_html_xml: Optional[str] = None
self.numerical_max: Optional[Union[int, float]] = None
self.numerical_max_html_xml: Optional[str] = None
self.correct_choices = 0
if points is None:
self.points_possible_raw: Optional[str] = None
self.points_possible: Union[int, float] = 1
else:
self.points_possible_raw: Optional[str] = points
try:
points_num = float(points)
except ValueError:
raise Text2qtiError(f'Invalid points value "{points}"; need positive integer or half-integer')
if points_num <= 0:
raise Text2qtiError(f'Invalid points value "{points}"; need positive integer or half-integer')
if points_num.is_integer():
points_num = int(points)
elif abs(points_num-round(points_num)) != 0.5:
raise Text2qtiError(f'Invalid points value "{points}"; need positive integer or half-integer')
self.points_possible: Union[int, float] = points_num
self.feedback_raw: Optional[str] = None
self.feedback_html_xml: Optional[str] = None
self.correct_feedback_raw: Optional[str] = None
self.correct_feedback_html_xml: Optional[str] = None
self.incorrect_feedback_raw: Optional[str] = None
self.incorrect_feedback_html_xml: Optional[str] = None
h = hashlib.blake2b(self.question_html_xml.encode('utf8'))
self.hash_digest = h.digest()
self.id = h.hexdigest()[:64]
self.md = md
def append_feedback(self, text: str):
if self.type in ('essay_question', 'file_upload_question', 'numerical_question'):
raise Text2qtiError('Question feedback must immediately follow the question')
if not self.choices:
if self.feedback_raw is not None:
raise Text2qtiError('Feedback can only be specified once')
self.feedback_raw = text
self.feedback_html_xml = self.md.md_to_html_xml(text)
else:
self.choices[-1].append_feedback(text)
def append_correct_feedback(self, text: str):
if self.type in ('essay_question', 'file_upload_question'):
raise Text2qtiError(f'Question type "{self.type}" does not support correct feedback')
if self.choices or self.type == 'numerical_question':
raise Text2qtiError('Correct feedback can only be specified for questions')
if self.correct_feedback_raw is not None:
raise Text2qtiError('Feedback can only be specified once')
self.correct_feedback_raw = text
self.correct_feedback_html_xml = self.md.md_to_html_xml(text)
def append_incorrect_feedback(self, text: str):
if self.type in ('essay_question', 'file_upload_question'):
raise Text2qtiError(f'Question type "{self.type}" does not support incorrect feedback')
if self.choices or self.type == 'numerical_question':
raise Text2qtiError('Incorrect feedback can only be specified for questions')
if self.incorrect_feedback_raw is not None:
raise Text2qtiError('Feedback can only be specified once')
self.incorrect_feedback_raw = text
self.incorrect_feedback_html_xml = self.md.md_to_html_xml(text)
def append_mctf_correct_choice(self, text: str):
if self.type is not None:
raise Text2qtiError(f'Question type "{self.type}" does not support multiple choice')
choice = Choice(text, correct=True, question_hash_digest=self.hash_digest, md=self.md)
if choice.choice_html_xml in self._choice_set:
raise Text2qtiError('Duplicate choice for question')
self._choice_set.add(choice.choice_html_xml)
self.choices.append(choice)
self.correct_choices += 1
def append_mctf_incorrect_choice(self, text: str):
if self.type is not None:
raise Text2qtiError(f'Question type "{self.type}" does not support multiple choice')
choice = Choice(text, correct=False, question_hash_digest=self.hash_digest, md=self.md)
if choice.choice_html_xml in self._choice_set:
raise Text2qtiError('Duplicate choice for question')
self._choice_set.add(choice.choice_html_xml)
self.choices.append(choice)
def append_shortans_correct_choice(self, text: str):
if self.type is None:
self.type = 'short_answer_question'
if self.choices:
raise Text2qtiError(f'Question type "{self.type}" is not compatible with existing choices')
elif self.type != 'short_answer_question':
raise Text2qtiError(f'Question type "{self.type}" does not support short answer')
choice = Choice(text, correct=True, shortans=True, question_hash_digest=self.hash_digest, md=self.md)
if choice.choice_xml in self._choice_set:
raise Text2qtiError('Duplicate choice for question')
self._choice_set.add(choice.choice_xml)
self.choices.append(choice)
self.correct_choices += 1
def append_multans_correct_choice(self, text: str):
if self.type is None:
self.type = 'multiple_answers_question'
if self.choices:
raise Text2qtiError(f'Question type "{self.type}" is not compatible with existing choices')
elif self.type != 'multiple_answers_question':
raise Text2qtiError(f'Question type "{self.type}" does not support multiple answers')
choice = Choice(text, correct=True, question_hash_digest=self.hash_digest, md=self.md)
if choice.choice_html_xml in self._choice_set:
raise Text2qtiError('Duplicate choice for question')
self._choice_set.add(choice.choice_html_xml)
self.choices.append(choice)
self.correct_choices += 1
def append_multans_incorrect_choice(self, text: str):
if self.type is None:
self.type = 'multiple_answers_question'
if self.choices:
raise Text2qtiError(f'Question type "{self.type}" is not compatible with existing choices')
elif self.type != 'multiple_answers_question':
raise Text2qtiError(f'Question type "{self.type}" does not support multiple answers')
choice = Choice(text, correct=False, question_hash_digest=self.hash_digest, md=self.md)
if choice.choice_html_xml in self._choice_set:
raise Text2qtiError('Duplicate choice for question')
self._choice_set.add(choice.choice_html_xml)
self.choices.append(choice)
def append_essay(self, text: str):
if text:
# The essay response indicator consumes its entire line, leaving
# the empty string; `text` just gives all append functions
# the same form.
raise ValueError
if self.type is not None:
if self.type == 'essay_question':
raise Text2qtiError(f'Cannot specify essay response multiple times')
raise Text2qtiError(f'Question type "{self.type}" does not support essay response')
self.type = 'essay_question'
if self.choices:
raise Text2qtiError(f'Question type "{self.type}" is not compatible with existing choices')
if any(x is not None for x in (self.correct_feedback_raw, self.incorrect_feedback_raw)):
raise Text2qtiError(f'Question type "{self.type}" does not support correct/incorrect feedback')
def append_upload(self, text: str):
if text:
# The upload response indicator consumes its entire line, leaving
# the empty string; `text` just gives all append functions
# the same form.
raise ValueError
if self.type is not None:
if self.type == 'file_upload_question':
raise Text2qtiError(f'Cannot specify upload response multiple times')
raise Text2qtiError(f'Question type "{self.type}" does not support upload response')
self.type = 'file_upload_question'
if self.choices:
raise Text2qtiError(f'Question type "{self.type}" is not compatible with existing choices')
if any(x is not None for x in (self.correct_feedback_raw, self.incorrect_feedback_raw)):
raise Text2qtiError(f'Question type "{self.type}" does not support correct/incorrect feedback')
def append_numerical(self, text: str):
if self.type is not None:
if self.type == 'numerical_question':
raise Text2qtiError(f'Cannot specify numerical response multiple times')
raise Text2qtiError(f'Question type "{self.type}" does not support numerical response')
self.type = 'numerical_question'
if self.choices:
raise Text2qtiError(f'Question type "{self.type}" is not compatible with existing choices')
if text.startswith('['):
if not text.endswith(']') or ',' not in text:
raise Text2qtiError('Invalid numerical response; need "[<min>, <max>]" or "<number> +- <margin>" or "<integer>"')
min, max = text[1:-1].split(',', 1)
try:
min = float(min)
max = float(max)
except Exception:
raise Text2qtiError('Invalid numerical response; need "[<min>, <max>]" or "<number> +- <margin>" or "<integer>"')
if min > max:
raise Text2qtiError('Invalid numerical response; need "[<min>, <max>]" with min < max')
self.numerical_min = min
self.numerical_max = max
if min.is_integer() and max.is_integer():
self.numerical_min_html_xml = f'{min}'
self.numerical_max_html_xml = f'{max}'
else:
self.numerical_min_html_xml = f'{min:.4f}'
self.numerical_max_html_xml = f'{max:.4f}'
elif '+-' in text:
num, margin = text.split('+-', 1)
if margin.endswith('%'):
margin_is_percentage = True
margin = margin[:-1]
else:
margin_is_percentage = False
try:
num = float(num)
margin = float(margin)
except Exception:
raise Text2qtiError('Invalid numerical response; need "[<min>, <max>]" or "<number> +- <margin>" or "<integer>"')
if margin < 0:
raise Text2qtiError('Invalid numerical response; need "<number> +- <margin>" with margin > 0')
if margin_is_percentage:
min = num - abs(num)*(margin/100)
max = num + abs(num)*(margin/100)
else:
min = num - margin
max = num + margin
self.numerical_min = min
self.numerical_exact = num
self.numerical_max = max
if min.is_integer() and num.is_integer() and max.is_integer():
self.numerical_min_html_xml = f'{min}'
self.numerical_exact_html_xml = f'{num}'
self.numerical_max_html_xml = f'{max}'
else:
self.numerical_min_html_xml = f'{min:.4f}'
self.numerical_exact_html_xml = f'{num:.4f}'
self.numerical_max_html_xml = f'{max:.4f}'
elif int_re.match(text):
num = int(text)
min = max = num
self.numerical_min = min
self.numerical_exact = num
self.numerical_max = max
self.numerical_min_html_xml = f'{min}'
self.numerical_exact_html_xml = f'{num}'
self.numerical_max_html_xml = f'{max}'
else:
raise Text2qtiError('Invalid numerical response; need "[<min>, <max>]" or "<number> +- <margin>" or "<integer>"')
if abs(min) < 1e-4 or abs(max) < 1e-4:
raise Text2qtiError('Invalid numerical response; all acceptable values must have a magnitude >= 0.0001')
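    # Accepted numerical-answer formats, as parsed above (editor's summary):
    #     = [1.85, 2.15]    any value from min to max
    #     = 2.0 +- 0.15     exact value with an absolute margin
    #     = 2.0 +- 5%       exact value with a percentage margin
    #     = 42              exact integer
    # The range endpoints must have a magnitude of at least 0.0001.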
def finalize(self):
if self.type is None:
if len(self.choices) == 2 and all(c.choice_raw in ('true', 'True', 'false', 'False') for c in self.choices):
self.type = 'true_false_question'
else:
self.type = 'multiple_choice_question'
if not self.choices:
raise Text2qtiError('Question must provide choices')
if len(self.choices) < 2:
raise Text2qtiError('Question must provide more than one choice')
if self.correct_choices < 1:
raise Text2qtiError('Question must specify a correct choice')
if self.correct_choices > 1:
raise Text2qtiError('Question must specify only one correct choice')
elif self.type == 'short_answer_question':
if not self.choices:
raise Text2qtiError('Question must provide at least one answer')
elif self.type == 'multiple_answers_question':
# There must be at least one choice for the type to be set, so
# don't need to check for zero choices
if len(self.choices) < 2:
raise Text2qtiError('Question must provide more than one choice')
if self.correct_choices < 1:
raise Text2qtiError('Question must specify a correct choice')
class Group(object):
'''
A group of questions. A random subset of the questions in a group is
actually displayed.
'''
def __init__(self):
self.pick = 1
self._pick_is_set = False
self.points_per_question = 1
self._points_per_question_is_set = False
self.questions: List[Question] = []
self._question_points_possible: Optional[Union[int, float]] = None
self.title_raw: Optional[str] = None
self.title_xml = 'Group'
def append_group_pick(self, text: str):
if self.questions:
raise Text2qtiError('Question group options must be set at the very start of the group')
if self._pick_is_set:
Text2qtiError('"Pick" has already been set for this question group')
try:
self.pick = int(text)
except Exception as e:
raise Text2qtiError(f'"Pick" value is invalid (must be positive number):\n{e}')
if self.pick <= 0:
raise Text2qtiError(f'"Pick" value is invalid (must be positive number)')
self._pick_is_set = True
def append_group_points_per_question(self, text: str):
if self.questions:
raise Text2qtiError('Question group options must be set at the very start of the group')
if self._points_per_question_is_set:
Text2qtiError('"Points per question" has already been set for this question group')
try:
self.points_per_question = int(text)
except Exception as e:
raise Text2qtiError(f'"Points per question" value is invalid (must be positive number):\n{e}')
if self.points_per_question <= 0:
raise Text2qtiError(f'"Points per question" value is invalid (must be positive number):')
self._points_per_question_is_set = True
def append_question(self, question: Question):
if self._question_points_possible is None:
self._question_points_possible = question.points_possible
elif question.points_possible != self._question_points_possible:
raise Text2qtiError('Question groups must only contain questions with the same point value')
self.questions.append(question)
def finalize(self):
if len(self.questions) <= self.pick:
raise Text2qtiError(f'Question group only contains {len(self.questions)} questions, needs at least {self.pick+1}')
h = hashlib.blake2b()
for digest in sorted(q.hash_digest for q in self.questions):
h.update(digest)
self.hash_digest = h.digest()
self.id = h.hexdigest()[:64]
class GroupStart(object):
'''
Start delim for a group of questions.
'''
def __init__(self, group: Group):
self.group = group
class GroupEnd(object):
'''
End delim for a group of questions.
'''
def __init__(self, group: Group):
self.group = group
class Quiz(object):
'''
A quiz or assessment. Contains a list of questions along with possible
choices and feedback.
'''
def __init__(self, string: str, *, config: Config,
source_name: Optional[str]=None,
resource_path: Optional[Union[str, pathlib.Path]]=None):
self.string = string
self.config = config
self.source_name = '<string>' if source_name is None else f'"{source_name}"'
if resource_path is not None:
if isinstance(resource_path, str):
resource_path = pathlib.Path(resource_path)
else:
raise TypeError
if not resource_path.is_dir():
raise Text2qtiError(f'Resource path "{resource_path.as_posix()}" does not exist')
self.resource_path = resource_path
self.title_raw = None
self.title_xml = 'Quiz'
self.description_raw = None
self.description_html_xml = ''
self.shuffle_answers_raw = None
self.shuffle_answers_xml = 'false'
self.show_correct_answers_raw = None
self.show_correct_answers_xml = 'true'
self.one_question_at_a_time_raw = None
self.one_question_at_a_time_xml = 'false'
self.cant_go_back_raw = None
self.cant_go_back_xml = 'false'
self.questions_and_delims: List[Union[Question, GroupStart, GroupEnd, TextRegion]] = []
self._current_group: Optional[Group] = None
# The set for detecting duplicate questions uses the XML version of
# the question, to avoid the issue of multiple Markdown
# representations of the same XML.
self.question_set: Set[str] = set()
self.md = Markdown(config)
self.images: Dict[str, Image] = self.md.images
self._next_question_attr = {}
# Determine how to interpret `.python` for executable code blocks.
# If `python3` exists, use it instead of `python` if `python` does not
# exist or if `python` is equivalent to `python2`.
if not shutil.which('python2') or not shutil.which('python3'):
python_executable = 'python'
elif not shutil.which('python'):
python_executable = 'python3'
elif pathlib.Path(shutil.which('python')).resolve() == pathlib.Path(shutil.which('python2')).resolve():
python_executable = 'python3'
else:
python_executable = 'python'
try:
parse_actions = {}
for k in start_patterns:
parse_actions[k] = getattr(self, f'append_{k}')
parse_actions[None] = self.append_unknown
start_multiline_comment_pattern = comment_patterns['start_multiline_comment']
end_multiline_comment_pattern = comment_patterns['end_multiline_comment']
line_comment_pattern = comment_patterns['line_comment']
n_line_iter = iter(x for x in enumerate(string.splitlines()))
n, line = next(n_line_iter, (0, None))
lookahead = False
#
while line is not None:
match = start_re.match(line)
if match:
action = match.lastgroup
text = line[match.end():].strip()
if action in multi_line:
if start_patterns[action].endswith(':'):
indent_expandtabs = None
else:
indent_expandtabs = ' '*len(line[:match.end()].expandtabs(4))
text_lines = [text]
n, line = next(n_line_iter, (0, None))
line_expandtabs = line.expandtabs(4) if line is not None else None
lookahead = True
while (line is not None and
(not line or line.isspace() or
indent_expandtabs is None or line_expandtabs.startswith(indent_expandtabs))):
if not line or line.isspace():
if action in multi_para:
text_lines.append('')
else:
break
else:
if indent_expandtabs is None:
if not line.startswith(' '):
break
indent_expandtabs = ' '*(len(line_expandtabs)-len(line_expandtabs.lstrip(' ')))
if len(indent_expandtabs) < 2:
raise Text2qtiError(f'In {self.source_name} on line {n+1}:\nIndentation must be at least 2 spaces or 1 tab here')
# The `rstrip()` prevents trailing double
# spaces from becoming `<br />`.
text_lines.append(line_expandtabs[len(indent_expandtabs):].rstrip())
n, line = next(n_line_iter, (0, None))
line_expandtabs = line.expandtabs(4) if line is not None else None
text = '\n'.join(text_lines)
elif line.startswith(line_comment_pattern):
n, line = next(n_line_iter, (0, None))
continue
elif line.startswith(start_multiline_comment_pattern):
if line.strip() != start_multiline_comment_pattern:
raise Text2qtiError(f'In {self.source_name} on line {n+1}:\nUnexpected content after "{start_multiline_comment_pattern}"')
n, line = next(n_line_iter, (0, None))
while line is not None and not line.startswith(end_multiline_comment_pattern):
n, line = next(n_line_iter, (0, None))
if line is None:
raise Text2qtiError(f'In {self.source_name} on line {n+1}:\nf"{start_multiline_comment_pattern}" without following "{end_multiline_comment_pattern}"')
if line.strip() != end_multiline_comment_pattern:
raise Text2qtiError(f'In {self.source_name} on line {n+1}:\nUnexpected content after "{end_multiline_comment_pattern}"')
n, line = next(n_line_iter, (0, None))
continue
elif line.startswith(end_multiline_comment_pattern):
raise Text2qtiError(f'In {self.source_name} on line {n+1}:\n"{end_multiline_comment_pattern}" without preceding "{start_multiline_comment_pattern}"')
else:
action = None
text = line
try:
parse_actions[action](text)
except Text2qtiError as e:
if lookahead:
raise Text2qtiError(f'In {self.source_name} on line {n}:\n{e}')
raise Text2qtiError(f'In {self.source_name} on line {n+1}:\n{e}')
if not lookahead:
n, line = next(n_line_iter, (0, None))
lookahead = False
if not self.questions_and_delims:
raise Text2qtiError('No questions were found')
if self._current_group is not None:
raise Text2qtiError(f'In {self.source_name} on line {len(string.splitlines())}:\nQuestion group never ended')
last_question_or_delim = self.questions_and_delims[-1]
if isinstance(last_question_or_delim, Question):
try:
last_question_or_delim.finalize()
except Text2qtiError as e:
raise Text2qtiError(f'In {self.source_name} on line {len(string.splitlines())}:\n{e}')
points_possible = 0
digests = []
for x in self.questions_and_delims:
if isinstance(x, Question):
points_possible += x.points_possible
digests.append(x.hash_digest)
elif isinstance(x, GroupStart):
points_possible += x.group.points_per_question*len(x.group.questions)
digests.append(x.group.hash_digest)
elif isinstance(x, GroupEnd):
pass
elif isinstance(x, TextRegion):
pass
else:
raise TypeError
self.points_possible = points_possible
h = hashlib.blake2b()
for digest in sorted(digests):
h.update(digest)
self.hash_digest = h.digest()
self.id = h.hexdigest()[:64]
finally:
self.md.finalize()
def append_quiz_title(self, text: str):
if any(x is not None for x in (self.shuffle_answers_raw, self.show_correct_answers_raw,
self.one_question_at_a_time_raw, self.cant_go_back_raw)):
raise Text2qtiError('Must give quiz title before quiz options')
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if self.title_raw is not None:
raise Text2qtiError('Quiz title has already been given')
if self.questions_and_delims:
raise Text2qtiError('Must give quiz title before questions')
if self.description_raw is not None:
raise Text2qtiError('Must give quiz title before quiz description')
self.title_raw = text
self.title_xml = self.md.xml_escape(text)
def append_quiz_description(self, text: str):
if any(x is not None for x in (self.shuffle_answers_raw, self.show_correct_answers_raw,
self.one_question_at_a_time_raw, self.cant_go_back_raw)):
raise Text2qtiError('Must give quiz description before quiz options')
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if self.description_raw is not None:
raise Text2qtiError('Quiz description has already been given')
if self.questions_and_delims:
raise Text2qtiError('Must give quiz description before questions')
self.description_raw = text
self.description_html_xml = self.md.md_to_html_xml(text)
def append_quiz_shuffle_answers(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if self.questions_and_delims:
raise Text2qtiError('Must give quiz options before questions')
if self.shuffle_answers_raw is not None:
raise Text2qtiError('Quiz option "Shuffle answers" has already been set')
if text not in ('true', 'True', 'false', 'False'):
raise Text2qtiError('Expected option value "true" or "false"')
self.shuffle_answers_raw = text
self.shuffle_answers_xml = text.lower()
def append_quiz_show_correct_answers(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if self.questions_and_delims:
raise Text2qtiError('Must give quiz options before questions')
if self.show_correct_answers_raw is not None:
raise Text2qtiError('Quiz option "Show correct answers" has already been set')
if text not in ('true', 'True', 'false', 'False'):
raise Text2qtiError('Expected option value "true" or "false"')
self.show_correct_answers_raw = text
self.show_correct_answers_xml = text.lower()
def append_quiz_one_question_at_a_time(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if self.questions_and_delims:
raise Text2qtiError('Must give quiz options before questions')
if self.one_question_at_a_time_raw is not None:
raise Text2qtiError('Quiz option "One question at a time" has already been set')
if text not in ('true', 'True', 'false', 'False'):
raise Text2qtiError('Expected option value "true" or "false"')
self.one_question_at_a_time_raw = text
self.one_question_at_a_time_xml = text.lower()
def append_quiz_cant_go_back(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if self.questions_and_delims:
raise Text2qtiError('Must give quiz options before questions')
if self.cant_go_back_raw is not None:
raise Text2qtiError('''Quiz option "Can't go back" has already been set''')
if text not in ('true', 'True', 'false', 'False'):
raise Text2qtiError('Expected option value "true" or "false"')
if self.one_question_at_a_time_xml != 'true':
raise Text2qtiError('''Must set "One question at a time" to "true" before setting "Can't go back"''')
self.cant_go_back_raw = text
self.cant_go_back_xml = text.lower()
def append_text_title(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if self.questions_and_delims:
last_question_or_delim = self.questions_and_delims[-1]
if isinstance(last_question_or_delim, Question):
last_question_or_delim.finalize()
text_region = TextRegion(index=len(self.questions_and_delims), md=self.md)
text_region.set_title(text)
self.questions_and_delims.append(text_region)
def append_text(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if self.questions_and_delims:
last_question_or_delim = self.questions_and_delims[-1]
if isinstance(last_question_or_delim, Question):
last_question_or_delim.finalize()
if isinstance(last_question_or_delim, TextRegion) and last_question_or_delim.text_raw is None:
last_question_or_delim.set_text(text)
else:
text_region = TextRegion(index=len(self.questions_and_delims), md=self.md)
text_region.set_text(text)
self.questions_and_delims.append(text_region)
else:
text_region = TextRegion(index=len(self.questions_and_delims), md=self.md)
text_region.set_text(text)
self.questions_and_delims.append(text_region)
def append_question(self, text: str):
if self.questions_and_delims:
last_question_or_delim = self.questions_and_delims[-1]
if isinstance(last_question_or_delim, Question):
last_question_or_delim.finalize()
question = Question(text,
title=self._next_question_attr.get('title'),
points=self._next_question_attr.get('points'),
md=self.md)
self._next_question_attr = {}
if question.question_html_xml in self.question_set:
raise Text2qtiError('Duplicate question')
self.question_set.add(question.question_html_xml)
self.questions_and_delims.append(question)
if self._current_group is not None:
self._current_group.append_question(question)
def append_question_title(self, text: str):
if 'title' in self._next_question_attr:
raise Text2qtiError('Title for next question has already been set')
if 'points' in self._next_question_attr:
raise Text2qtiError('Title for next question must be set before point value')
self._next_question_attr['title'] = text
def append_question_points(self, text: str):
if 'points' in self._next_question_attr:
raise Text2qtiError('Points for next question has already been set')
self._next_question_attr['points'] = text
def append_feedback(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if not self.questions_and_delims:
raise Text2qtiError('Cannot have feedback without a question')
last_question_or_delim = self.questions_and_delims[-1]
if not isinstance(last_question_or_delim, Question):
raise Text2qtiError('Cannot have feedback without a question')
last_question_or_delim.append_feedback(text)
def append_correct_feedback(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if not self.questions_and_delims:
raise Text2qtiError('Cannot have feedback without a question')
last_question_or_delim = self.questions_and_delims[-1]
if not isinstance(last_question_or_delim, Question):
raise Text2qtiError('Cannot have feedback without a question')
last_question_or_delim.append_correct_feedback(text)
def append_incorrect_feedback(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if not self.questions_and_delims:
raise Text2qtiError('Cannot have feedback without a question')
last_question_or_delim = self.questions_and_delims[-1]
if not isinstance(last_question_or_delim, Question):
raise Text2qtiError('Cannot have feedback without a question')
last_question_or_delim.append_incorrect_feedback(text)
def append_mctf_correct_choice(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if not self.questions_and_delims:
raise Text2qtiError('Cannot have a choice without a question')
last_question_or_delim = self.questions_and_delims[-1]
if not isinstance(last_question_or_delim, Question):
raise Text2qtiError('Cannot have a choice without a question')
last_question_or_delim.append_mctf_correct_choice(text)
def append_mctf_incorrect_choice(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if not self.questions_and_delims:
raise Text2qtiError('Cannot have a choice without a question')
last_question_or_delim = self.questions_and_delims[-1]
if not isinstance(last_question_or_delim, Question):
raise Text2qtiError('Cannot have a choice without a question')
last_question_or_delim.append_mctf_incorrect_choice(text)
def append_shortans_correct_choice(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if not self.questions_and_delims:
raise Text2qtiError('Cannot have an answer without a question')
last_question_or_delim = self.questions_and_delims[-1]
if not isinstance(last_question_or_delim, Question):
raise Text2qtiError('Cannot have an answer without a question')
last_question_or_delim.append_shortans_correct_choice(text)
def append_multans_correct_choice(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if not self.questions_and_delims:
raise Text2qtiError('Cannot have a choice without a question')
last_question_or_delim = self.questions_and_delims[-1]
if not isinstance(last_question_or_delim, Question):
raise Text2qtiError('Cannot have a choice without a question')
last_question_or_delim.append_multans_correct_choice(text)
def append_multans_incorrect_choice(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if not self.questions_and_delims:
raise Text2qtiError('Cannot have a choice without a question')
last_question_or_delim = self.questions_and_delims[-1]
if not isinstance(last_question_or_delim, Question):
raise Text2qtiError('Cannot have a choice without a question')
last_question_or_delim.append_multans_incorrect_choice(text)
def append_essay(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if not self.questions_and_delims:
raise Text2qtiError('Cannot have an essay response without a question')
last_question_or_delim = self.questions_and_delims[-1]
if not isinstance(last_question_or_delim, Question):
raise Text2qtiError('Cannot have an essay response without a question')
last_question_or_delim.append_essay(text)
def append_upload(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if not self.questions_and_delims:
raise Text2qtiError('Cannot have an upload response without a question')
last_question_or_delim = self.questions_and_delims[-1]
if not isinstance(last_question_or_delim, Question):
raise Text2qtiError('Cannot have an upload response without a question')
last_question_or_delim.append_upload(text)
def append_numerical(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if not self.questions_and_delims:
raise Text2qtiError('Cannot have a numerical response without a question')
last_question_or_delim = self.questions_and_delims[-1]
if not isinstance(last_question_or_delim, Question):
raise Text2qtiError('Cannot have a numerical response without a question')
last_question_or_delim.append_numerical(text)
def append_start_group(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if text:
raise ValueError
if self._current_group is not None:
raise Text2qtiError('Question groups cannot be nested')
if self.questions_and_delims:
last_question_or_delim = self.questions_and_delims[-1]
if isinstance(last_question_or_delim, Question):
last_question_or_delim.finalize()
group = Group()
self._current_group = group
self.questions_and_delims.append(GroupStart(group))
def append_end_group(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if text:
raise ValueError
if self._current_group is None:
raise Text2qtiError('No question group to end')
if self.questions_and_delims:
last_question_or_delim = self.questions_and_delims[-1]
if isinstance(last_question_or_delim, Question):
last_question_or_delim.finalize()
self._current_group.finalize()
self.questions_and_delims.append(GroupEnd(self._current_group))
self._current_group = None
def append_group_pick(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if self._current_group is None:
raise Text2qtiError('No question group for setting properties')
self._current_group.append_group_pick(text)
def append_group_points_per_question(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if self._current_group is None:
raise Text2qtiError('No question group for setting properties')
self._current_group.append_group_points_per_question(text)
def append_unknown(self, text: str):
if self._next_question_attr:
raise Text2qtiError('Expected question; question title and/or points were set but not used')
if text and not text.isspace():
match = start_missing_whitespace_re.match(text)
if match:
raise Text2qtiError(f'Missing whitespace after "{match.group().strip()}"')
match = start_missing_content_re.match(text)
if match:
raise Text2qtiError(f'Missing content after "{match.group().strip()}"')
raise Text2qtiError(f'Syntax error; unexpected text, or incorrect indentation for a wrapped paragraph:\n"{text}"')
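# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module). It assumes
# a default-constructible Config from text2cc.config, which is not shown here:
#
#     from text2cc.config import Config
#     quiz = Quiz("Quiz title: Demo\n1.  What is 2 + 2?\na)  3\n*b) 4\n",
#                 config=Config())
#     print(quiz.title_xml, quiz.points_possible)   # -> Demo 1
# ---------------------------------------------------------------------------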
| 49.331331 | 174 | 0.631021 |
face77e1f7624c83524fd198e30a9b23df4d22b9 | 1,098 | py | Python | eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-x86_64-ucs4.egg/EGG-INFO/scripts/bed_subtract_basewise.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | null | null | null | eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-x86_64-ucs4.egg/EGG-INFO/scripts/bed_subtract_basewise.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | null | null | null | eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-x86_64-ucs4.egg/EGG-INFO/scripts/bed_subtract_basewise.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | 1 | 2020-07-25T21:03:18.000Z | 2020-07-25T21:03:18.000Z | #!/afs/bx.psu.edu/project/pythons/linux-x86_64-ucs4/bin/python2.7
"""
Find continuous regions that are covered by the first bed file (`bed_file_1`)
but not by the second bed file (`bed_file_2`)
usage: %prog bed_file_1 bed_file_2
"""
import sys
from warnings import warn
from bx.bitset_builders import binned_bitsets_from_file
from bx.cookbook import doc_optparse
def print_bits_as_bed( bits ):
end = 0
while 1:
start = bits.next_set( end )
if start == bits.size: break
end = bits.next_clear( start )
print "%s\t%d\t%d" % ( chrom, start, end )
options, args = doc_optparse.parse( __doc__ )
try:
in_fname, in2_fname = args
except:
doc_optparse.exit()
# Read first bed into some bitsets
bitsets1 = binned_bitsets_from_file( open( in_fname ) )
bitsets2 = binned_bitsets_from_file( open( in2_fname ) )
for chrom in bitsets1:
if chrom not in bitsets1:
continue
bits1 = bitsets1[chrom]
if chrom in bitsets2:
bits2 = bitsets2[chrom]
bits2.invert()
bits1.iand( bits2 )
print_bits_as_bed( bits1 )
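# Worked example (editor's addition, file names are hypothetical): if a.bed covers
# chr1 100-200 and b.bed covers chr1 150-250, then `bed_subtract_basewise.py a.bed b.bed`
# prints "chr1 100 150" (tab-separated), i.e. the bases covered by the first file
# but not the second.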
| 24.954545 | 77 | 0.684882 |
0b1b04bfc557d58d191a4038ceb860865af26b94 | 4,195 | py | Python | lib/ezdxf/tools/rgb.py | tapnair/DXFer | 8ec957d80c2f251bb78440147d1478106f99b3eb | [
"MIT"
] | 4 | 2019-03-31T00:41:13.000Z | 2021-07-31T05:09:07.000Z | lib/ezdxf/tools/rgb.py | tapnair/DXFer | 8ec957d80c2f251bb78440147d1478106f99b3eb | [
"MIT"
] | null | null | null | lib/ezdxf/tools/rgb.py | tapnair/DXFer | 8ec957d80c2f251bb78440147d1478106f99b3eb | [
"MIT"
] | 5 | 2018-03-29T06:28:07.000Z | 2021-07-31T05:09:08.000Z | # Purpose: work with true color values
# Created: 03.07.2015 taken from my dxfgrabber package
# Copyright (C) 2011, Manfred Moitzi
# License: MIT License
__author__ = "mozman <mozman@gmx.at>"
def int2rgb(value):
return (
(value >> 16) & 0xFF, # red
(value >> 8) & 0xFF, # green
value & 0xFF, # blue
)
def rgb2int(rgb):
r, g, b = rgb
return ((int(r) & 0xff) << 16) | ((int(g) & 0xff) << 8) | (int(b) & 0xff)
def aci2rgb(index):
if index < 1:
raise IndexError(index)
return int2rgb(dxf_default_colors[index])
dxf_default_colors = [
0x000000,
0xff0000,
0xffff00,
0x00ff00,
0x00ffff,
0x0000ff,
0xff00ff,
0xffffff,
0x414141,
0x808080,
0xff0000,
0xffaaaa,
0xbd0000,
0xbd7e7e,
0x810000,
0x815656,
0x680000,
0x684545,
0x4f0000,
0x4f3535,
0xff3f00,
0xffbfaa,
0xbd2e00,
0xbd8d7e,
0x811f00,
0x816056,
0x681900,
0x684e45,
0x4f1300,
0x4f3b35,
0xff7f00,
0xffd4aa,
0xbd5e00,
0xbd9d7e,
0x814000,
0x816b56,
0x683400,
0x685645,
0x4f2700,
0x4f4235,
0xffbf00,
0xffeaaa,
0xbd8d00,
0xbdad7e,
0x816000,
0x817656,
0x684e00,
0x685f45,
0x4f3b00,
0x4f4935,
0xffff00,
0xffffaa,
0xbdbd00,
0xbdbd7e,
0x818100,
0x818156,
0x686800,
0x686845,
0x4f4f00,
0x4f4f35,
0xbfff00,
0xeaffaa,
0x8dbd00,
0xadbd7e,
0x608100,
0x768156,
0x4e6800,
0x5f6845,
0x3b4f00,
0x494f35,
0x7fff00,
0xd4ffaa,
0x5ebd00,
0x9dbd7e,
0x408100,
0x6b8156,
0x346800,
0x566845,
0x274f00,
0x424f35,
0x3fff00,
0xbfffaa,
0x2ebd00,
0x8dbd7e,
0x1f8100,
0x608156,
0x196800,
0x4e6845,
0x134f00,
0x3b4f35,
0x00ff00,
0xaaffaa,
0x00bd00,
0x7ebd7e,
0x008100,
0x568156,
0x006800,
0x456845,
0x004f00,
0x354f35,
0x00ff3f,
0xaaffbf,
0x00bd2e,
0x7ebd8d,
0x00811f,
0x568160,
0x006819,
0x45684e,
0x004f13,
0x354f3b,
0x00ff7f,
0xaaffd4,
0x00bd5e,
0x7ebd9d,
0x008140,
0x56816b,
0x006834,
0x456856,
0x004f27,
0x354f42,
0x00ffbf,
0xaaffea,
0x00bd8d,
0x7ebdad,
0x008160,
0x568176,
0x00684e,
0x45685f,
0x004f3b,
0x354f49,
0x00ffff,
0xaaffff,
0x00bdbd,
0x7ebdbd,
0x008181,
0x568181,
0x006868,
0x456868,
0x004f4f,
0x354f4f,
0x00bfff,
0xaaeaff,
0x008dbd,
0x7eadbd,
0x006081,
0x567681,
0x004e68,
0x455f68,
0x003b4f,
0x35494f,
0x007fff,
0xaad4ff,
0x005ebd,
0x7e9dbd,
0x004081,
0x566b81,
0x003468,
0x455668,
0x00274f,
0x35424f,
0x003fff,
0xaabfff,
0x002ebd,
0x7e8dbd,
0x001f81,
0x566081,
0x001968,
0x454e68,
0x00134f,
0x353b4f,
0x0000ff,
0xaaaaff,
0x0000bd,
0x7e7ebd,
0x000081,
0x565681,
0x000068,
0x454568,
0x00004f,
0x35354f,
0x3f00ff,
0xbfaaff,
0x2e00bd,
0x8d7ebd,
0x1f0081,
0x605681,
0x190068,
0x4e4568,
0x13004f,
0x3b354f,
0x7f00ff,
0xd4aaff,
0x5e00bd,
0x9d7ebd,
0x400081,
0x6b5681,
0x340068,
0x564568,
0x27004f,
0x42354f,
0xbf00ff,
0xeaaaff,
0x8d00bd,
0xad7ebd,
0x600081,
0x765681,
0x4e0068,
0x5f4568,
0x3b004f,
0x49354f,
0xff00ff,
0xffaaff,
0xbd00bd,
0xbd7ebd,
0x810081,
0x815681,
0x680068,
0x684568,
0x4f004f,
0x4f354f,
0xff00bf,
0xffaaea,
0xbd008d,
0xbd7ead,
0x810060,
0x815676,
0x68004e,
0x68455f,
0x4f003b,
0x4f3549,
0xff007f,
0xffaad4,
0xbd005e,
0xbd7e9d,
0x810040,
0x81566b,
0x680034,
0x684556,
0x4f0027,
0x4f3542,
0xff003f,
0xffaabf,
0xbd002e,
0xbd7e8d,
0x81001f,
0x815660,
0x680019,
0x68454e,
0x4f0013,
0x4f353b,
0x333333,
0x505050,
0x696969,
0x828282,
0xbebebe,
0xffffff,
]
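# --- Added illustration (not part of the original module) ---
# A minimal sanity check showing how the helpers above fit together; the expected
# values follow directly from the functions and the default colour table in this file.
if __name__ == "__main__":
    assert int2rgb(0xff0000) == (255, 0, 0)
    assert rgb2int((255, 0, 0)) == 0xff0000
    assert aci2rgb(1) == (255, 0, 0)  # ACI 1 is pure red in the default table
    print(int2rgb(rgb2int((64, 128, 255))))  # round-trips to (64, 128, 255)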
| 14.719298 | 77 | 0.573063 |
82f529d160bc38dad7b3d13b279d503137a68a4e | 11,014 | py | Python | test/functional/test_framework/test_node.py | mitchelvanamstel/flits-core | e6c01dce389511a78b1f6a6896a6d6e97af45c64 | [
"MIT"
] | 2 | 2020-06-30T09:55:56.000Z | 2021-03-20T06:20:35.000Z | test/functional/test_framework/test_node.py | mitchelvanamstel/flits-core | e6c01dce389511a78b1f6a6896a6d6e97af45c64 | [
"MIT"
] | null | null | null | test/functional/test_framework/test_node.py | mitchelvanamstel/flits-core | e6c01dce389511a78b1f6a6896a6d6e97af45c64 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for flsd node under test"""
import decimal
import errno
import http.client
import json
import logging
import os
import re
import subprocess
import time
from .authproxy import JSONRPCException
from .util import (
assert_equal,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
BITCOIND_PROC_WAIT_TIMEOUT = 600
class TestNode():
"""A class for representing a flsd node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir, use_cli=False):
self.index = i
self.datadir = os.path.join(dirname, "node" + str(i))
self.rpchost = rpchost
if timewait:
self.rpc_timeout = timewait
else:
            # Wait for up to 600 seconds for the RPC server to respond
self.rpc_timeout = 600
if binary is None:
self.binary = os.getenv("BITCOIND", "flsd")
else:
self.binary = binary
self.stderr = stderr
self.coverage_dir = coverage_dir
        # Most callers will just need to add extra args to the standard list below. For those callers that need more flexibility, they can just set the args property directly.
self.extra_args = extra_args
self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i]
self.cli = TestNodeCLI(os.getenv("BITCOINCLI", "fls-cli"), self.datadir)
self.use_cli = use_cli
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
self.p2ps = []
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print("Cleaning up leftover process")
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
return getattr(self.rpc, name)
def start(self, extra_args=None, stderr=None, *args, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
if stderr is None:
stderr = self.stderr
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
self.process = subprocess.Popen(self.args + extra_args, stderr=stderr, *args, **kwargs)
self.running = True
self.log.debug("flsd started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the flsd process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
time.sleep(5)
for _ in range(poll_per_s * self.rpc_timeout):
assert self.process.poll() is None, "flsd exited with status %i during initialization" % self.process.returncode
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
while self.rpc.getblockcount() < 0:
time.sleep(1)
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
            except ValueError as e: # cookie file not found and no rpcuser or rpcpassword. bitcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
raise AssertionError("Unable to connect to flsd")
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected
assert self.rpc
wallet_path = "wallet/%s" % wallet_name
return self.rpc / wallet_path
def stop_node(self):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
time.sleep(20)
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert_equal(return_code, 0)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
def node_encrypt_wallet(self, passphrase):
""""Encrypts the wallet.
This causes flsd to shutdown, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
def add_p2p_connection(self, p2p_conn, *args, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(*args, **kwargs)
self.p2ps.append(p2p_conn)
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, "No p2p connection"
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
class TestNodeCLI():
"""Interface to fls-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with fls-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run fls-cli command. Deserializes returned string as python object."""
pos_args = [str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same fls-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running bitcoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
| 38.376307 | 248 | 0.625931 |
b9dd134614b85970df267c69d0100aaa3914540d | 690 | py | Python | Other-models/ucr-fordA-expROCKET.py | alirezaghods/PIPNet | 8608cf5a4c0722c2aeaf8fddbb2d003b259611b7 | [
"MIT"
] | null | null | null | Other-models/ucr-fordA-expROCKET.py | alirezaghods/PIPNet | 8608cf5a4c0722c2aeaf8fddbb2d003b259611b7 | [
"MIT"
] | null | null | null | Other-models/ucr-fordA-expROCKET.py | alirezaghods/PIPNet | 8608cf5a4c0722c2aeaf8fddbb2d003b259611b7 | [
"MIT"
] | null | null | null | import sys
sys.path.append('./')
from datasets.fordA import load_data
from rocket_functions import generate_kernels, apply_kernels
from sklearn.linear_model import RidgeClassifierCV
import numpy as np  # needed for np.logspace below
(x_train, y_train, pic_train), (x_test, y_test, pic_test) = load_data()
print(x_train.shape)
kernels = generate_kernels(x_train.shape[-1], 10_000)
X_training_transform = apply_kernels(x_train, kernels)
classifier = RidgeClassifierCV(alphas = np.logspace(-3, 3, 10), normalize = True)
classifier.fit(X_training_transform, y_train)
X_test_transform = apply_kernels(x_test, kernels)
predictions = classifier.predict(X_test_transform)
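# Added for illustration: report test-set accuracy of the fitted ROCKET + ridge
# classifier (assumes y_test holds the ground-truth labels loaded above).
print(classifier.score(X_test_transform, y_test))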
| 34.5 | 81 | 0.798551 |
6ddc28063b0379b464ba46f8fe2af03639216dc6 | 2,771 | py | Python | scripts/standardize_interwiki.py | leucosticte/speedypywikibot | f03d2070aa5d30a1f82540b0eda6fa603eb341f9 | [
"MIT"
] | 3 | 2019-02-14T13:59:34.000Z | 2021-11-08T09:23:03.000Z | scripts/standardize_interwiki.py | leucosticte/speedypywikibot | f03d2070aa5d30a1f82540b0eda6fa603eb341f9 | [
"MIT"
] | null | null | null | scripts/standardize_interwiki.py | leucosticte/speedypywikibot | f03d2070aa5d30a1f82540b0eda6fa603eb341f9 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Loop over all pages in the home wiki, standardizing the interwiki links.
Parameters:
-start: - Set from what page you want to start
"""
#
# (C) Rob W.W. Hooft, 2003
# (C) Pywikibot team, 2003-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import pywikibot
from pywikibot import textlib
# The summary that the Bot will use.
comment = {
'ar': u'روبوت: توحيد قياسي للإنترويكي',
'cs': 'Robot: standardizace interwiki',
'de': u'Bot: Interwikilinks standardisieren',
'en': u'Robot: Interwiki standardization',
'fa': u'ربات: تصحیح جایگذاری میانویکیها',
'fr': u'Robot : Standardisation des interwikis',
'he': u'בוט: מסדר את האינטרוויקי',
'hi': 'बॉट: अंतरविकि मानकीकरण',
'it': u'Bot: Standardizzo interwiki',
'ja': u'ロボットによる: 言語間リンクを標準化',
'ksh': 'Bot: Engerwiki Lengks opprüühme',
'ml': u'യന്ത്രം: അന്തർവിക്കി ക്രമവൽക്കരണം',
'nds': 'Bot: Links twüschen Wikis standardisseern',
'nl': u'Bot: standaardisatie interwikiverwijzingen',
'no': u'bot: Språklenkestandardisering',
'ro': 'Robot: Standardizare interwiki',
'ur': 'خودکار: بین الویکی روابط کی معیار بندی',
'zh': u'機器人: 跨語連結標準化',
}
def main(*args):
"""Process command line arguments and run the script."""
start = '!'
# Load the default parameters and start
for arg in pywikibot.handle_args():
if arg.startswith('-start'):
if len(arg) == 6:
start = pywikibot.input('From what page do you want to start?')
else:
start = arg[7:]
site = pywikibot.Site()
comm = pywikibot.translate(site, comment)
for pl in site.allpages(start):
plname = pl.title()
pywikibot.output(u'\nLoading %s...' % plname)
try:
oldtext = pl.get()
except pywikibot.IsRedirectPage:
pywikibot.output(u"%s is a redirect!" % plname)
continue
old = pl.interwiki()
new = {}
for pl2 in old:
new[pl2.site] = pywikibot.Page(pl2)
newtext = textlib.replaceLanguageLinks(oldtext, new, site=site)
if new:
if oldtext != newtext:
pywikibot.showDiff(oldtext, newtext)
# Submit changes
try:
pl.put(newtext, comment=comm)
except pywikibot.LockedPage:
pywikibot.output(u"%s is locked" % plname)
continue
else:
pywikibot.output(u'No changes needed.')
continue
else:
pywikibot.output(u'No interwiki found.')
continue
if __name__ == '__main__':
main()
| 30.788889 | 79 | 0.585709 |
3dd713ad66b1310db7484d5d8f82d733b58aaa21 | 5,685 | py | Python | modoboa_radicale/backends/caldav_.py | Toniob/modoboa-radicale | 2ff783c1ba6e7d37b72de618cc33676e682328fe | [
"MIT"
] | null | null | null | modoboa_radicale/backends/caldav_.py | Toniob/modoboa-radicale | 2ff783c1ba6e7d37b72de618cc33676e682328fe | [
"MIT"
] | null | null | null | modoboa_radicale/backends/caldav_.py | Toniob/modoboa-radicale | 2ff783c1ba6e7d37b72de618cc33676e682328fe | [
"MIT"
] | null | null | null | """CalDAV calendar backend."""
import datetime
import uuid
import caldav
from caldav.elements import dav, ical
import icalendar
from django.utils import timezone
from django.utils.encoding import smart_str
from modoboa.parameters import tools as param_tools
from . import CalendarBackend
class Caldav_Backend(CalendarBackend):
"""CalDAV calendar backend."""
def __init__(self, username, password, calendar=None):
"""Constructor."""
super(Caldav_Backend, self).__init__(calendar)
server_url = smart_str(
param_tools.get_global_parameter("server_location"))
self.client = caldav.DAVClient(
server_url,
username=username, password=password)
if self.calendar:
self.remote_cal = self.client.calendar(calendar.encoded_path)
def _serialize_event(self, event):
"""Convert a vevent to a dictionary."""
vevent = event.instance.walk("vevent")[0]
result = {
"id": vevent["uid"],
"title": vevent["summary"],
"color": self.calendar.color,
"description": vevent.get("description", ""),
"calendar": self.calendar,
"attendees": []
}
if isinstance(vevent["dtstart"].dt, datetime.datetime):
all_day = False
start = vevent["dtstart"].dt
end = vevent["dtend"].dt
else:
tz = timezone.get_current_timezone()
all_day = True
start = tz.localize(
datetime.datetime.combine(
vevent["dtstart"].dt, datetime.time.min))
end = tz.localize(
datetime.datetime.combine(
vevent["dtend"].dt, datetime.time.min))
result.update({
"allDay": all_day,
"start": start,
"end": end
})
attendees = vevent.get("attendee", [])
if isinstance(attendees, icalendar.vCalAddress):
attendees = [attendees]
for attendee in attendees:
result["attendees"].append({
"display_name": attendee.params.get("cn"),
"email": smart_str(attendee).replace("MAILTO:", "")
})
return result
def create_calendar(self, url):
"""Create a new calendar."""
self.client.mkcalendar(url)
def update_calendar(self, calendar):
"""Update an existing calendar."""
remote_cal = self.client.calendar(calendar.encoded_path)
remote_cal.set_properties([dav.DisplayName(calendar.name),
ical.CalendarColor(calendar.color)])
def create_event(self, data):
"""Create a new event."""
uid = uuid.uuid4()
cal = icalendar.Calendar()
evt = icalendar.Event()
evt.add("uid", uid)
evt.add("summary", data["title"])
if not data["allDay"]:
evt.add("dtstart", data["start"])
evt.add("dtend", data["end"])
else:
evt.add("dtstart", data["start"].date())
evt.add("dtend", data["end"].date())
cal.add_component(evt)
self.remote_cal.add_event(cal)
return uid
def update_event(self, uid, original_data):
"""Update an existing event."""
data = dict(original_data)
url = "{}/{}.ics".format(self.remote_cal.url.geturl(), uid)
cal = self.remote_cal.event_by_url(url)
orig_evt = cal.instance.walk("vevent")[0]
if "title" in data:
orig_evt["summary"] = data["title"]
if "allDay" in data:
if data["allDay"]:
data["start"] = data["start"].date()
data["end"] = data["end"].date()
if "start" in data:
del orig_evt["dtstart"]
orig_evt.add("dtstart", data["start"])
if "end" in data:
del orig_evt["dtend"]
orig_evt.add("dtend", data["end"])
if "description" in data:
orig_evt["description"] = data["description"]
        attendees = data.get("attendees", [])
        if attendees and "attendee" in orig_evt:
            # Remove the existing attendees once, instead of deleting inside the
            # loop below (which would also drop attendees added in earlier
            # iterations, leaving only the last one).
            del orig_evt["attendee"]
        for attdef in attendees:
            attendee = icalendar.vCalAddress(
                "MAILTO:{}".format(attdef["email"]))
            attendee.params["cn"] = icalendar.vText(attdef["display_name"])
            attendee.params["ROLE"] = icalendar.vText('REQ-PARTICIPANT')
            orig_evt.add("attendee", attendee, encode=0)
cal.instance.subcomponents = []
cal.instance.add_component(orig_evt)
if "calendar" in data and self.calendar.pk != data["calendar"].pk:
# Calendar has been changed, remove old event first.
self.remote_cal.client.delete(url)
remote_cal = self.client.calendar(data["calendar"].encoded_path)
url = "{}/{}.ics".format(remote_cal.url.geturl(), uid)
else:
remote_cal = self.remote_cal
remote_cal.add_event(cal.instance)
return uid
def get_event(self, uid):
"""Retrieve and event using its uid."""
url = "{}/{}.ics".format(self.remote_cal.url.geturl(), uid)
event = self.remote_cal.event_by_url(url)
return self._serialize_event(event)
def get_events(self, start, end):
"""Retrieve a list of events."""
orig_events = self.remote_cal.date_search(start, end)
events = []
for event in orig_events:
events.append(self._serialize_event(event))
return events
def delete_event(self, uid):
"""Delete an event using its uid."""
url = "{}/{}.ics".format(self.remote_cal.url.geturl(), uid)
self.remote_cal.client.delete(url)
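# Illustrative sketch (added; not part of the original backend): typical use of the
# class above. The credentials, dates and the `my_calendar` object are assumptions
# for the example only -- the calendar is expected to expose the `encoded_path` and
# `color` attributes this module relies on.
#
#   import datetime
#   backend = Caldav_Backend("user@example.test", "secret", calendar=my_calendar)
#   uid = backend.create_event({
#       "title": "Team meeting", "allDay": False,
#       "start": datetime.datetime(2020, 1, 6, 10, 0),
#       "end": datetime.datetime(2020, 1, 6, 11, 0),
#   })
#   events = backend.get_events(datetime.datetime(2020, 1, 1),
#                               datetime.datetime(2020, 1, 31))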
| 36.677419 | 76 | 0.572208 |
ec47dd2c2270eeda2dcc02a822e190668522a8e1 | 8,932 | py | Python | slash2/utils/tsuite2/utils/ssh.py | zhihui-slash2/slash2-next | d174a735f9860d2d9e31c47548bda67257400804 | [
"0BSD"
] | null | null | null | slash2/utils/tsuite2/utils/ssh.py | zhihui-slash2/slash2-next | d174a735f9860d2d9e31c47548bda67257400804 | [
"0BSD"
] | null | null | null | slash2/utils/tsuite2/utils/ssh.py | zhihui-slash2/slash2-next | d174a735f9860d2d9e31c47548bda67257400804 | [
"0BSD"
] | null | null | null | import paramiko, getpass, logging
import os, re, errno
from os import path
from time import sleep
log = logging.getLogger('ssh')
logging.getLogger("paramiko").setLevel(logging.WARNING)
class SSH(object):
"""Helpful SSH abstractions for executing remote applications."""
def __init__(self, user, host, password=None, port=22):
"""Initialize SSH object.
Args:
user: username.
host: hostname of connection.
password: user's password. If None, stdin will be prompted for pass.
If the user is using auth_keys, an empty string will work.
port: port of destination's sshd.
Raises: SSHException."""
self.user = user
self.host = host
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#Get password from stdin
# if password is None:
# password = getpass.getpass("{0}'s password: ".format(user))
#Initialize connection
try:
self.ssh.connect(host, username=user, password=password, port=port)
except Exception:
raise paramiko.SSHException
self.sftp = self.ssh.open_sftp()
def close(self):
self.sftp.close()
self.ssh.close()
def recursive_copy(self, src, dst):
"""Recursively copy local path to remote path. Not elevated.
Args:
src: local path.
dst: remote path."""
self.make_dirs(dst)
src = src.rstrip(os.sep)
dst = dst.rstrip(os.sep)
for root, dirs, files in os.walk(src):
dst_root = dst + root[len(src):]
for d in dirs:
remote_path = os.path.join(dst_root, d)
self.make_dirs(remote_path)
for f in files:
path = os.path.join(root, f)
remote_path = os.path.join(dst_root, f)
self.copy_file(path, remote_path)
def copy_file(self, src, dst, elevated=False):
"""Copy local file to remote server. Will not be elevated. :(
Args:
src: path to local file.
dst: path to copy to on remote server.
elevated: attempt to hackily escalate privileges
Returns:
True if it copied successfully, False if the src file does not exist.
Can also throw an IOException"""
try:
if os.path.isfile(src):
if elevated:
temp_dst = path.join("/tmp", path.basename(dst))
self.copy_file(src, temp_dst)
self.run("sudo mv {0} {1}".format(temp_dst, dst))
#Seems unnecessary
#log.debug("Copied file {0} to {1} on {2} with elevated privileges".format(path.basename(src), dst, self.host))
else:
s = open(src, "rb")
contents = s.read()
s.close()
f = self.sftp.open(dst, "wb")
f.write(contents)
f.close()
#log.debug("Copied file {0} to {1} on {2}".format(path.basename(src), dst, self.host))
return True
else:
log.error(src + " does not exist locally!")
return False
except IOError, e:
log.error("Cannot copy file {0} to {1} on {2}!".format(src, dst, self.host))
log.error(str(e))
def pull_file(self, rmt, local):
"""Download remote file. Not elevated.
Args:
rmt: path to file on the remote machine.
local: path to store remote file on local machine."""
r = self.sftp.open(rmt, "rb")
contents = r.read()
r.close()
l = open(local, "wb")
l.write(contents)
l.close()
return True
def make_dirs(self, dirs_path, escalate=False):
"""Create remote directories.
Args:
dirs_path: directory path.
force: attempt to elevate and create"""
#log.debug("Making directory {0} on {1}.".format(dirs_path, self.host))
levels = dirs_path.split(os.sep)
for level in range(1, len(levels)):
try:
path = os.sep.join(levels[:level+1])
if escalate:
if self.run("sudo mkdir {0}".format(path), quiet=True)['err'] == []:
self.run("sudo chmod 0777 {0}".format(path))
#log.debug("Created directory {0} on {1} with escalated priveleges.".format(path, self.host))
else:
self.sftp.mkdir(path)
except IOError as error:
if error.errno != None: #directory doesn't exist
log.error("Could not make directory {0} on {1}.".format(path, self.host))
def list_screen_socks(self):
"""Return a list of open screen sockets."""
socks = []
sock_re = re.compile(r"\s+\d+\.(.+?)\s+.*?")
sock_ls = self.run("screen -ls", quiet=True)["out"]
for sock in sock_ls:
sock_match = sock_re.match(sock)
if sock_match:
socks.append(sock_match.group(1))
return socks
def kill_screens(self, sock_name_prefix, exact_sock=False, quiet=False):
"""Kills a remote sock.
Args:
sock_name_prefix: prefix of any socks to kill.
exact_sock: Consider the prefix to be the exact name.
quiet: Silent output.
Returns: number of socks killed."""
sock_list = self.list_screen_socks()
log.debug("Quitting {0}screen sessions: {1}".format("exact " if exact_sock else "", sock_name_prefix))
        # Parenthesize both lambdas so the conditional picks one predicate or the
        # other; without the parentheses, check() returned a (truthy) lambda
        # whenever exact_sock was False, so every session matched.
        check = (lambda sock: sock == sock_name_prefix) if exact_sock else\
            (lambda sock: sock.startswith(sock_name_prefix))
targeted_socks = filter(check, sock_list)
for sock in targeted_socks:
self.run("screen -X -S {0} quit".format(sock), quiet)
return len(targeted_socks)
def run_screen(self, cmd, sock_name, timeout=None, quiet=False):
"""Remotely execute a command in a screen session. If timeout is reached, screen will be renamed and kept open.
Args:
cmd: command string to be executed.
sock_name: screen session's socket name.
timeout: maximum execution time."""
socks = self.list_screen_socks()
#Sock already exists!
exists = any([s.startswith(sock_name) for s in socks])
if exists:
return False
log.debug("Launching screen: {0} at {1}".format(sock_name, self.host))
#Sanitize newlines
cmd = cmd.strip("\t\n ;")
sane_cmd = ""
for line in cmd.splitlines():
sane_cmd += line.strip() + ";"
cmd = sane_cmd
        if not quiet:
            #Debug -- log the cmds being run
            [log.debug(c) for c in cmd.split(";")]
if timeout:
timed_cmd = ""
for line in cmd.split(";"):
if len(line) > 0:
timed_cmd += "sudo timeout --signal=9 {0} {1}; ".format(timeout, line)
cmd = timed_cmd
#Add return code catch to each command
cmd = cmd.replace(";", "; ck; ")
#Wrap the command with a bash condition to rename and keep the screen session open
shell_script = "ck(){{ c=$?; echo $c; if [[ $c != 0 ]]; then screen -S {0} -X zombie kr; if [[ $c == 137 ]]; then screen -S {0} -X sessionname {0}-timed; else screen -S {0} -X sessionname {0}-error; fi; exit; fi; }}".format(sock_name)
cmd = "screen -S {0} -d -L -m $SHELL -c '{2}; {1}'"\
.format(sock_name, cmd, shell_script)
chan = self.ssh.get_transport().open_session()
chan.exec_command(cmd)
return True
def wait_for_screen(self, sock_name, sleep_duration=3):
"""Blocks until a screen sock is removed or timesout.
Args:
sock_name: socket to be looking for.
sleep_duration: time to sleep inbetween checks.
Returns:
dict {
timedout: true/false
finished: true/false
errored: error code
}"""
result = {
"timedout": False,
"finished": False,
"errored": False
}
#initial rest
sleep(sleep_duration)
while True:
alive = False
for sock in self.list_screen_socks():
if sock == sock_name:
alive = True
break
if sock == sock_name + "-timed":
#Screen timed out
result["timedout"] = True
return result
elif sock == sock_name + "-error":
result["errored"] = True
return result
#If it is still running, sleep for a second
if alive: sleep(sleep_duration)
else:
result["finished"] = True
return result
def run(self, cmd, timeout=None, quiet=False):
"""Remotely execute a command.
Args:
cmd: command string to be executed.
timeout: maximum execution time.
Returns:
dict {
out: stdout.
err: stderr.
exit: exit code of the cmd.
timeout returns 137.
}"""
#Sanitize newlines
cmd = cmd.replace("\n", ";")
#Debug -- log the cmds being run
if not quiet:
[log.debug("{0}@{1}:~/$ {2}".format(self.user, self.host, c)) for c in cmd.split(";")]
if timeout:
cmd = "timeout --signal=9 {0} {1}".format(timeout, cmd)
chan = self.ssh.get_transport().open_session()
chan.exec_command(cmd)
result = {
"out" : list(chan.makefile("rb")),
"err" : list(chan.makefile_stderr("rb")),
"exit": chan.recv_exit_status()
}
return result
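# Illustrative sketch (added; not part of the original module): driving the wrapper
# above. The host name, credentials and remote commands are assumptions for the
# example only.
#
#   conn = SSH("tester", "node1.example.test", password="")
#   result = conn.run("uname -a", timeout=30)
#   if result["exit"] == 0:
#       for line in result["out"]:
#           log.info(line.rstrip())
#   conn.copy_file("/tmp/app.conf", "/etc/app.conf", elevated=True)
#   conn.close()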
| 29.381579 | 238 | 0.604792 |
26977c5f4e8c42e89d05e88f77e5cb7d11815b3f | 221 | py | Python | yawhois/parser/joburg_whois_registry_net_za.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null | yawhois/parser/joburg_whois_registry_net_za.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null | yawhois/parser/joburg_whois_registry_net_za.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null | from .za_central_registry import ZaCentralRegistryParser
class JoburgWhoisRegistryNetZaParser(ZaCentralRegistryParser):
def __init__(self, *args):
super(JoburgWhoisRegistryNetZaParser, self).__init__(*args)
| 31.571429 | 67 | 0.81448 |
6b9ba5e896a5a1c30ed423b7833c4cf345240d00 | 2,733 | py | Python | codenotes/util/help.py | EGAMAGZ/code-notes | 960a018d5a6f0bbbe6b94e17e32553fe6d54d3e2 | [
"MIT"
] | 4 | 2021-02-13T05:19:10.000Z | 2021-02-15T06:25:08.000Z | codenotes/util/help.py | EGAMAGZ/code-notes | 960a018d5a6f0bbbe6b94e17e32553fe6d54d3e2 | [
"MIT"
] | 1 | 2021-04-15T03:59:19.000Z | 2021-04-21T20:10:56.000Z | codenotes/util/help.py | EGAMAGZ/codenotes | 960a018d5a6f0bbbe6b94e17e32553fe6d54d3e2 | [
"MIT"
] | null | null | null | from typing import Text, Final
CLI_USAGE_TEXT: Final[Text] = """[quote]Write any thought you have without quitting from the command line[/quote]
[header]USAGE[/header]
codenotes <command> <annotation> <text> <flags>
[header]CORE COMMANDS[/header]
add Create new note or task with the content typed
search Search for notes or tasks with the parameters specified
[header]ANNOTATION[/header]
note/task Type of annotations
[header]FLAGS[/header]
--version, -v Show codenotes version
[header]EXAMPLES[/header]
    $ codenotes add task Finish coding the tests --new-category Reminders
    $ codenotes add task Create documentation for the codenotes project; Release the project -p
$ codenotes search note --today
[header]FEEDBACK[/header]
Open an issue in [u]github.com/EGAMAGZ/codenotes[/u]"""
ADD_NOTE_USAGE_TEXT: Final[Text] = """[quote]Write any thought you have without quitting from the command line[/quote]
[header]USAGE[/header]
codenotes add note <text> <flags>
[header]FLAGS[/header]
--title,-t <title> Sets a title to the note with a limit of 30 characters. When a title is not specified, it takes
\t\tthe first 30 characters from the note
    --category,-c <category> Creates a new category if it does not exist and stores the note in it
    --preview, -p Shows a preview of the note that will be saved
[header]USAGE[/header]
$ codenotes add note I got an idea for UI --title UI Idea --category Codenotes"""
ADD_TASK_USAGE_TEXT: Final[Text] = """[quote]Write any thought you have without quitting from the command line[/quote]
[header]USAGE[/header]
codenotes add task <text> <flags>
[header]FLAGS[/header]
    --category,-c <category> Creates a new category if it does not exist and stores the task in it
    --preview, -p Shows a preview of the task that will be saved
[header]TEXT[/header]
    To save two or more tasks, use the symbol ; to indicate the ending of a task.
[header]USAGE[/header]
    $ codenotes add task Finish coding the tests --new-category Reminders
    $ codenotes add task Create documentation for the codenotes project; Release the project -p"""
SEARCH_USAGE_TEXT: Final[Text] = """[quote]Write any thought you have without quitting from the command line[/quote]
[header]USAGE[/header]
codenotes search <annotation> <text> <flags>
[header]ANNOTATION[/header]
note/task Type of annotations
[header]TEXT[/header]
    Text to search for; annotations containing it will be shown.
[header]FLAGS[/header]
--today, -t Search annotations created today
--yesterday, -y Search annotations created yesterday
--week, -w Search annotations created in the week
--month, -m Search annotations created in the month
[header]USAGE[/header]
$ codenotes search note --today
$ codenotes search task Finish my project --month"""
| 34.594937 | 118 | 0.754848 |
1d06206b4ad4c4b59416920f764db93f98a761d8 | 9,235 | py | Python | demo_cli.py | Reterno12/Multi-Tacotron-Voice-Cloning | 4cdec83a962b020a2ca92c08329a2f4949453e02 | [
"MIT"
] | null | null | null | demo_cli.py | Reterno12/Multi-Tacotron-Voice-Cloning | 4cdec83a962b020a2ca92c08329a2f4949453e02 | [
"MIT"
] | null | null | null | demo_cli.py | Reterno12/Multi-Tacotron-Voice-Cloning | 4cdec83a962b020a2ca92c08329a2f4949453e02 | [
"MIT"
] | null | null | null | from encoder.params_model import model_embedding_size as speaker_embedding_size
from utils.argutils import print_args
from synthesizer.inference import Synthesizer
from encoder import inference as encoder
from vocoder import inference as vocoder
from pathlib import Path
import numpy as np
import librosa
import soundfile as sf
import argparse
import torch
import sys
from g2p.train import g2p
if __name__ == '__main__':
## Info & args
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("-e", "--enc_model_fpath", type=Path,
default="encoder/saved_models/pretrained.pt",
help="Path to a saved encoder")
parser.add_argument("-s", "--syn_model_dir", type=Path,
default="synthesizer/saved_models/logs-pretrained/",
help="Directory containing the synthesizer model")
parser.add_argument("-v", "--voc_model_fpath", type=Path,
default="vocoder/saved_models/pretrained/pretrained.pt",
help="Path to a saved vocoder")
parser.add_argument("--low_mem", action="store_true", help=\
"If True, the memory used by the synthesizer will be freed after each use. Adds large "
"overhead but allows to save some GPU memory for lower-end GPUs.")
parser.add_argument("--no_sound", action="store_true", help=\
"If True, audio won't be played.")
parser.add_argument("-t", "--text",
default="Hello my friends. Я многоязычный синтез построенный на tacotron. Шла саша по шоссе и сосала сушку",
help="Text")
parser.add_argument("-p", "--path_wav", type=Path,
default="ex.wav",
help="wav file")
parser.add_argument("-p2", "--path2_wav", type=Path,
default='outputs\demo_output.wav',
help='wav file')
args = parser.parse_args()
print_args(args, parser)
if not args.no_sound:
import sounddevice as sd
## Print some environment information (for debugging purposes)
print("Running a test of your configuration...\n")
if not torch.cuda.is_available():
print("Your PyTorch installation is not configured to use CUDA. If you have a GPU ready "
"for deep learning, ensure that the drivers are properly installed, and that your "
"CUDA version matches your PyTorch installation. CPU-only inference is currently "
"not supported.", file=sys.stderr)
quit(-1)
device_id = torch.cuda.current_device()
gpu_properties = torch.cuda.get_device_properties(device_id)
print("Found %d GPUs available. Using GPU %d (%s) of compute capability %d.%d with "
"%.1fGb total memory.\n" %
(torch.cuda.device_count(),
device_id,
gpu_properties.name,
gpu_properties.major,
gpu_properties.minor,
gpu_properties.total_memory / 1e9))
## Load the models one by one.
print("Preparing the encoder, the synthesizer and the vocoder...")
encoder.load_model(args.enc_model_fpath)
synthesizer = Synthesizer(args.syn_model_dir.joinpath("taco_pretrained"), low_mem=args.low_mem)
vocoder.load_model(args.voc_model_fpath)
## Run a test
print("Testing your configuration with small inputs.")
# Forward an audio waveform of zeroes that lasts 1 second. Notice how we can get the encoder's
# sampling rate, which may differ.
# If you're unfamiliar with digital audio, know that it is encoded as an array of floats
    # (or sometimes integers, but mostly floats in this project) ranging from -1 to 1.
# The sampling rate is the number of values (samples) recorded per second, it is set to
# 16000 for the encoder. Creating an array of length <sampling_rate> will always correspond
# to an audio of 1 second.
print("\tTesting the encoder...")
encoder.embed_utterance(np.zeros(encoder.sampling_rate))
# Create a dummy embedding. You would normally use the embedding that encoder.embed_utterance
# returns, but here we're going to make one ourselves just for the sake of showing that it's
# possible.
embed = np.random.rand(speaker_embedding_size)
# Embeddings are L2-normalized (this isn't important here, but if you want to make your own
# embeddings it will be).
embed /= np.linalg.norm(embed)
# The synthesizer can handle multiple inputs with batching. Let's create another embedding to
# illustrate that
embeds = [embed, np.zeros(speaker_embedding_size)]
texts = ["test 1", "test 2"]
print("\tTesting the synthesizer... (loading the model will output a lot of text)")
mels = synthesizer.synthesize_spectrograms(texts, embeds)
# The vocoder synthesizes one waveform at a time, but it's more efficient for long ones. We
# can concatenate the mel spectrograms to a single one.
mel = np.concatenate(mels, axis=1)
# The vocoder can take a callback function to display the generation. More on that later. For
# now we'll simply hide it like this:
no_action = lambda *args: None
print("\tTesting the vocoder...")
# For the sake of making this test short, we'll pass a short target length. The target length
# is the length of the wav segments that are processed in parallel. E.g. for audio sampled
# at 16000 Hertz, a target length of 8000 means that the target audio will be cut in chunks of
# 0.5 seconds which will all be generated together. The parameters here are absurdly short, and
# that has a detrimental effect on the quality of the audio. The default parameters are
# recommended in general.
vocoder.infer_waveform(mel, target=200, overlap=50, progress_callback=no_action)
print("All test passed! You can now synthesize speech.\n\n")
## Interactive speech generation
print("This is a GUI-less example of interface to SV2TTS. The purpose of this script is to "
"show how you can interface this project easily with your own. See the source code for "
"an explanation of what is happening.\n")
print("Interactive generation loop")
num_generated = 0
# Get the reference audio filepath
#message = "Reference voice: enter an audio filepath of a voice to be cloned(Введите путь до клонируемого файла, например ex.wav) (mp3, " \
# "wav, m4a, flac, ...):\n"
#in_fpath = Path(input(message).replace("\"", "").replace("\'", ""))
in_fpath = args.path_wav
## Computing the embedding
# First, we load the wav using the function that the speaker encoder provides. This is
# important: there is preprocessing that must be applied.
# The following two methods are equivalent:
# - Directly load from the filepath:
preprocessed_wav = encoder.preprocess_wav(in_fpath)
# - If the wav is already loaded:
original_wav, sampling_rate = librosa.load(in_fpath)
preprocessed_wav = encoder.preprocess_wav(original_wav, sampling_rate)
print("Loaded file succesfully")
# Then we derive the embedding. There are many functions and parameters that the
# speaker encoder interfaces. These are mostly for in-depth research. You will typically
# only use this function (with its default parameters):
embed = encoder.embed_utterance(preprocessed_wav)
print("Created the embedding")
## Generating the spectrogram
# text = input("Write a sentence (+-20 words) to be synthesized:(Введите предложение для синтеза)\n")
# The synthesizer works in batch, so you need to put your data in a list or numpy array
texts = [args.text]
texts = g2p(texts)
print(texts)
embeds = [embed]
# If you know what the attention layer alignments are, you can retrieve them here by
# passing return_alignments=True
specs = synthesizer.synthesize_spectrograms(texts, embeds)
spec = specs[0]
print("Created the mel spectrogram")
## Generating the waveform
print("Synthesizing the waveform:")
# Synthesizing the waveform is fairly straightforward. Remember that the longer the
# spectrogram, the more time-efficient the vocoder.
generated_wav = vocoder.infer_waveform(spec)
## Post-generation
# There's a bug with sounddevice that makes the audio cut one second earlier, so we
# pad it.
generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant")
# Play the audio (non-blocking)
if not args.no_sound:
sd.stop()
sd.play(generated_wav, synthesizer.sample_rate)
# Save it on the disk
#fpath = "demo_output_%02d.wav" % num_generated #original
fpath = args.path2_wav # I don't need to save all the outputs, because I use telegram
print(generated_wav.dtype)
sf.write(fpath, generated_wav.astype(np.float32), samplerate = synthesizer.sample_rate)
num_generated += 1
print("\nSaved output as %s\n\n" % fpath)
| 46.407035 | 143 | 0.675257 |
c041404e829b98dec7e0e6ca1897be2efde83406 | 2,555 | py | Python | deeppavlov/skills/dsl_skill/handlers/handler.py | xbodx/DeepPavlov | 4b60bf162df4294b8b0db3b72786cdd699c674fa | [
"Apache-2.0"
] | 5,893 | 2018-02-01T18:13:20.000Z | 2022-03-31T19:22:21.000Z | deeppavlov/skills/dsl_skill/handlers/handler.py | xbodx/DeepPavlov | 4b60bf162df4294b8b0db3b72786cdd699c674fa | [
"Apache-2.0"
] | 749 | 2018-01-31T11:36:02.000Z | 2022-03-30T07:24:22.000Z | deeppavlov/skills/dsl_skill/handlers/handler.py | xbodx/DeepPavlov | 4b60bf162df4294b8b0db3b72786cdd699c674fa | [
"Apache-2.0"
] | 1,155 | 2018-02-01T10:52:15.000Z | 2022-03-29T02:12:15.000Z | # Copyright 2019 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional
from deeppavlov.skills.dsl_skill.context import UserContext
from deeppavlov.skills.dsl_skill.utils import SkillResponse
class Handler:
"""
Handler instance helps DSLMeta class distinguish functions wrapped
by @DSLMeta.handler to add them to handlers storage.
It also checks if the handler function should be triggered based on the given context.
Attributes:
func: handler function
state: state in which handler can be activated
priority: priority of the function. If 2 or more handlers can be activated, handler
with the highest priority is selected
context_condition: predicate that accepts user context and checks if the handler should be activated. Example:
`lambda context: context.user_id != 1` checks if user_id is not equal to 1.
That means a user with id 1 will be always ignored by the handler.
"""
def __init__(self,
func: Callable,
state: Optional[str] = None,
context_condition: Optional[Callable] = None,
priority: int = 0):
self.func = func
self.state = state
self.context_condition = context_condition
self.priority = priority
def __call__(self, context: UserContext) -> SkillResponse:
return self.func(context)
def check(self, context: UserContext) -> bool:
"""
Checks:
- if the handler function should be triggered based on the given context via context condition.
Args:
context: user context
Returns:
True, if handler should be activated, False otherwise
"""
if self.context_condition is not None:
return self.context_condition(context)
return True
def expand_context(self, context: UserContext) -> UserContext:
context.handler_payload = {}
return context
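# Illustrative sketch (added; not part of the original module): wrapping a plain
# function by hand. In normal use DSLMeta's @handler decorator builds this object;
# the greeting function and its return value are assumptions for the example only.
#
#   def greet(context: UserContext):
#       return SkillResponse("Hello!", 1.0)
#
#   greeting_handler = Handler(greet,
#                              state="start",
#                              context_condition=lambda ctx: ctx.user_id != 1,
#                              priority=10)
#   if greeting_handler.check(context):
#       response = greeting_handler(greeting_handler.expand_context(context))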
| 37.028986 | 118 | 0.686106 |
168c4be2301a5eeaf7c0d97fab5dbe57070c67bf | 3,164 | py | Python | biblio/settings.py | tesa123/kurs_django | 36d0c4bc8acc08a367c2745b23be821b3dcbb482 | [
"MIT"
] | null | null | null | biblio/settings.py | tesa123/kurs_django | 36d0c4bc8acc08a367c2745b23be821b3dcbb482 | [
"MIT"
] | null | null | null | biblio/settings.py | tesa123/kurs_django | 36d0c4bc8acc08a367c2745b23be821b3dcbb482 | [
"MIT"
] | null | null | null | """
Django settings for biblio project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9lz6sn3v=-y4yyboyuzi2@rdd@mst4)+_d0^0z0$fe((gy71qq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [] #127.0.0.1
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'biblio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'biblio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'pl'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True # internationalization
USE_L10N = True # localization
USE_TZ = True # timezone
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| 26.14876 | 91 | 0.698799 |
83da5a442ecdd4d5bbae60cd8adb0ec288299937 | 6,444 | py | Python | nbsmoke/__init__.py | ContinuumIO/nbsmoke | 7293dfc17708824f041ca6f75a56bafd619cef09 | [
"BSD-3-Clause"
] | 7 | 2019-08-12T17:14:54.000Z | 2021-11-18T23:00:30.000Z | nbsmoke/__init__.py | pyviz-dev/nbsmoke | 7293dfc17708824f041ca6f75a56bafd619cef09 | [
"BSD-3-Clause"
] | 36 | 2019-09-01T13:04:09.000Z | 2021-10-31T16:45:24.000Z | nbsmoke/__init__.py | ContinuumIO/nbsmoke | 7293dfc17708824f041ca6f75a56bafd619cef09 | [
"BSD-3-Clause"
] | 3 | 2018-06-10T01:50:02.000Z | 2019-02-21T07:55:48.000Z | # -*- coding: utf-8 -*-
# Note: created with cookiecutter by someone with no experience of how
# to make a pytest plugin. Please question anything related to the
# pytest integration!
import re
import os
import io
import contextlib
import param
import pytest
import nbformat
import nbconvert
from nbconvert.preprocessors import ExecutePreprocessor
__version__ = str(param.version.Version(
fpath=__file__, archive_commit="$Format:%h$", reponame="panel"))
from .lint import LintNb
from .verify import VerifyNb
def pytest_addoption(parser):
group = parser.getgroup('nbsmoke')
group.addoption(
'--nbsmoke-run',
action="store_true",
help="Run notebooks using nbconvert to check for exceptions.")
group.addoption(
'--nbsmoke-lint',
action="store_true",
help="Lint check notebooks using flake8")
group.addoption(
'--nbsmoke-lint-debug',
action="store_true",
help="Write out copy of python script resulting from conversion of ipynb")
group.addoption(
'--nbsmoke-lint-onlywarn',
action="store_true",
help="Flake errors will only appear as warnings")
group.addoption(
'--nbsmoke-verify',
action="store_true",
help="Verify notebooks")
group.addoption(
'--store-html',
action="store",
dest='store_html',
default='',
help="When running, store rendered-to-html notebooks in the supplied path.")
parser.addini('nbsmoke_cell_timeout', "nbsmoke's nbconvert cell timeout")
####
# TODO: hacks to work around pyviz team desire to not use pytest's markers
parser.addini('nbsmoke_skip_run', 're to skip (multi-line; one pattern per line)')
group.addoption(
'--ignore-nbsmoke-skip-run',
action="store_true",
help="Ignore any skip list in the ini file (allows to run all nbs if desired)")
####
# TODO: remove/rename/see pytest python_files
parser.addini('it_is_nb_file', 're to determine whether file is notebook')
parser.addini('nbsmoke_flakes_to_ignore', "flake messages to ignore during nbsmoke's flake checking")
parser.addini('nbsmoke_flakes_cell_magics_blacklist', "cell magics you don't want to see - i.e. treat as lint.")
parser.addini('nbsmoke_flakes_line_magics_blacklist', "line magics you don't want to see - i.e. treat as lint")
parser.addini('nbsmoke_magic_handlers', "path to .py file containing custom magic handlers")
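# Example configuration (added for illustration; the option names are the ones
# registered above, the values are assumptions), e.g. in a project's pytest.ini:
#
#   [pytest]
#   nbsmoke_cell_timeout = 600
#   nbsmoke_skip_run = ^.*skipme\.ipynb$
#                      ^.*skipmetoo.*$
#   it_is_nb_file = ^.*\.ipynb
#   nbsmoke_flakes_to_ignore = W391 W292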
@contextlib.contextmanager
def cwd(d):
orig = os.getcwd()
os.chdir(d)
try:
yield
finally:
os.chdir(orig)
###################################################
class RunNb(pytest.Item):
def repr_failure(self, excinfo):
return excinfo.exconly(True)
def runtest(self):
self._skip()
with io.open(self.name,encoding='utf8') as nb:
notebook = nbformat.read(nb, as_version=4)
# TODO: which kernel? run in pytest's or use new one (make it option)
_timeout = self.parent.parent.config.getini('nbsmoke_cell_timeout')
kwargs = dict(timeout=int(_timeout) if _timeout!='' else 300,
allow_errors=False,
# or sys.version_info[1] ?
kernel_name='python')
ep = ExecutePreprocessor(**kwargs)
with cwd(os.path.dirname(self.name)): # jupyter notebook always does this, right?
ep.preprocess(notebook,{})
# TODO: clean up this option handling
if self.parent.parent.config.option.store_html != '':
he = nbconvert.HTMLExporter()
# Backwards incompatible change in nbconvert 6 in template file names
if nbconvert.version_info[0] < 6:
he.template_file = 'basic'
else:
he.template_file = 'classic/base.html.j2'
# could maybe use this for chance of testing the html? but not the aim of this project
#he.template_file = 'basic'
html, resources = he.from_notebook_node(notebook)
with io.open(os.path.join(self.parent.parent.config.option.store_html,os.path.basename(self.name)+'.html'),'w',encoding='utf8') as f:
f.write(html)
def _skip(self):
_skip_patterns = self.parent.parent.config.getini('nbsmoke_skip_run')
if not self.parent.parent.config.option.ignore_nbsmoke_skip_run:
for pattern in _skip_patterns.splitlines():
if re.match(pattern,self.nodeid.split("::")[0],re.IGNORECASE):
pytest.skip()
class IPyNbFile(pytest.File):
def __init__(self, fspath, parent=None, config=None, session=None, dowhat=RunNb):
self._dowhat = dowhat
super(IPyNbFile,self).__init__(fspath, parent=parent, config=None, session=None)
def collect(self):
if hasattr(self._dowhat, "from_parent"):
yield self._dowhat.from_parent(self,name=str(self.fspath))
else: # older pytest (https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent)
yield self._dowhat(str(self.fspath), self)
def pytest_collect_file(path, parent):
opt = parent.config.option
# TODO: Make this pattern standard/configurable.
# match .ipynb except .nbval.ipynb
it_is_nb_file = parent.config.getini('it_is_nb_file')
if it_is_nb_file == '':
#"^((?!\.nbval).)*\.ipynb$"
it_is_nb_file = r"^.*\.ipynb"
if re.match(it_is_nb_file,path.strpath,re.IGNORECASE):
if opt.nbsmoke_run or opt.nbsmoke_lint or opt.nbsmoke_verify:
# TODO express via the options system if you ever figure it out
# Hmm, should be able to do all - clean up!
assert (opt.nbsmoke_run ^ opt.nbsmoke_lint) ^ opt.nbsmoke_verify
if opt.nbsmoke_run:
dowhat = RunNb
elif opt.nbsmoke_lint:
dowhat = LintNb
elif opt.nbsmoke_verify:
dowhat = VerifyNb
if hasattr(IPyNbFile, "from_parent"):
return IPyNbFile.from_parent(parent, fspath=path, dowhat=dowhat)
else: # for older pytest (https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent)
return IPyNbFile(path, parent, dowhat=dowhat)
| 36.613636 | 149 | 0.632992 |
bab25d74807b46159cbec3e735a87ec7c883b662 | 12,762 | py | Python | env/lib/python3.8/site-packages/plotly/validators/_scattermapbox.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2 | 2021-07-07T20:16:23.000Z | 2021-07-14T14:03:09.000Z | env/lib/python3.8/site-packages/plotly/validators/_scattermapbox.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 5 | 2020-06-05T20:56:21.000Z | 2021-09-22T19:12:42.000Z | env/lib/python3.8/site-packages/plotly/validators/_scattermapbox.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2 | 2020-07-05T12:57:14.000Z | 2020-07-05T12:58:00.000Z | import _plotly_utils.basevalidators
class ScattermapboxValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="scattermapbox", parent_name="", **kwargs):
super(ScattermapboxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Scattermapbox"),
data_docs=kwargs.pop(
"data_docs",
"""
below
Determines if this scattermapbox trace's layers
are to be inserted before the layer with the
specified ID. By default, scattermapbox layers
are inserted above all the base layers. To
place the scattermapbox layers above every
other layer, set `below` to "''".
connectgaps
Determines whether or not gaps (i.e. {nan} or
missing values) in the provided data arrays are
connected.
customdata
Assigns extra data each datum. This may be
useful when listening to hover, click and
selection events. Note that, "scatter" traces
also appends customdata items in the markers
DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud
for customdata .
fill
Sets the area to fill with a solid color. Use
with `fillcolor` if not "none". "toself"
connects the endpoints of the trace (or each
segment of the trace if it has gaps) into a
closed shape.
fillcolor
Sets the fill color. Defaults to a half-
transparent variant of the line color, marker
color, or marker line color, whichever is
available.
hoverinfo
Determines which trace information appear on
hover. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud
for hoverinfo .
hoverlabel
:class:`plotly.graph_objects.scattermapbox.Hove
rlabel` instance or dict with compatible
properties
hovertemplate
Template string used for rendering the
information that appear on hover box. Note that
this will override `hoverinfo`. Variables are
inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's
syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
for details on the date formatting syntax. The
variables available in `hovertemplate` are the
ones emitted as event data described at this
link https://plotly.com/javascript/plotlyjs-
events/#event-data. Additionally, every
                attribute that can be specified per-point (the
                ones that are `arrayOk: true`) is available.
Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud
for hovertemplate .
hovertext
Sets hover text elements associated with each
                (lon,lat) pair. If a single string, the same
string appears over all the data points. If an
array of string, the items are mapped in order
                to this trace's (lon,lat) coordinates. To
be seen, trace `hoverinfo` must contain a
"text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud
for hovertext .
ids
                Assigns id labels to each datum. These ids are
                used for object constancy of data points during
                animation. Should be an array of strings, not
                numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud
for ids .
lat
Sets the latitude coordinates (in degrees
North).
latsrc
Sets the source reference on Chart Studio Cloud
for lat .
legendgroup
Sets the legend group for this trace. Traces
part of the same legend group hide/show at the
same time when toggling legend items.
line
:class:`plotly.graph_objects.scattermapbox.Line
` instance or dict with compatible properties
lon
Sets the longitude coordinates (in degrees
East).
lonsrc
Sets the source reference on Chart Studio Cloud
for lon .
marker
:class:`plotly.graph_objects.scattermapbox.Mark
er` instance or dict with compatible properties
meta
Assigns extra meta information associated with
this trace that can be used in various text
attributes. Attributes such as trace `name`,
graph, axis and colorbar `title.text`,
annotation `text` `rangeselector`,
`updatemenues` and `sliders` `label` text all
support `meta`. To access the trace `meta`
values in an attribute in the same trace,
simply use `%{meta[i]}` where `i` is the index
or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or
key of the `meta` and `n` is the trace index.
metasrc
Sets the source reference on Chart Studio Cloud
for meta .
mode
Determines the drawing mode for this scatter
trace. If the provided `mode` includes "text"
then the `text` elements appear at the
coordinates. Otherwise, the `text` elements
appear on hover.
name
                Sets the trace name. The trace name appears as
the legend item and on hover.
opacity
Sets the opacity of the trace.
selected
:class:`plotly.graph_objects.scattermapbox.Sele
cted` instance or dict with compatible
properties
selectedpoints
Array containing integer indices of selected
points. Has an effect only for traces that
support selections. Note that an empty array
means an empty selection where the `unselected`
are turned on for all points, whereas, any
other non-array values means no selection all
where the `selected` and `unselected` styles
have no effect.
showlegend
Determines whether or not an item corresponding
to this trace is shown in the legend.
stream
:class:`plotly.graph_objects.scattermapbox.Stre
am` instance or dict with compatible properties
subplot
Sets a reference between this trace's data
coordinates and a mapbox subplot. If "mapbox"
(the default value), the data refer to
`layout.mapbox`. If "mapbox2", the data refer
to `layout.mapbox2`, and so on.
text
Sets text elements associated with each
                (lon,lat) pair. If a single string, the same
string appears over all the data points. If an
array of string, the items are mapped in order
                to this trace's (lon,lat) coordinates. If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be
seen in the hover labels.
textfont
Sets the icon text font
(color=mapbox.layer.paint.text-color,
size=mapbox.layer.layout.text-size). Has an
effect only when `type` is set to "symbol".
textposition
Sets the positions of the `text` elements with
respects to the (x,y) coordinates.
textsrc
Sets the source reference on Chart Studio Cloud
for text .
texttemplate
Template string used for rendering the
information text that appear on points. Note
that this will override `textinfo`. Variables
are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's
syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
for details on the date formatting syntax.
                Every attribute that can be specified per-
                point (the ones that are `arrayOk: true`) is
                available, as are the variables `lat`, `lon`
                and `text`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud
for texttemplate .
uid
Assign an id to this trace, Use this to provide
object constancy between traces during
animations and transitions.
uirevision
Controls persistence of some user-driven
changes to the trace: `constraintrange` in
`parcoords` traces, as well as some `editable:
true` modifications such as `name` and
`colorbar.title`. Defaults to
`layout.uirevision`. Note that other user-
driven trace attribute changes are controlled
by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and
`colorbar.(x|y)` (accessible with `config:
{editable: true}`) is controlled by
`layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on
trace index if no `uid` is provided. So if your
app can add/remove traces before the end of the
`data` array, such that the same trace has a
different index, you can still preserve user-
driven changes if you give each trace a `uid`
that stays with it as it moves.
unselected
:class:`plotly.graph_objects.scattermapbox.Unse
lected` instance or dict with compatible
properties
visible
Determines whether or not this trace is
visible. If "legendonly", the trace is not
drawn, but can appear as a legend item
(provided that the legend itself is visible).
""",
),
**kwargs
)
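# Usage sketch (added for illustration, not part of the generated module): the attributes
# documented in data_docs above are the ones accepted by plotly.graph_objects.Scattermapbox,
# e.g.:
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Scattermapbox(lat=[45.5], lon=[-73.6], mode="markers+text",
#                                    text=["Montreal"],
#                                    hovertemplate="%{lat}, %{lon}<extra>%{text}</extra>"))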
| 48.340909 | 78 | 0.545212 |
9b54019d17daf089dd6007bac2ffa465d63758d2 | 704 | py | Python | web/addons/website_quote/__openerp__.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | ["Apache-2.0"] | 1 | 2019-12-29T11:53:56.000Z | 2019-12-29T11:53:56.000Z | odoo/addons/website_quote/__openerp__.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | ["MIT"] | null | null | null | odoo/addons/website_quote/__openerp__.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | ["MIT"] | 3 | 2020-10-08T14:42:10.000Z | 2022-01-28T14:12:29.000Z |
{
'name': 'Online Proposals',
'category': 'Website',
'summary': 'Send Professional Quotations',
'website': 'https://www.odoo.com/page/quote-builder',
'version': '1.0',
'description': """
OpenERP Sale Quote Roller
=========================
""",
'author': 'OpenERP SA',
'depends': ['website','sale', 'mail'],
'data': [
'views/website_quotation.xml',
'views/website_quotation_backend.xml',
'views/report_saleorder.xml',
'data/website_quotation_data.xml',
'security/ir.model.access.csv',
],
'demo': [
'data/website_quotation_demo.xml'
],
'qweb': ['static/src/xml/*.xml'],
'installable': True,
}
| 26.074074 | 57 | 0.553977 |
bf32946985452099eb7a358aa2319b84fc926e53 | 2,898 | py | Python | utils/plot.py | Z-yq/TensorflowTTS | abbc79f6e8d97fed3c7308ae844b97f59d349301 | ["Apache-2.0"] | 50 | 2021-04-02T07:23:00.000Z | 2022-03-23T06:13:58.000Z | utils/plot.py | Z-yq/TensorflowTTS | abbc79f6e8d97fed3c7308ae844b97f59d349301 | ["Apache-2.0"] | 3 | 2021-05-21T07:23:57.000Z | 2021-12-08T08:25:19.000Z | utils/plot.py | Z-yq/TensorflowTTS | abbc79f6e8d97fed3c7308ae844b97f59d349301 | ["Apache-2.0"] | 15 | 2021-04-17T10:57:17.000Z | 2022-03-18T09:28:11.000Z |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
def split_title_line(title_text, max_words=5):
"""
A function that splits any string based on specific character
(returning it with the string), with maximum number of words on it
"""
seq = title_text.split()
return '\n'.join([' '.join(seq[i:i + max_words]) for i in range(0, len(seq), max_words)])
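# Worked example (added comment, not in the original file):
#   split_title_line('a b c d e f')  ->  'a b c d e\nf'   (with the default max_words=5)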
def plot_alignment(alignment, path, info=None, split_title=False, max_len=None):
if max_len is not None:
alignment = alignment[:, :max_len]
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
im = ax.imshow(
alignment,
aspect='auto',
origin='lower',
interpolation='none')
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
if info is not None:
if split_title:
title = split_title_line(info)
else:
title = info
plt.xlabel(xlabel)
if info is not None:
plt.title(title)
plt.ylabel('Encoder timestep')
plt.tight_layout()
plt.savefig(path, format='png')
plt.close()
def plot_spectrogram(pred_spectrogram, path, info=None, split_title=False, target_spectrogram=None, max_len=None,
auto_aspect=False,figsize=(10,8)):
if max_len is not None:
target_spectrogram = target_spectrogram[:max_len]
pred_spectrogram = pred_spectrogram[:max_len]
if split_title:
title = split_title_line(info)
else:
title = info
fig = plt.figure(figsize=figsize)
# Set common labels
fig.text(0.5, 0.18, title, horizontalalignment='center', fontsize=16)
# target spectrogram subplot
if target_spectrogram is not None:
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
if auto_aspect:
im = ax1.imshow(np.rot90(target_spectrogram), aspect='auto', interpolation='none')
else:
im = ax1.imshow(np.rot90(target_spectrogram), interpolation='none')
ax1.set_title('Target Mel-Spectrogram')
fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)
ax2.set_title('Predicted Mel-Spectrogram')
else:
ax2 = fig.add_subplot(211)
if auto_aspect:
im = ax2.imshow(np.rot90(pred_spectrogram), aspect='auto', interpolation='none')
else:
im = ax2.imshow(np.rot90(pred_spectrogram), interpolation='none')
fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax2)
plt.tight_layout()
plt.savefig(path, format='png')
plt.close()
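# Usage sketch (added comment; argument values are illustrative): both helpers expect arrays
# shaped (time, frequency/mel bins) and rely on np.rot90 so time runs along the x-axis of the
# saved figure, e.g.
#   plot_alignment(align, 'align.png', info='step 10000')
#   plot_spectrogram(pred_mel, 'mel.png', info='step 10000', target_spectrogram=target_mel)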
if __name__ == '__main__':
import tensorflow as tf
import librosa
wav = librosa.load('gresult.wav', 16000)[0]
mel = tf.signal.stft(wav, 1024, 200, 1024)
	mel = np.abs(mel.numpy())  # stft returns complex values; take the magnitude before the log
	mel = np.log(mel + 1e-6)
print(mel.shape,mel.dtype)
plot_spectrogram(mel.astype('float32'),'test.png')
| 29.876289 | 113 | 0.646998 |
0f70e53221634c25ef5b9365ad4e3d8d00516b0d | 1,605 | py | Python | doc/source/isphx/objpull.py | flying-sheep/sphobjinv | 0aa56e3982f99bf811cef4126e452ddd65cae088 | ["MIT"] | 55 | 2016-10-30T05:03:16.000Z | 2022-03-13T18:00:44.000Z | doc/source/isphx/objpull.py | flying-sheep/sphobjinv | 0aa56e3982f99bf811cef4126e452ddd65cae088 | ["MIT"] | 202 | 2016-05-16T13:25:50.000Z | 2022-03-22T20:05:45.000Z | doc/source/isphx/objpull.py | bskinn/sphinx-objectsinv | 505c7afc656f20b9e105f4ead9c6eb570eef971a | ["MIT"] | 4 | 2020-03-29T01:47:50.000Z | 2021-04-07T13:37:05.000Z |
# Quickie script for refreshing the local objects.inv cache
# OVERWRITES EXISTING FILES, WITH PRE-DELETION
def pullobjs():
import os
import urllib.request as urlrq
import certifi
# Open conf.py, retrieve content and compile
with open(os.path.join(os.pardir, 'conf.py'), 'r') as f:
confcode = compile(f.read(), 'conf.py', 'exec')
# Execute conf.py into the global namespace (I know, sloppy)
exec(confcode, globals())
# Iterate intersphinx_mapping from conf.py to retrieve the objects.inv files
# Make use of the conf.py 'isphx_objstr' substitution string, too
for n, t in intersphinx_mapping.items():
print('{0}:\n'.format(n) + '-' * 16)
try:
os.remove(isphx_objstr.format(n))
except FileNotFoundError:
pass # No big deal
try:
resp = urlrq.urlopen(t[0] + '/objects.inv', cafile=certifi.where())
except Exception as e:
print('HTTP request failed:\n' + str(e) + '\n')
continue
else:
print('... located ...')
try:
b_s = resp.read()
except Exception as e:
print('Download failed:\n' + str(e) + '\n')
continue
else:
print('... downloaded ...')
try:
with open(isphx_objstr.format(n), 'wb') as f:
f.write(b_s)
except Exception as e:
print('Write failed:\n' + str(e) + '\n')
continue
else:
print('... done.')
print('')
if __name__ == '__main__':
pullobjs()
| 25.887097 | 80 | 0.54704 |
dc7a0224e7d734a9ffb14f4ffe0f3e3ee9497609 | 4,145 | py | Python | research/counting/train.py | OOXXXXOO/XCloud | 021342eec570f12d82ae750a645dc1cb99cfb733 | ["MIT"] | null | null | null | research/counting/train.py | OOXXXXOO/XCloud | 021342eec570f12d82ae750a645dc1cb99cfb733 | ["MIT"] | 11 | 2021-03-19T15:23:06.000Z | 2022-03-12T00:51:00.000Z | research/counting/train.py | OOXXXXOO/XCloud | 021342eec570f12d82ae750a645dc1cb99cfb733 | ["MIT"] | null | null | null |
import numpy as np
import time
import torch
import torch.nn as nn
import os
# import visdom
import random
from tqdm import tqdm as tqdm
from research.counting.cannet import CANNet
from research.counting.datasets import ShanghaiTechCrowdCountingDataset
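# Added summary comment: this script trains the CANNet crowd-counting model on the
# ShanghaiTech part A density maps with a summed per-pixel MSE loss, saves a checkpoint
# after every epoch, and tracks the epoch with the lowest test MAE.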
if __name__ == "__main__":
# configuration
train_image_root = './data/Shanghai_part_A/train_data/images'
train_dmap_root = './data/Shanghai_part_A/train_data/ground_truth'
test_image_root = './data/Shanghai_part_A/test_data/images'
test_dmap_root = './data/Shanghai_part_A/test_data/ground_truth'
gpu_or_cpu = 'cuda' # use cuda or cpu
lr = 1e-7
batch_size = 1
momentum = 0.95
epochs = 20000
steps = [-1, 1, 100, 150]
scales = [1, 1, 1, 1]
workers = 4
seed = time.time()
print_freq = 30
# vis=visdom.Visdom()
device = torch.device(gpu_or_cpu)
torch.cuda.manual_seed(seed)
model = CANNet().to(device)
criterion = nn.MSELoss(size_average=False).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr,
momentum=momentum,
weight_decay=0)
# optimizer=torch.optim.Adam(model.parameters(),lr)
train_dataset = ShanghaiTechCrowdCountingDataset(train_image_root, train_dmap_root, gt_downsample=8, phase='train')
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=True)
test_dataset = ShanghaiTechCrowdCountingDataset(test_image_root, test_dmap_root, gt_downsample=8, phase='test')
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False)
if not os.path.exists('./checkpoints'):
os.mkdir('./checkpoints')
min_mae = 10000
min_epoch = 0
train_loss_list = []
epoch_list = []
test_error_list = []
for epoch in range(0, epochs):
# training phase
model.train()
epoch_loss = 0
for i, (img, gt_dmap) in enumerate(tqdm(train_loader)):
img = img.to(device)
gt_dmap = gt_dmap.to(device)
# forward propagation
et_dmap = model(img)
# calculate loss
loss = criterion(et_dmap, gt_dmap)
epoch_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
# print("epoch:",epoch,"loss:",epoch_loss/len(dataloader))
epoch_list.append(epoch)
train_loss_list.append(epoch_loss / len(train_loader))
torch.save(model.state_dict(), './checkpoints/epoch_' + str(epoch) + ".pth")
# testing phase
model.eval()
mae = 0
for i, (img, gt_dmap) in enumerate(tqdm(test_loader)):
img = img.to(device)
gt_dmap = gt_dmap.to(device)
# forward propagation
et_dmap = model(img)
mae += abs(et_dmap.data.sum() - gt_dmap.data.sum()).item()
del img, gt_dmap, et_dmap
if mae / len(test_loader) < min_mae:
min_mae = mae / len(test_loader)
min_epoch = epoch
test_error_list.append(mae / len(test_loader))
print("epoch:" + str(epoch) + " error:" + str(mae / len(test_loader)) + " min_mae:" + str(
min_mae) + " min_epoch:" + str(min_epoch))
# vis.line(win=1, X=epoch_list, Y=train_loss_list, opts=dict(title='train_loss'))
# vis.line(win=2, X=epoch_list, Y=test_error_list, opts=dict(title='test_error'))
# show an image
index = random.randint(0, len(test_loader) - 1)
img, gt_dmap = test_dataset[index]
# vis.image(win=3, img=img, opts=dict(title='img'))
# vis.image(win=4, img=gt_dmap / (gt_dmap.max()) * 255, opts=dict(title='gt_dmap(' + str(gt_dmap.sum()) + ')'))
img = img.unsqueeze(0).to(device)
gt_dmap = gt_dmap.unsqueeze(0)
et_dmap = model(img)
et_dmap = et_dmap.squeeze(0).detach().cpu().numpy()
# vis.image(win=5, img=et_dmap / (et_dmap.max()) * 255, opts=dict(title='et_dmap(' + str(et_dmap.sum()) + ')'))
        print(time.strftime('%Y.%m.%d %H:%M:%S', time.localtime(time.time())))
| 40.242718 | 119 | 0.620989 |
1a465f5b232d6186913eb99b43ba05e429225848 | 1,996 | py | Python | GUI/main.py | Crismaria11/Proyecto2_DS | e75814a17ea736fb04f6d11a47af039cbf40a02b | ["MIT"] | null | null | null | GUI/main.py | Crismaria11/Proyecto2_DS | e75814a17ea736fb04f6d11a47af039cbf40a02b | ["MIT"] | null | null | null | GUI/main.py | Crismaria11/Proyecto2_DS | e75814a17ea736fb04f6d11a47af039cbf40a02b | ["MIT"] | null | null | null |
import tkinter as tk
from PIL import ImageTk, Image # pip3 install Pillow
from tkinter import filedialog
import engine.torch as tengine
import matplotlib.pyplot as plt
def upload(): # THE IMAGE IS UPLOADED HERE
filename = filedialog.askopenfilename(title='open', filetypes=[("Images", ".jpg")])
img = Image.open(filename)
ph = ImageTk.PhotoImage(img)
print(filename)
global current_image_path
current_image_path = filename
tk_img = img.resize((256, 256), Image.ANTIALIAS)
tk_img = ImageTk.PhotoImage(tk_img)
panel = tk.Label(mainWindow, image=tk_img)
panel.image = tk_img
panel.pack()
panel.place(x=400, y=50)
def process_img(): # THE MODEL IS CALLED HERE TO ANALYZE THE IMAGE
global current_image_path
img = Image.open(current_image_path)
img, covidPositive = trunner.predict(img, current_image_path)
textResult = "El individuo no presenta COVID-19"
if covidPositive:
textResult = "El individuo si presenta COVID-19"
plt.show()
result = tk.Label(mainWindow, text=textResult)
result.pack(anchor=tk.NW)
result.config(fg="red", bg="#c3d6ff", font=("Arial", 14))
result.place(x=25, y=150)
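# Added flow comment: upload() lets the user pick an x-ray, previews it and stores its path
# in current_image_path; process_img() then feeds that file to TorchEngine.predict() and
# displays the resulting COVID-19 positive/negative label on the window built below.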
trunner = tengine.TorchEngine()
mainWindow = tk.Tk()
mainWindow.title("Deteccion de COVID-19")
mainWindow.geometry("700x400")
mainWindow.config(bg="#c3d6ff")
title = tk.Label(mainWindow, text="Deteccion de COVID-19")
title.pack(anchor=tk.NW)
title.config(fg="red", bg="#c3d6ff", font=("Arial", 22))
title.place(x=25)
uploadButton = tk.Button(mainWindow, text="Subir rayos x...", height=2, width=20, command=upload)
uploadButton.pack(anchor=tk.NW)
uploadButton.config(bg="#c0c0c0", font=("Arial", 9))
uploadButton.place(x=25, y=50)
processButton = tk.Button(mainWindow, text="Procesar", height=2, width=20, command=process_img)
processButton.pack(anchor=tk.NW)
processButton.config(bg="#c0c0c0", font=("Arial", 9))
processButton.place(x=200, y=50)
current_image_path = None
mainWindow.mainloop()
| 29.352941 | 97 | 0.716433 |
86209ddabb585ea77e0d967a96a70f187aaec9af | 5,283 | py | Python | auto_editor/validateInput.py | WyattBlue/auto-editor | 6bb35cb0119f07e9b18eec0fbec3163d1bd281b2 | ["Unlicense"] | 835 | 2020-04-30T06:04:46.000Z | 2022-03-31T06:14:06.000Z | auto_editor/validateInput.py | WyattBlue/auto-editor | 6bb35cb0119f07e9b18eec0fbec3163d1bd281b2 | ["Unlicense"] | 191 | 2020-05-04T16:17:10.000Z | 2022-03-29T09:30:49.000Z | auto_editor/validateInput.py | WyattBlue/auto-editor | 6bb35cb0119f07e9b18eec0fbec3163d1bd281b2 | ["Unlicense"] | 183 | 2020-05-04T12:39:23.000Z | 2022-03-30T16:56:34.000Z |
'''validateInput.py'''
import os
import re
import sys
from auto_editor.utils.progressbar import ProgressBar
invalidExtensions = ['.txt', '.md', '.rtf', '.csv', '.cvs', '.html', '.htm',
'.xml', '.yaml', '.png', '.jpeg', '.jpg', '.exe', '.doc',
'.docx', '.odt', '.pptx', '.xlsx', '.xls', 'ods', '.pdf', '.bat', '.dll',
'.prproj', '.psd', '.aep', '.zip', '.rar', '.7z', '.java', '.class', '.js',
'.c', '.cpp', '.csharp', '.py', '.app', '.git', '.github', '.gitignore',
'.db', '.ini', '.BIN', '.svg', '.in', '.pyc', '.log', '.xsd', '.ffpreset',
'.kys', '.essentialsound']
class MyLogger(object):
@staticmethod
def debug(msg):
pass
@staticmethod
def warning(msg):
print(msg, file=sys.stderr)
@staticmethod
def error(msg):
if("'Connection refused'" in msg):
pass
else:
print(msg, file=sys.stderr)
def parse_bytes(bytestr):
# Parse a string indicating a byte quantity into an integer.
matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
if(matchobj is None):
return None
number = float(matchobj.group(1))
multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
return round(number * multiplier)
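# Worked examples (added comment, not in the original file):
#   parse_bytes('500k') -> 512000, parse_bytes('1.5M') -> 1572864, parse_bytes('nonsense') -> None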
def sponsor_block_api(_id, categories, log):
# type: (str, list, Any) -> dict
from urllib import request
from urllib.error import HTTPError
import json
cat_url = 'categories=['
for i, cat in enumerate(categories):
if(i == 0):
cat_url += '"{}"'.format(cat)
else:
cat_url += ',"{}"'.format(cat)
cat_url += ']'
try:
contents = request.urlopen(
'https://sponsor.ajay.app/api/skipSegments?videoID={}&{}'.format(_id, cat_url))
return json.loads(contents.read())
except HTTPError:
log.warning("Couldn't find skipSegments for id: {}".format(_id))
return None
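# Added note: the URL queried above is SponsorBlock's public skipSegments endpoint; the JSON
# it returns is (roughly) a list of objects whose "segment" field holds [start, end] times in
# seconds for each matched category.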
def download_video(my_input, args, ffmpeg, log):
outtmpl = re.sub(r'\W+', '-', my_input)
if(outtmpl.endswith('-mp4')):
outtmpl = outtmpl[:-4]
outtmpl += '.mp4'
if(args.output_dir is not None):
outtmpl = os.path.join(args.output_dir, outtmpl)
try:
import youtube_dl
except ImportError:
log.error('Download the youtube-dl python library to download URLs.\n'
' pip3 install youtube-dl')
if(not os.path.isfile(outtmpl)):
ytbar = ProgressBar(100, 'Downloading')
def my_hook(d):
if(d['status'] == 'downloading'):
ytbar.tick(float(d['_percent_str'].replace('%','')))
def abspath(path):
if(path is None):
return None
return os.path.abspath(path)
ydl_opts = {
'nocheckcertificate': not args.check_certificate,
'outtmpl': outtmpl,
'ffmpeg_location': ffmpeg.getPath(),
'format': args.format,
'ratelimit': parse_bytes(args.limit_rate),
'logger': MyLogger(),
'cookiefile': abspath(args.cookies),
'download_archive': abspath(args.download_archive),
'progress_hooks': [my_hook],
}
        # remove options that were not provided (their value is None)
        for key in [k for k, v in ydl_opts.items() if v is None]:
            del ydl_opts[key]
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
try:
ydl.download([my_input])
except youtube_dl.utils.DownloadError:
log.conwrite('')
log.error('YouTube-dl: Connection Refused.')
log.conwrite('')
return outtmpl
def _valid_files(path, bad_exts):
for f in os.listdir(path):
if(f[f.rfind('.'):] not in bad_exts and not os.path.isdir(f)
and not f.startswith('.')):
yield os.path.join(path, f)
def get_segment(args, my_input, log):
if(args.block is not None):
if(args.id is not None):
return sponsor_block_api(args.id, args.block, log)
match = re.search(r'youtube\.com/watch\?v=(?P<match>[A-Za-z0-9_-]{11})',
my_input)
if(match):
youtube_id = match.groupdict()['match']
return sponsor_block_api(youtube_id, args.block, log)
return None
def valid_input(inputs, ffmpeg, args, log):
new_inputs = []
segments = []
for my_input in inputs:
if(os.path.isdir(my_input)):
new_inputs += sorted(_valid_files(my_input, invalidExtensions))
segments += [None] * (len(new_inputs) - len(segments))
elif(os.path.isfile(my_input)):
_, ext = os.path.splitext(my_input)
if(ext == ''):
log.error('File must have an extension.')
if(ext in invalidExtensions):
log.error('Invalid file extension "{}" for {}'.format(ext, my_input))
new_inputs.append(my_input)
segments.append(get_segment(args, my_input, log))
elif(my_input.startswith('http://') or my_input.startswith('https://')):
new_inputs.append(download_video(my_input, args, ffmpeg, log))
segments.append(get_segment(args, my_input, log))
else:
log.error('Could not find file: {}'.format(my_input))
return new_inputs, segments
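# Usage sketch (added comment; the argument values are illustrative): valid_input() expands
# directories, filters out invalidExtensions, downloads URLs through youtube-dl and returns
# parallel lists of paths and SponsorBlock segments, e.g.
#   inputs, segments = valid_input(['clip.mp4', 'https://youtube.com/watch?v=...'], ffmpeg, args, log)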
| 33.226415 | 91 | 0.563884 |
2e9f0194a82bffdedad7fd24a9d039612b7340ab | 1,400 | py | Python | extraction/vard/test/integration.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | ["MIT"] | null | null | null | extraction/vard/test/integration.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | ["MIT"] | null | null | null | extraction/vard/test/integration.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | ["MIT"] | null | null | null |
import os, sys, subprocess, shutil
import unittest
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "bench")))
from vard import Client
import time
VARD = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "vard.native")
class TestVard(unittest.TestCase):
client = None
processes = None
def setUp(self):
"""Start up a cluster"""
self.processes = []
for i in range(3):
port = 8000 + i
args = [VARD, "-dbpath", "db-%d" % i, "-port", "%d" % port, "-node", "0,localhost:9000", "-node", "1,localhost:9001", "-node", "2,localhost:9002", "-me", "%d" % i]
FNULL = open(os.devnull, "w")
proc = subprocess.Popen(args, stdout=FNULL, stderr=subprocess.STDOUT, close_fds=True)
self.processes.append(proc)
time.sleep(1)
cluster = [("localhost", 8000), ("localhost", 8001), ("localhost", 8002)]
host, port = Client.find_leader(cluster)
self.client = Client(host, port)
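	# Added comment: each test boots a fresh 3-node vard cluster (client ports 8000-8002,
	# node-to-node addresses localhost:9000-9002 from the -node flags above), sleeps a second
	# after starting each node, then connects the test client to the current leader found
	# via Client.find_leader().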
def tearDown(self):
for i in range(3):
self.processes[i].terminate()
shutil.rmtree("db-%d" % i)
self.client = None
self.processes = None
def test_put_get(self):
self.client.put("answer", "42")
self.assertEqual(self.client.get("answer"), "42")
def test_put_delete_get(self):
self.client.put("answer", "42")
self.client.delete("answer")
self.assertEqual(self.client.get("answer"), None)
if __name__ == "__main__":
	unittest.main()
| 37.837838 | 166 | 0.67 |