Dataset schema (one record per source file):
- hexsha: string (length 40)
- size: int64 (4 to 1.02M)
- ext: string (8 distinct values)
- lang: string (1 distinct value)
- max_stars_repo_path: string (length 4 to 209)
- max_stars_repo_name: string (length 5 to 121)
- max_stars_repo_head_hexsha: string (length 40)
- max_stars_repo_licenses: sequence (length 1 to 10)
- max_stars_count: int64 (1 to 191k, nullable)
- max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
- max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
- max_issues_repo_path: string (length 4 to 209)
- max_issues_repo_name: string (length 5 to 121)
- max_issues_repo_head_hexsha: string (length 40)
- max_issues_repo_licenses: sequence (length 1 to 10)
- max_issues_count: int64 (1 to 67k, nullable)
- max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
- max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
- max_forks_repo_path: string (length 4 to 209)
- max_forks_repo_name: string (length 5 to 121)
- max_forks_repo_head_hexsha: string (length 40)
- max_forks_repo_licenses: sequence (length 1 to 10)
- max_forks_count: int64 (1 to 105k, nullable)
- max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
- max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
- content: string (length 4 to 1.02M)
- avg_line_length: float64 (1.07 to 66.1k)
- max_line_length: int64 (4 to 266k)
- alphanum_fraction: float64 (0.01 to 1)
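As a hedged illustration (not part of the original dump), records with this schema might be streamed and filtered on the numeric columns as sketched below. The dataset id "org/this-code-dump" is a placeholder, the thresholds are arbitrary, and the sketch assumes the Hugging Face datasets library is available.

from datasets import load_dataset

# Placeholder dataset id; substitute the real identifier for this dump.
ds = load_dataset("org/this-code-dump", split="train", streaming=True)

for row in ds:
    # Keep small, code-like Python files, judged by the schema's numeric columns.
    if row["ext"] == "py" and row["alphanum_fraction"] > 0.5 and row["max_line_length"] < 500:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])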
4fa517fc4aeda835e2192b711ae77906468916b6 | 581 | py | Python | setup.py | Icebreaker454/MRNet-1 | 6dca2619d0afcbd97decc3e36609c3baec83411a | ["MIT"] | null | null | null | setup.py | Icebreaker454/MRNet-1 | 6dca2619d0afcbd97decc3e36609c3baec83411a | ["MIT"] | null | null | null | setup.py | Icebreaker454/MRNet-1 | 6dca2619d0afcbd97decc3e36609c3baec83411a | ["MIT"] | null | null | null |
from setuptools import find_packages
from setuptools import setup
import setuptools
from distutils.command.build import build as _build
import subprocess
REQUIRED_PACKAGES = [
"click",
"joblib",
"numpy",
"pandas",
"Pillow",
"scikit-learn",
"torch",
"torchvision",
"tqdm",
"google-cloud-storage",
]
setup(
name="trainer",
version='0.1.2',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='Vertex AI | Training | PyTorch | Text Classification | Python Package'
)
| 19.366667 | 87 | 0.683305 |
ed60244e88f157e1874be2616d47b7c73789d6d4 | 12,466 | py | Python | packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteThreadsInStopReply.py | xiaobai/swift-lldb | 9238527ce430e6837108a16d2a91b147551fb83c | ["Apache-2.0"] | 765 | 2015-12-03T16:44:59.000Z | 2022-03-07T12:41:10.000Z | packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteThreadsInStopReply.py | xiaobai/swift-lldb | 9238527ce430e6837108a16d2a91b147551fb83c | ["Apache-2.0"] | 1,815 | 2015-12-11T23:56:05.000Z | 2020-01-10T19:28:43.000Z | packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteThreadsInStopReply.py | xiaobai/swift-lldb | 9238527ce430e6837108a16d2a91b147551fb83c | ["Apache-2.0"] | 284 | 2015-12-03T16:47:25.000Z | 2022-03-12T05:39:48.000Z |
from __future__ import print_function
import json
import re
import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteThreadsInStopReply(
gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
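    # "read packet"/"send packet" entries model one gdb-remote exchange from lldb-server's point of
    # view: it reads the QListThreadsInStopReply request and replies OK, enabling thread IDs in
    # subsequent stop replies.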
ENABLE_THREADS_IN_STOP_REPLY_ENTRIES = [
"read packet: $QListThreadsInStopReply#21",
"send packet: $OK#00",
]
def gather_stop_reply_fields(self, post_startup_log_lines, thread_count,
field_names):
# Set up the inferior args.
inferior_args = []
for i in range(thread_count - 1):
inferior_args.append("thread:new")
inferior_args.append("sleep:10")
procs = self.prep_debug_monitor_and_inferior(
inferior_args=inferior_args)
self.add_register_info_collection_packets()
self.add_process_info_collection_packets()
# Assumes test_sequence has anything added needed to setup the initial state.
# (Like optionally enabling QThreadsInStopReply.)
if post_startup_log_lines:
self.test_sequence.add_log_lines(post_startup_log_lines, True)
self.test_sequence.add_log_lines([
"read packet: $c#63"
], True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
hw_info = self.parse_hw_info(context)
# Give threads time to start up, then break.
time.sleep(1)
self.reset_test_sequence()
self.test_sequence.add_log_lines(
[
"read packet: {}".format(
chr(3)),
{
"direction": "send",
"regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$",
"capture": {
1: "stop_result",
2: "key_vals_text"}},
],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Wait until all threads have started.
threads = self.wait_for_thread_count(thread_count, timeout_seconds=3)
self.assertIsNotNone(threads)
self.assertEqual(len(threads), thread_count)
# Run, then stop the process, grab the stop reply content.
self.reset_test_sequence()
self.test_sequence.add_log_lines(["read packet: $c#63",
"read packet: {}".format(chr(3)),
{"direction": "send",
"regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$",
"capture": {1: "stop_result",
2: "key_vals_text"}},
],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Parse the stop reply contents.
key_vals_text = context.get("key_vals_text")
self.assertIsNotNone(key_vals_text)
kv_dict = self.parse_key_val_dict(key_vals_text)
self.assertIsNotNone(kv_dict)
result = dict();
result["pc_register"] = hw_info["pc_register"]
result["little_endian"] = hw_info["little_endian"]
for key_field in field_names:
result[key_field] = kv_dict.get(key_field)
return result
def gather_stop_reply_threads(self, post_startup_log_lines, thread_count):
# Pull out threads from stop response.
stop_reply_threads_text = self.gather_stop_reply_fields(
post_startup_log_lines, thread_count, ["threads"])["threads"]
if stop_reply_threads_text:
return [int(thread_id, 16)
for thread_id in stop_reply_threads_text.split(",")]
else:
return []
def gather_stop_reply_pcs(self, post_startup_log_lines, thread_count):
results = self.gather_stop_reply_fields( post_startup_log_lines,
thread_count, ["threads", "thread-pcs"])
if not results:
return []
threads_text = results["threads"]
pcs_text = results["thread-pcs"]
thread_ids = threads_text.split(",")
pcs = pcs_text.split(",")
self.assertTrue(len(thread_ids) == len(pcs))
thread_pcs = dict()
for i in range(0, len(pcs)):
thread_pcs[int(thread_ids[i], 16)] = pcs[i]
result = dict()
result["thread_pcs"] = thread_pcs
result["pc_register"] = results["pc_register"]
result["little_endian"] = results["little_endian"]
return result
def switch_endian(self, egg):
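        # Reverse a hex string two characters (one byte) at a time, e.g. "0011aabb" -> "bbaa1100".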
return "".join(reversed(re.findall("..", egg)))
def parse_hw_info(self, context):
self.assertIsNotNone(context)
process_info = self.parse_process_info_response(context)
endian = process_info.get("endian")
reg_info = self.parse_register_info_packets(context)
(pc_lldb_reg_index, pc_reg_info) = self.find_pc_reg_info(reg_info)
hw_info = dict()
hw_info["pc_register"] = pc_lldb_reg_index
hw_info["little_endian"] = (endian == "little")
return hw_info
def gather_threads_info_pcs(self, pc_register, little_endian):
self.reset_test_sequence()
self.test_sequence.add_log_lines(
[
"read packet: $jThreadsInfo#c1",
{
"direction": "send",
"regex": r"^\$(.*)#[0-9a-fA-F]{2}$",
"capture": {
1: "threads_info"}},
],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
threads_info = context.get("threads_info")
register = str(pc_register)
# The jThreadsInfo response is not valid JSON data, so we have to
# clean it up first.
jthreads_info = json.loads(re.sub(r"}]", "}", threads_info))
thread_pcs = dict()
for thread_info in jthreads_info:
tid = thread_info["tid"]
pc = thread_info["registers"][register]
thread_pcs[tid] = self.switch_endian(pc) if little_endian else pc
return thread_pcs
def QListThreadsInStopReply_supported(self):
procs = self.prep_debug_monitor_and_inferior()
self.test_sequence.add_log_lines(
self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
@debugserver_test
def test_QListThreadsInStopReply_supported_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.QListThreadsInStopReply_supported()
@llgs_test
def test_QListThreadsInStopReply_supported_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.QListThreadsInStopReply_supported()
def stop_reply_reports_multiple_threads(self, thread_count):
# Gather threads from stop notification when QThreadsInStopReply is
# enabled.
stop_reply_threads = self.gather_stop_reply_threads(
self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count)
self.assertEqual(len(stop_reply_threads), thread_count)
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
@debugserver_test
def test_stop_reply_reports_multiple_threads_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.stop_reply_reports_multiple_threads(5)
    # In the current implementation of llgs on Windows, the debugger of the native process
    # responds to the '\x03' packet by calling DebugBreakProcess, which creates a new thread
    # to handle the exception debug event. As a result, one extra stopped thread is reported
    # to the delegate (llgs), so the tests below that assert on the number of stopped threads
    # all fail.
@expectedFailureAll(oslist=["windows"])
@llgs_test
def test_stop_reply_reports_multiple_threads_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.stop_reply_reports_multiple_threads(5)
def no_QListThreadsInStopReply_supplies_no_threads(self, thread_count):
# Gather threads from stop notification when QThreadsInStopReply is not
# enabled.
stop_reply_threads = self.gather_stop_reply_threads(None, thread_count)
self.assertEqual(len(stop_reply_threads), 0)
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
@debugserver_test
def test_no_QListThreadsInStopReply_supplies_no_threads_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.no_QListThreadsInStopReply_supplies_no_threads(5)
@expectedFailureAll(oslist=["windows"])
@llgs_test
def test_no_QListThreadsInStopReply_supplies_no_threads_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.no_QListThreadsInStopReply_supplies_no_threads(5)
def stop_reply_reports_correct_threads(self, thread_count):
# Gather threads from stop notification when QThreadsInStopReply is
# enabled.
stop_reply_threads = self.gather_stop_reply_threads(
self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count)
self.assertEqual(len(stop_reply_threads), thread_count)
# Gather threads from q{f,s}ThreadInfo.
self.reset_test_sequence()
self.add_threadinfo_collection_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
threads = self.parse_threadinfo_packets(context)
self.assertIsNotNone(threads)
self.assertEqual(len(threads), thread_count)
# Ensure each thread in q{f,s}ThreadInfo appears in stop reply threads
for tid in threads:
self.assertTrue(tid in stop_reply_threads)
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
@debugserver_test
def test_stop_reply_reports_correct_threads_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.stop_reply_reports_correct_threads(5)
@expectedFailureAll(oslist=["windows"])
@llgs_test
def test_stop_reply_reports_correct_threads_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.stop_reply_reports_correct_threads(5)
def stop_reply_contains_thread_pcs(self, thread_count):
results = self.gather_stop_reply_pcs(
self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count)
stop_reply_pcs = results["thread_pcs"]
pc_register = results["pc_register"]
little_endian = results["little_endian"]
self.assertEqual(len(stop_reply_pcs), thread_count)
threads_info_pcs = self.gather_threads_info_pcs(pc_register,
little_endian)
self.assertEqual(len(threads_info_pcs), thread_count)
for thread_id in stop_reply_pcs:
self.assertTrue(thread_id in threads_info_pcs)
self.assertTrue(int(stop_reply_pcs[thread_id], 16)
== int(threads_info_pcs[thread_id], 16))
@expectedFailureAll(oslist=["windows"])
@llgs_test
def test_stop_reply_contains_thread_pcs_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.stop_reply_contains_thread_pcs(5)
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
@debugserver_test
def test_stop_reply_contains_thread_pcs_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.stop_reply_contains_thread_pcs(5)
| 39.955128 | 106 | 0.649045 |
1eaa5f668d125565989d90d736795e73e104a512 | 543 | py | Python | gammapy/utils/tests/test_array.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | ["BSD-3-Clause"] | null | null | null | gammapy/utils/tests/test_array.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | ["BSD-3-Clause"] | null | null | null | gammapy/utils/tests/test_array.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | ["BSD-3-Clause"] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from ..array import array_stats_str, shape_2N
def test_array_stats_str():
actual = array_stats_str(np.pi, "pi")
assert actual == "pi : size = 1, min = 3.142, max = 3.142\n"
actual = array_stats_str([np.pi, 42])
assert actual == "size = 2, min = 3.142, max = 42.000\n"
def test_shape_2N():
shape = (34, 89, 120, 444)
expected_shape = (40, 96, 128, 448)
assert expected_shape == shape_2N(shape=shape, N=3)
| 30.166667 | 82 | 0.627993 |
7a5136c75ecebdfacaddf9b5436a0c77b0d5f028 | 4,934 | py | Python | kmhm-auto/kmhm_singleton.py | alephO/kmhm-auto | 5da47c3e44ccc088bed1147c3a078dcfc82d62e6 | ["Apache-2.0"] | null | null | null | kmhm-auto/kmhm_singleton.py | alephO/kmhm-auto | 5da47c3e44ccc088bed1147c3a078dcfc82d62e6 | ["Apache-2.0"] | null | null | null | kmhm-auto/kmhm_singleton.py | alephO/kmhm-auto | 5da47c3e44ccc088bed1147c3a078dcfc82d62e6 | ["Apache-2.0"] | null | null | null |
# This file handles the singleton of the main bot logic
from constants import TMP_DICT
from chrome_api.pcdta import pcdta
from enum import Enum
from utils.logger import log_adapter
from utils.img_handler import img_handler
from db.sqlite_handler import dbHandler
from db.image_samples import image_sampler
import time
import random
class KmhmAuto(object):
def __init__(self):
self._pcdta = pcdta
self._sm = Kmhm_Sm()
self._pcdta.hook_chrome(port=TMP_DICT['chrome_debug_port'])
self._logger = log_adapter.getlogger(__name__)
self.continue_loop = True
self.simple_continue_loop = True
# Screenshot data
self.sc_data = None
# Simple step list and corresponding templates
self.sp_lst = [
'events',
'e_ssshz/title',
'e_ssshz/stages',
'e_ssshz/e1',
['menu/light','menu/hsxg'],
'menu/hsxg',
'e_ssshz/gotostage',
'e_ssshz/callfor',
'e_ssshz/attack',
['popup/discover', 'popup/ok'],
'menu/nextstep',
#['popup/ap1', 'popup/apto3', 'popup/addap'],
'popup/addap',
'menu/redo'
]
def start_bot(self):
self._pcdta.start_bot()
def next_step(self):
self.update_sm_if_required()
if not self._sm.location_valid():
self._logger.error('Location is Invalid')
self.continue_loop = False
return
def simple_next_step(self):
self._logger.debug('Simple step')
sc = img_handler.take_screenshot(trim=True)
non_changed = 0
        # sc is an Image adapter, so a bare "if sc:" cannot be used; compare against None instead.
if sc is not None:
old_sc_pp = img_handler.get_picked_pixels(self.sc_data)
sc_pp = img_handler.get_picked_pixels(sc)
if not img_handler.pixels_rough_equal(old_sc_pp, sc_pp):
self.sc_data = sc
# filename = "maint/" +log_adapter.datetime_str(extention='png')
filename = "maint/current.png"
self._logger.debug("Saving image as %s" % filename)
img_handler.save_img(filename, self.sc_data)
image_sampler.log_image(sc, sc_pp)
else:
self._logger.debug("Don't save image. Didn't change a lot")
self.sc_data = sc
if not img_handler.pixels_rough_equal(old_sc_pp, sc_pp, rate=0.99) or \
non_changed > 5:
# Must 100% match to skip match process
non_changed = 0
simple_clicked = self.simple_check_and_click()
else:
non_changed += 1
sleep_time = random.randint(1000, 2000) / 1000
time.sleep(sleep_time)
def simple_check_and_click(self):
clicked = False
def _click_name( name ):
m = img_handler.find(self.sc_data, name)
if m:
self._logger.debug("Found %s. Location is %i-%i, size is %i-%i, sc-offset is %i-%i"
% (name, m[0], m[1], m[2], m[3],
self.sc_data.x_delta, self.sc_data.y_delta))
loc_x, loc_y, size_x, size_y = m
rand_x = random.randrange(2, size_x - 1)
rand_y = random.randrange(2, size_y - 1)
sum_x = self.sc_data.x_delta + loc_x + rand_x
sum_y = self.sc_data.y_delta + loc_y + rand_y
self._logger.debug("Try to click %i-%i" % (sum_x, sum_y))
self._pcdta.click(sum_x, sum_y)
return True
return False
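        # _click_name clicks a random point inside the matched template region, offset by the
        # screenshot's crop deltas. Below, a nested list entry in sp_lst means "once its first
        # template appears on screen, click every entry of the group in sequence".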
for act in self.sp_lst:
if isinstance(act, list):
if not act:
continue
m = img_handler.find(self.sc_data, act[0])
if not m:
continue
self._logger.debug("Entering unit action " + act[0])
for it in act:
while not _click_name(it):
time.sleep(2)
clicked = True
break
if _click_name( act ):
clicked = True
break
return clicked
def update_sm_if_required(self):
pass
def pcdta(self):
return self._pcdta
def cleanup(self):
self._logger.debug("Running singleton's cleanup function")
dbHandler.cleanup()
class Location(Enum):
UNDEFINED = 0
UNSTARTED = 1
class ButtonType(Enum):
START_GAME = 0
class Kmhm_Sm:
# State machine of the state
def __init__(self):
self._location = Location.UNDEFINED
self._critical_button = None
self._critical_button_loc = None
def location(self):
return self._location
def location_valid(self):
return self._location != Location.UNDEFINED
| 32.675497 | 99 | 0.556344 |
6fbbf647665e36a99084845ce59c35379e4be836 | 5,348 | py | Python | data.py | jiyzhang/bert-ner | ebd9d10c3ad019da030e32cac3f2c504e5f1b957 | ["Apache-2.0"] | null | null | null | data.py | jiyzhang/bert-ner | ebd9d10c3ad019da030e32cac3f2c504e5f1b957 | ["Apache-2.0"] | null | null | null | data.py | jiyzhang/bert-ner | ebd9d10c3ad019da030e32cac3f2c504e5f1b957 | ["Apache-2.0"] | null | null | null |
from pathlib import Path
import pickle
import numpy as np
from collections import Counter
DATADIR = Path("./data/wind")
def words(name):
return '{}.words.txt'.format(name)
def tags(name):
return '{}.tags.txt'.format(name)
def fwords(name):
return str(Path(DATADIR, '{}.words.txt'.format(name)))
def ftags(name):
return str(Path(DATADIR, '{}.tags.txt'.format(name)))
## tags, BIO
tag2label = {"O": 0,
"B-PER": 1, "I-PER": 2,
"B-ORG": 3, "I-ORG": 4
}
def read_corpus(corpus_path):
"""
read corpus and return the list of samples
:param corpus_path:
:return: data
"""
data = []
with open(corpus_path, encoding='utf-8') as fr:
lines = fr.readlines()
sent_, tag_ = [], []
for line in lines:
if line != '\n':
[char, label] = line.strip().split()
sent_.append(char)
tag_.append(label)
else:
data.append((sent_, tag_))
sent_, tag_ = [], []
return data
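# Illustrative corpus snippet (not taken from the real data): read_corpus expects one
# "char label" pair per line, with a blank line separating sentences, e.g.
#
#   张 B-PER
#   三 I-PER
#   在 O
#   微 B-ORG
#   软 I-ORG
#
# which would yield data == [(['张', '三', '在', '微', '软'],
#                             ['B-PER', 'I-PER', 'O', 'B-ORG', 'I-ORG'])]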
def vocab_build(vocab_path, corpus_path, min_count):
"""
:param vocab_path:
:param corpus_path:
:param min_count:
:return:
"""
data = read_corpus(corpus_path)
word2id = {}
for sent_, tag_ in data:
for word in sent_:
if word.isdigit():
word = '<NUM>'
elif ('\u0041' <= word <='\u005a') or ('\u0061' <= word <='\u007a'):
word = '<ENG>'
if word not in word2id:
word2id[word] = [len(word2id)+1, 1]
else:
word2id[word][1] += 1
low_freq_words = []
for word, [word_id, word_freq] in word2id.items():
if word_freq < min_count and word != '<NUM>' and word != '<ENG>':
low_freq_words.append(word)
for word in low_freq_words:
del word2id[word]
new_id = 1
for word in word2id.keys():
word2id[word] = new_id
new_id += 1
word2id['<UNK>'] = new_id
word2id['<PAD>'] = 0
print(len(word2id))
with Path(vocab_path).open('wb') as fw:
pickle.dump(word2id, fw)
def sentence2id(sent, word2id):
"""
:param sent:
:param word2id:
:return:
"""
sentence_id = []
for word in sent:
if word.isdigit():
word = '<NUM>'
elif ('\u0041' <= word <= '\u005a') or ('\u0061' <= word <= '\u007a'):
word = '<ENG>'
if word not in word2id:
word = '<UNK>'
sentence_id.append(word2id[word])
return sentence_id
def read_dictionary(vocab_path):
"""
:param vocab_path:
:return:
"""
#vocab_path = os.path.join(vocab_path)
with Path(vocab_path).open("rb") as fr:
word2id = pickle.load(fr)
print('vocab_size:', len(word2id))
return word2id
def random_embedding(vocab_size, embedding_dim):
"""
:param vocab_size:
:param embedding_dim:
:return:
"""
embedding_mat = np.random.uniform(-0.25, 0.25, (vocab_size, embedding_dim))
embedding_mat = np.float32(embedding_mat)
return embedding_mat
def build_hanzi_vocab():
    # 1. Chinese characters
    # Get a Counter of Chinese characters over all the data, filter by min count, and save
MINCOUNT = 1
print('Building vocab for Hanzi ')
counter_words = Counter()
for n in ['train', 'valid', 'test']:
with Path(fwords(n)).open() as f:
for line in f:
counter_words.update(line.strip().split('|'))
vocab_words = {w for w, c in counter_words.items() if c >= MINCOUNT}
with Path(DATADIR / 'vocab.words.txt').open('w') as f:
for w in sorted(list(vocab_words)):
f.write('{}\n'.format(w))
print('- done. Kept {} out of {}'.format(len(vocab_words), len(counter_words)))
def build_tag_vocab():
# 2. Tags
# Get all tags from the training set
print('Build vocab for tags')
vocab_tags = set()
with Path(ftags('train')).open() as f:
for line in f:
vocab_tags.update(line.strip().split('|'))
with Path(DATADIR / 'vocab.tags.txt').open('w') as f:
for t in sorted(list(vocab_tags)):
f.write('{}\n'.format(t))
print('- done. Found {} tags.'.format(len(vocab_tags)))
def build_hanzi_embedding():
# Load vocab
with Path(DATADIR / 'vocab.words.txt').open() as f:
word_to_idx = {line.strip(): idx for idx, line in enumerate(f)}
size_vocab = len(word_to_idx)
# Array of zeros
embeddings = np.zeros((size_vocab, 300))
    # Get the relevant Chinese character vectors
found = 0
print('Reading Chinese Char Vectors (may take a while)')
with Path(DATADIR / 'sgns.context.word-character.char1-1.dynwin5.thr10.neg5.dim300.iter5').open() as f:
for line_idx, line in enumerate(f):
if line_idx % 100000 == 0:
print('- At line {}'.format(line_idx))
line = line.strip().split('|')
if len(line) != 300 + 1:
continue
word = line[0]
embedding = line[1:]
if word in word_to_idx:
found += 1
word_idx = word_to_idx[word]
embeddings[word_idx] = embedding
print('- done. Found {} vectors for {} words'.format(found, size_vocab))
# Save np.array to file
    np.savez_compressed(DATADIR / 'sgns.npz', embeddings=embeddings)
| 28.147368 | 107 | 0.563762 |
4b06f02f0cc195dbd4641d7c4af873111c812fbe | 5,477 | py | Python | src/JUNCTION_image.py | kwonbosung02/JuntionXseoul2019 | a205e09112f0e0b71b0d844989bb2c047cbec997 | ["MIT"] | 3 | 2019-07-09T11:28:26.000Z | 2020-07-13T04:57:44.000Z | src/JUNCTION_image.py | kwonbosung02/JuntionXseoul2019 | a205e09112f0e0b71b0d844989bb2c047cbec997 | ["MIT"] | null | null | null | src/JUNCTION_image.py | kwonbosung02/JuntionXseoul2019 | a205e09112f0e0b71b0d844989bb2c047cbec997 | ["MIT"] | null | null | null |
# Module imports -----------------------------------------------------------------------
import cv2
import numpy as np
import time
import dlib
import pybind11
s_img = cv2.imread('test2.png',-1)##############::::
# Used in many places --------------------------------------------------------------------
prev_contours = 0
# Used when drawing the face -------------------------------------------------------------
def drawPolyline(im,im2, landmarks, start, end, isClosed=False):
points = []
for i in range(start, end+1):
point = [landmarks.part(i).x, landmarks.part(i).y]
points.append(point)
points = np.array(points, dtype=np.int32)
cv2.polylines(im2, [points], isClosed, (255, 200, 0), thickness=2, lineType=cv2.LINE_8)
# Face parts to detect -------------------------------------------------------------------
def renderFace(im,im2, landmarks):
assert(landmarks.num_parts == 68)
drawPolyline(im,im2, landmarks, 0, 16) # Jaw line
drawPolyline(im,im2, landmarks, 17, 21) # Left eyebrow
drawPolyline(im,im2, landmarks, 22, 26) # Right eyebrow
drawPolyline(im,im2, landmarks, 27, 30) # Nose bridge
drawPolyline(im,im2, landmarks, 30, 35) # Lower nose
drawPolyline(im,im2, landmarks, 36, 41) # Left eye
drawPolyline(im,im2, landmarks, 42, 47) # Right Eye
drawPolyline(im,im2, landmarks, 48, 59, True) # Outer lip
drawPolyline(im,im2, landmarks, 60, 67, True) # Inner lip
def renderFace2(im, landmarks, color=(0, 255, 0), radius=3):
for p in landmarks.parts():
cv2.circle(im, (p.x, p.y), radius, color, -1)
# Face landmark detection; loaded separately to make masking easier -----------------------
faceDetector = dlib.get_frontal_face_detector()
landmarkDetector = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
#Prev
prev_face = None
prev_idx = 0
PREV_MAX = 100
# Mask image -------------------------------------------------------------------------------
mask__ = cv2.imread('test2.png')
mask_h, mask_w, _ = mask__.shape
mask_x, mask_y = mask_w / 2, mask_h / 2
# Webcam -----------------------------------------------------------------------------------
cam = cv2.VideoCapture(0)
cam.set(3,960)
cam.set(4,480)
# Background mask behind the face outline ---------------------------------------------------
lower_mask = np.array([100,0,0])
#--------------------------------------------------------------------------------------
Rect = 0
cam.read()
time.sleep(0.5)
while 1:
frame, img = cam.read()
img2 = cv2.imread('test2.png')
img_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#--------------------------------------------------------------------------------------
mask_fi = img_gray
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Background mask behind the face outline ---------------------------------------------------
mask = cv2.inRange(hsv,lower_mask,lower_mask)
# Face outline -------------------------------------------------------------------------------
res = cv2.bitwise_and(img,img, mask= mask)
faceRects = faceDetector(img, 0)
landmarksAll = []
for i in range(0, len(faceRects)):
Rect = dlib.rectangle(int(faceRects[i].left()),int(faceRects[i].top()),
int(faceRects[i].right()),int(faceRects[i].bottom()))
#########################################
faces = detector(img_gray)
for face in faces:
landmarks = landmarkDetector(img,Rect)
landmarks_points = []
for n in range(0, 68):
x = landmarks.part(n).x
y= landmarks.part(n).y
landmarks_points.append((x, y))
landmarksAll.append(landmarks)
points = np.array(landmarks_points, np.int32)
convexhull = cv2.convexHull(points)
renderFace(img,res, landmarks)
#print(convexhull)
cv2.fillConvexPoly(mask_fi,convexhull,255)
#######################################################
res_gray = cv2.cvtColor(res,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(res_gray,127,255,0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if len(contours) == 0:
cnt = prev_contours
continue
if len(contours) > 0:
cnt = contours[0]
prev_contours = cnt
hull = cv2.convexHull(cnt)
(x,y), radius = cv2.minEnclosingCircle(cnt)
center = (int(x), int(y))
x_offset = int(x)
y_offset = int(y)
y1, y2 = int(y_offset-(s_img.shape[0]/2)), int(y_offset + (s_img.shape[0]/2))
x1, x2 = int(x_offset-(s_img.shape[1]/2)), int(x_offset + (s_img.shape[1]/2))
alpha_s = s_img[:,:,3] / 255.0
alpha_l = 1.0 - alpha_s
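    # Per-channel alpha blend of the RGBA overlay onto the frame: out = alpha*overlay + (1 - alpha)*frame.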
for c in range(0, 3):
img[y1:y2, x1:x2, c] = (alpha_s * s_img[:, :, c] +
alpha_l * img[y1:y2, x1:x2, c])
cv2.rectangle(res,(x1,y1),(x2,y2),(0,255,255),3)
#print(center)
radius = int(radius)
res = cv2.circle(res, center, radius,(0,0,255),3) # yellow
cv2.drawContours(res, [hull], 0,(0,255,0), 3)
cv2.imshow("detect", img)
cv2.imshow("res",res)
cv2.imshow("Mask", mask_fi)
cv2.imshow("img2",img2)
k= cv2.waitKey(5) & 0xff
if k==27:
cam.release()
cv2.destroyAllWindows()
break
cam.release()
cv2.destroyAllWindows()
| 31.65896 | 90 | 0.503013 |
0834365c812e88fb92bd91d2315e67bc4a5a72b9 | 48,265 | py | Python | tests/unit/utils/test_thin.py | hvbarker/salt | 0b1e299b8983854bd55163439e4ac20d81a9dab7 | ["Apache-2.0"] | 1 | 2020-05-17T18:00:38.000Z | 2020-05-17T18:00:38.000Z | tests/unit/utils/test_thin.py | hvbarker/salt | 0b1e299b8983854bd55163439e4ac20d81a9dab7 | ["Apache-2.0"] | null | null | null | tests/unit/utils/test_thin.py | hvbarker/salt | 0b1e299b8983854bd55163439e4ac20d81a9dab7 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
"""
from __future__ import absolute_import, print_function, unicode_literals
import copy
import os
import shutil
import sys
import tempfile
import jinja2
import salt.exceptions
import salt.ext.six
import salt.utils.hashutils
import salt.utils.json
import salt.utils.platform
import salt.utils.stringutils
from salt.ext.six.moves import range
from salt.utils import thin
from salt.utils.stringutils import to_bytes as bts
from tests.support.helpers import TstSuiteLoggingHandler
from tests.support.mock import MagicMock, patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
try:
import pytest
except ImportError:
pytest = None
@skipIf(pytest is None, "PyTest is missing")
class SSHThinTestCase(TestCase):
"""
TestCase for SaltSSH-related parts.
"""
def setUp(self):
self.jinja_fp = os.path.dirname(jinja2.__file__)
self.ext_conf = {
"test": {
"py-version": [2, 7],
"path": os.path.join(RUNTIME_VARS.CODE_DIR, "salt"),
"dependencies": {"jinja2": self.jinja_fp},
}
}
self.tops = copy.deepcopy(self.ext_conf)
self.tops["test"]["dependencies"] = [self.jinja_fp]
self.tar = self._tarfile(None).open()
self.digest = salt.utils.hashutils.DigestCollector()
self.exp_files = [
os.path.join("salt", "payload.py"),
os.path.join("jinja2", "__init__.py"),
]
lib_root = os.path.join(RUNTIME_VARS.TMP, "fake-libs")
self.fake_libs = {
"distro": os.path.join(lib_root, "distro"),
"jinja2": os.path.join(lib_root, "jinja2"),
"yaml": os.path.join(lib_root, "yaml"),
"tornado": os.path.join(lib_root, "tornado"),
"msgpack": os.path.join(lib_root, "msgpack"),
}
self.exp_ret = {
"distro": os.path.normpath(
os.path.join(RUNTIME_VARS.CODE_DIR, "distro.py")
),
"jinja2": os.path.normpath(os.path.join(RUNTIME_VARS.CODE_DIR, "jinja2")),
"yaml": os.path.normpath(os.path.join(RUNTIME_VARS.CODE_DIR, "yaml")),
"tornado": os.path.normpath(os.path.join(RUNTIME_VARS.CODE_DIR, "tornado")),
"msgpack": os.path.normpath(os.path.join(RUNTIME_VARS.CODE_DIR, "msgpack")),
"certifi": os.path.normpath(os.path.join(RUNTIME_VARS.CODE_DIR, "certifi")),
"singledispatch": os.path.normpath(
os.path.join(RUNTIME_VARS.CODE_DIR, "singledispatch.py")
),
}
self.exc_libs = ["jinja2", "yaml"]
def tearDown(self):
for lib, fp in self.fake_libs.items():
if os.path.exists(fp):
shutil.rmtree(fp)
self.exc_libs = None
self.jinja_fp = None
self.ext_conf = None
self.tops = None
self.tar = None
self.digest = None
self.exp_files = None
self.fake_libs = None
self.exp_ret = None
def _popen(self, return_value=None, side_effect=None, returncode=0):
"""
Fake subprocess.Popen
:return:
"""
proc = MagicMock()
proc.communicate = MagicMock(return_value=return_value, side_effect=side_effect)
proc.returncode = returncode
popen = MagicMock(return_value=proc)
return popen
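    # Instances of this mock are passed to @patch("salt.utils.thin.subprocess.Popen", ...) below,
    # so gen_thin never spawns a real interpreter.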
def _version_info(self, major=None, minor=None):
"""
Fake version info.
:param major:
:param minor:
:return:
"""
class VersionInfo(tuple):
pass
vi = VersionInfo([major, minor])
vi.major = major or sys.version_info.major
vi.minor = minor or sys.version_info.minor
return vi
def _tarfile(self, getinfo=False):
"""
Fake tarfile handler.
:return:
"""
spec = ["add", "close"]
if getinfo:
spec.append("getinfo")
tf = MagicMock()
tf.open = MagicMock(return_value=MagicMock(spec=spec))
return tf
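    # Tests assert on thin.tarfile.open().method_calls and their "arcname" kwargs instead of
    # writing a real archive.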
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.path.isfile", MagicMock(return_value=False))
def test_get_ext_tops_cfg_missing_dependencies(self):
"""
Test thin.get_ext_tops contains all required dependencies.
:return:
"""
cfg = {"namespace": {"py-version": [0, 0], "path": "/foo", "dependencies": []}}
with pytest.raises(Exception) as err:
thin.get_ext_tops(cfg)
self.assertIn("Missing dependencies", str(err.value))
self.assertTrue(thin.log.error.called)
self.assertIn("Missing dependencies", thin.log.error.call_args[0][0])
self.assertIn("jinja2, yaml, tornado, msgpack", thin.log.error.call_args[0][0])
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.path.isfile", MagicMock(return_value=False))
def test_get_ext_tops_cfg_missing_interpreter(self):
"""
Test thin.get_ext_tops contains interpreter configuration.
:return:
"""
cfg = {"namespace": {"path": "/foo", "dependencies": []}}
with pytest.raises(salt.exceptions.SaltSystemExit) as err:
thin.get_ext_tops(cfg)
self.assertIn("missing specific locked Python version", str(err.value))
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.path.isfile", MagicMock(return_value=False))
def test_get_ext_tops_cfg_wrong_interpreter(self):
"""
Test thin.get_ext_tops contains correct interpreter configuration.
:return:
"""
cfg = {"namespace": {"path": "/foo", "py-version": 2, "dependencies": []}}
with pytest.raises(salt.exceptions.SaltSystemExit) as err:
thin.get_ext_tops(cfg)
self.assertIn(
"specific locked Python version should be a list of " "major/minor version",
str(err.value),
)
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.path.isfile", MagicMock(return_value=False))
def test_get_ext_tops_cfg_interpreter(self):
"""
Test thin.get_ext_tops interpreter configuration.
:return:
"""
cfg = {
"namespace": {
"path": "/foo",
"py-version": [2, 6],
"dependencies": {
"jinja2": "",
"yaml": "",
"tornado": "",
"msgpack": "",
},
}
}
with pytest.raises(salt.exceptions.SaltSystemExit):
thin.get_ext_tops(cfg)
assert len(thin.log.warning.mock_calls) == 4
assert sorted([x[1][1] for x in thin.log.warning.mock_calls]) == [
"jinja2",
"msgpack",
"tornado",
"yaml",
]
assert (
"Module test has missing configuration"
== thin.log.warning.mock_calls[0][1][0] % "test"
)
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.path.isfile", MagicMock(return_value=False))
def test_get_ext_tops_dependency_config_check(self):
"""
Test thin.get_ext_tops dependencies are importable
:return:
"""
cfg = {
"namespace": {
"path": "/foo",
"py-version": [2, 6],
"dependencies": {
"jinja2": "/jinja/foo.py",
"yaml": "/yaml/",
"tornado": "/tornado/wrong.rb",
"msgpack": "msgpack.sh",
},
}
}
with pytest.raises(salt.exceptions.SaltSystemExit) as err:
thin.get_ext_tops(cfg)
self.assertIn(
"Missing dependencies for the alternative version in the "
"external configuration",
str(err.value),
)
messages = {}
for cl in thin.log.warning.mock_calls:
messages[cl[1][1]] = cl[1][0] % (cl[1][1], cl[1][2])
for mod in ["tornado", "yaml", "msgpack"]:
self.assertIn("not a Python importable module", messages[mod])
self.assertIn(
"configured with not a file or does not exist", messages["jinja2"]
)
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.path.isfile", MagicMock(return_value=True))
def test_get_ext_tops_config_pass(self):
"""
Test thin.get_ext_tops configuration
:return:
"""
cfg = {
"namespace": {
"path": "/foo",
"py-version": [2, 6],
"dependencies": {
"jinja2": "/jinja/foo.py",
"yaml": "/yaml/",
"tornado": "/tornado/tornado.py",
"msgpack": "msgpack.py",
"distro": "distro.py",
},
}
}
out = thin.get_ext_tops(cfg)
assert out["namespace"]["py-version"] == cfg["namespace"]["py-version"]
assert out["namespace"]["path"] == cfg["namespace"]["path"]
assert sorted(out["namespace"]["dependencies"]) == sorted(
[
"/tornado/tornado.py",
"/jinja/foo.py",
"/yaml/",
"msgpack.py",
"distro.py",
]
)
@patch("salt.utils.thin.sys.argv", [None, '{"foo": "bar"}'])
@patch("salt.utils.thin.get_tops", lambda **kw: kw)
def test_gte(self):
"""
Test thin.gte external call for processing the info about tops per interpreter.
:return:
"""
assert salt.utils.json.loads(thin.gte()).get("foo") == "bar"
def test_add_dep_path(self):
"""
Test thin._add_dependency function to setup dependency paths
:return:
"""
container = []
for pth in ["/foo/bar.py", "/something/else/__init__.py"]:
thin._add_dependency(container, type(str("obj"), (), {"__file__": pth})())
assert "__init__" not in container[1]
assert container == ["/foo/bar.py", "/something/else"]
def test_thin_path(self):
"""
Test thin.thin_path returns the expected path.
:return:
"""
path = os.sep + os.path.join("path", "to")
expected = os.path.join(path, "thin", "thin.tgz")
self.assertEqual(thin.thin_path(path), expected)
def test_get_salt_call_script(self):
"""
Test get salt-call script rendered.
:return:
"""
out = thin._get_salt_call("foo", "bar", py26=[2, 6], py27=[2, 7], py34=[3, 4])
for line in salt.utils.stringutils.to_str(out).split(os.linesep):
if line.startswith("namespaces = {"):
data = salt.utils.json.loads(line.replace("namespaces = ", "").strip())
assert data.get("py26") == [2, 6]
assert data.get("py27") == [2, 7]
assert data.get("py34") == [3, 4]
if line.startswith("syspaths = "):
data = salt.utils.json.loads(line.replace("syspaths = ", ""))
assert data == ["foo", "bar"]
def test_get_ext_namespaces_empty(self):
"""
Test thin._get_ext_namespaces function returns an empty dictionary on nothing
:return:
"""
for obj in [None, {}, []]:
assert thin._get_ext_namespaces(obj) == {}
def test_get_ext_namespaces(self):
"""
Test thin._get_ext_namespaces function returns namespaces properly out of the config.
:return:
"""
cfg = {"ns": {"py-version": [2, 7]}}
assert thin._get_ext_namespaces(cfg).get("ns") == (2, 7,)
assert isinstance(thin._get_ext_namespaces(cfg).get("ns"), tuple)
def test_get_ext_namespaces_failure(self):
"""
Test thin._get_ext_namespaces function raises an exception
if python major/minor version is not configured.
:return:
"""
with pytest.raises(salt.exceptions.SaltSystemExit):
thin._get_ext_namespaces({"ns": {}})
@patch(
"salt.utils.thin.distro",
type("distro", (), {"__file__": "/site-packages/distro"}),
)
@patch(
"salt.utils.thin.salt",
type(str("salt"), (), {"__file__": "/site-packages/salt"}),
)
@patch(
"salt.utils.thin.jinja2",
type(str("jinja2"), (), {"__file__": "/site-packages/jinja2"}),
)
@patch(
"salt.utils.thin.yaml",
type(str("yaml"), (), {"__file__": "/site-packages/yaml"}),
)
@patch(
"salt.utils.thin.tornado",
type(str("tornado"), (), {"__file__": "/site-packages/tornado"}),
)
@patch(
"salt.utils.thin.msgpack",
type(str("msgpack"), (), {"__file__": "/site-packages/msgpack"}),
)
@patch(
"salt.utils.thin.certifi",
type(str("certifi"), (), {"__file__": "/site-packages/certifi"}),
)
@patch(
"salt.utils.thin.singledispatch",
type(str("singledispatch"), (), {"__file__": "/site-packages/sdp"}),
)
@patch(
"salt.utils.thin.singledispatch_helpers",
type(str("singledispatch_helpers"), (), {"__file__": "/site-packages/sdp_hlp"}),
)
@patch(
"salt.utils.thin.ssl_match_hostname",
type(str("ssl_match_hostname"), (), {"__file__": "/site-packages/ssl_mh"}),
)
@patch(
"salt.utils.thin.markupsafe",
type(str("markupsafe"), (), {"__file__": "/site-packages/markupsafe"}),
)
@patch(
"salt.utils.thin.backports_abc",
type(str("backports_abc"), (), {"__file__": "/site-packages/backports_abc"}),
)
@patch(
"salt.utils.thin.concurrent",
type(str("concurrent"), (), {"__file__": "/site-packages/concurrent"}),
)
@patch("salt.utils.thin.log", MagicMock())
def test_get_tops(self):
"""
Test thin.get_tops to get top directories, based on the interpreter.
:return:
"""
base_tops = [
"/site-packages/distro",
"/site-packages/salt",
"/site-packages/jinja2",
"/site-packages/yaml",
"/site-packages/tornado",
"/site-packages/msgpack",
"/site-packages/certifi",
"/site-packages/sdp",
"/site-packages/sdp_hlp",
"/site-packages/ssl_mh",
"/site-packages/markupsafe",
"/site-packages/backports_abc",
"/site-packages/concurrent",
]
tops = thin.get_tops()
assert len(tops) == len(base_tops)
assert sorted(tops) == sorted(base_tops)
@patch(
"salt.utils.thin.distro",
type("distro", (), {"__file__": "/site-packages/distro"}),
)
@patch(
"salt.utils.thin.salt",
type(str("salt"), (), {"__file__": "/site-packages/salt"}),
)
@patch(
"salt.utils.thin.jinja2",
type(str("jinja2"), (), {"__file__": "/site-packages/jinja2"}),
)
@patch(
"salt.utils.thin.yaml",
type(str("yaml"), (), {"__file__": "/site-packages/yaml"}),
)
@patch(
"salt.utils.thin.tornado",
type(str("tornado"), (), {"__file__": "/site-packages/tornado"}),
)
@patch(
"salt.utils.thin.msgpack",
type(str("msgpack"), (), {"__file__": "/site-packages/msgpack"}),
)
@patch(
"salt.utils.thin.certifi",
type(str("certifi"), (), {"__file__": "/site-packages/certifi"}),
)
@patch(
"salt.utils.thin.singledispatch",
type(str("singledispatch"), (), {"__file__": "/site-packages/sdp"}),
)
@patch(
"salt.utils.thin.singledispatch_helpers",
type(str("singledispatch_helpers"), (), {"__file__": "/site-packages/sdp_hlp"}),
)
@patch(
"salt.utils.thin.ssl_match_hostname",
type(str("ssl_match_hostname"), (), {"__file__": "/site-packages/ssl_mh"}),
)
@patch(
"salt.utils.thin.markupsafe",
type(str("markupsafe"), (), {"__file__": "/site-packages/markupsafe"}),
)
@patch(
"salt.utils.thin.backports_abc",
type(str("backports_abc"), (), {"__file__": "/site-packages/backports_abc"}),
)
@patch(
"salt.utils.thin.concurrent",
type(str("concurrent"), (), {"__file__": "/site-packages/concurrent"}),
)
@patch("salt.utils.thin.log", MagicMock())
def test_get_tops_extra_mods(self):
"""
Test thin.get_tops to get extra-modules alongside the top directories, based on the interpreter.
:return:
"""
base_tops = [
"/site-packages/distro",
"/site-packages/salt",
"/site-packages/jinja2",
"/site-packages/yaml",
"/site-packages/tornado",
"/site-packages/msgpack",
"/site-packages/certifi",
"/site-packages/sdp",
"/site-packages/sdp_hlp",
"/site-packages/ssl_mh",
"/site-packages/concurrent",
"/site-packages/markupsafe",
"/site-packages/backports_abc",
os.sep + os.path.join("custom", "foo"),
os.sep + os.path.join("custom", "bar.py"),
]
builtins = sys.version_info.major == 3 and "builtins" or "__builtin__"
foo = {"__file__": os.sep + os.path.join("custom", "foo", "__init__.py")}
bar = {"__file__": os.sep + os.path.join("custom", "bar")}
with patch(
"{}.__import__".format(builtins),
MagicMock(
side_effect=[type(str("foo"), (), foo), type(str("bar"), (), bar)]
),
):
tops = thin.get_tops(extra_mods="foo,bar")
self.assertEqual(len(tops), len(base_tops))
self.assertListEqual(sorted(tops), sorted(base_tops))
@patch(
"salt.utils.thin.distro",
type("distro", (), {"__file__": "/site-packages/distro"}),
)
@patch(
"salt.utils.thin.salt",
type(str("salt"), (), {"__file__": "/site-packages/salt"}),
)
@patch(
"salt.utils.thin.jinja2",
type(str("jinja2"), (), {"__file__": "/site-packages/jinja2"}),
)
@patch(
"salt.utils.thin.yaml",
type(str("yaml"), (), {"__file__": "/site-packages/yaml"}),
)
@patch(
"salt.utils.thin.tornado",
type(str("tornado"), (), {"__file__": "/site-packages/tornado"}),
)
@patch(
"salt.utils.thin.msgpack",
type(str("msgpack"), (), {"__file__": "/site-packages/msgpack"}),
)
@patch(
"salt.utils.thin.certifi",
type(str("certifi"), (), {"__file__": "/site-packages/certifi"}),
)
@patch(
"salt.utils.thin.singledispatch",
type(str("singledispatch"), (), {"__file__": "/site-packages/sdp"}),
)
@patch(
"salt.utils.thin.singledispatch_helpers",
type(str("singledispatch_helpers"), (), {"__file__": "/site-packages/sdp_hlp"}),
)
@patch(
"salt.utils.thin.ssl_match_hostname",
type(str("ssl_match_hostname"), (), {"__file__": "/site-packages/ssl_mh"}),
)
@patch(
"salt.utils.thin.markupsafe",
type(str("markupsafe"), (), {"__file__": "/site-packages/markupsafe"}),
)
@patch(
"salt.utils.thin.backports_abc",
type(str("backports_abc"), (), {"__file__": "/site-packages/backports_abc"}),
)
@patch(
"salt.utils.thin.concurrent",
type(str("concurrent"), (), {"__file__": "/site-packages/concurrent"}),
)
@patch("salt.utils.thin.log", MagicMock())
def test_get_tops_so_mods(self):
"""
Test thin.get_tops to get extra-modules alongside the top directories, based on the interpreter.
:return:
"""
base_tops = [
"/site-packages/distro",
"/site-packages/salt",
"/site-packages/jinja2",
"/site-packages/yaml",
"/site-packages/tornado",
"/site-packages/msgpack",
"/site-packages/certifi",
"/site-packages/sdp",
"/site-packages/sdp_hlp",
"/site-packages/ssl_mh",
"/site-packages/concurrent",
"/site-packages/markupsafe",
"/site-packages/backports_abc",
"/custom/foo.so",
"/custom/bar.so",
]
builtins = sys.version_info.major == 3 and "builtins" or "__builtin__"
with patch(
"{}.__import__".format(builtins),
MagicMock(
side_effect=[
type(str("salt"), (), {"__file__": "/custom/foo.so"}),
type(str("salt"), (), {"__file__": "/custom/bar.so"}),
]
),
):
tops = thin.get_tops(so_mods="foo,bar")
assert len(tops) == len(base_tops)
assert sorted(tops) == sorted(base_tops)
@patch("salt.utils.thin.gen_thin", MagicMock(return_value="/path/to/thin/thin.tgz"))
@patch("salt.utils.hashutils.get_hash", MagicMock(return_value=12345))
def test_thin_sum(self):
"""
Test thin.thin_sum function.
:return:
"""
assert thin.thin_sum("/cachedir", form="sha256")[1] == 12345
thin.salt.utils.hashutils.get_hash.assert_called()
assert thin.salt.utils.hashutils.get_hash.call_count == 1
path, form = thin.salt.utils.hashutils.get_hash.call_args[0]
assert path == "/path/to/thin/thin.tgz"
assert form == "sha256"
@patch("salt.utils.thin.gen_min", MagicMock(return_value="/path/to/thin/min.tgz"))
@patch("salt.utils.hashutils.get_hash", MagicMock(return_value=12345))
def test_min_sum(self):
"""
Test thin.thin_sum function.
:return:
"""
assert thin.min_sum("/cachedir", form="sha256") == 12345
thin.salt.utils.hashutils.get_hash.assert_called()
assert thin.salt.utils.hashutils.get_hash.call_count == 1
path, form = thin.salt.utils.hashutils.get_hash.call_args[0]
assert path == "/path/to/thin/min.tgz"
assert form == "sha256"
@patch("salt.utils.thin.sys.version_info", (2, 5))
@patch("salt.exceptions.SaltSystemExit", Exception)
def test_gen_thin_fails_ancient_python_version(self):
"""
Test thin.gen_thin function raises an exception
if Python major/minor version is lower than 2.6
:return:
"""
with pytest.raises(salt.exceptions.SaltSystemExit) as err:
thin.sys.exc_clear = lambda: None
thin.gen_thin("")
self.assertIn(
"The minimum required python version to run salt-ssh is " '"2.6"',
str(err.value),
)
@skipIf(
salt.utils.platform.is_windows() and thin._six.PY2, "Dies on Python2 on Windows"
)
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.os.makedirs", MagicMock())
@patch("salt.utils.files.fopen", MagicMock())
@patch("salt.utils.thin._get_salt_call", MagicMock())
@patch("salt.utils.thin._get_ext_namespaces", MagicMock())
@patch("salt.utils.thin.get_tops", MagicMock(return_value=["/foo3", "/bar3"]))
@patch("salt.utils.thin.get_ext_tops", MagicMock(return_value={}))
@patch("salt.utils.thin.os.path.isfile", MagicMock())
@patch("salt.utils.thin.os.path.isdir", MagicMock(return_value=True))
@patch("salt.utils.thin.os.remove", MagicMock())
@patch("salt.utils.thin.os.path.exists", MagicMock())
@patch("salt.utils.path.os_walk", MagicMock(return_value=[]))
@patch(
"salt.utils.thin.subprocess.Popen",
_popen(
None,
side_effect=[(bts("2.7"), bts("")), (bts('["/foo27", "/bar27"]'), bts(""))],
),
)
@patch("salt.utils.thin.tarfile", MagicMock())
@patch("salt.utils.thin.zipfile", MagicMock())
@patch("salt.utils.thin.os.getcwd", MagicMock())
@patch("salt.utils.thin.os.chdir", MagicMock())
@patch("salt.utils.thin.tempfile.mkdtemp", MagicMock())
@patch(
"salt.utils.thin.tempfile.mkstemp", MagicMock(return_value=(3, ".temporary"))
)
@patch("salt.utils.thin.shutil", MagicMock())
@patch("salt.utils.path.which", MagicMock(return_value=""))
@patch("salt.utils.thin._get_thintar_prefix", MagicMock())
def test_gen_thin_python_exist_or_not(self):
"""
Test thin.gen_thin function if the opposite python
binary does not exist
"""
with TstSuiteLoggingHandler() as handler:
thin.gen_thin("")
salt.utils.thin.subprocess.Popen.assert_not_called()
if salt.ext.six.PY2:
self.assertIn(
"DEBUG:python3 binary does not exist. Will not attempt to generate "
"tops for Python 3",
handler.messages,
)
if salt.ext.six.PY3:
self.assertIn(
"DEBUG:python2 binary does not exist. Will not "
"detect Python 2 version",
handler.messages,
)
self.assertIn(
"DEBUG:python2 binary does not exist. Will not attempt to generate "
"tops for Python 2",
handler.messages,
)
@skipIf(
salt.utils.platform.is_windows() and thin._six.PY2, "Dies on Python2 on Windows"
)
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.makedirs", MagicMock())
@patch("salt.utils.files.fopen", MagicMock())
@patch("salt.utils.thin._get_salt_call", MagicMock())
@patch("salt.utils.thin._get_ext_namespaces", MagicMock())
@patch("salt.utils.thin.get_tops", MagicMock(return_value=["/foo3", "/bar3"]))
@patch("salt.utils.thin.get_ext_tops", MagicMock(return_value={}))
@patch("salt.utils.thin.os.path.isfile", MagicMock())
@patch("salt.utils.thin.os.path.isdir", MagicMock(return_value=True))
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.remove", MagicMock())
@patch("salt.utils.thin.os.path.exists", MagicMock())
@patch("salt.utils.path.os_walk", MagicMock(return_value=[]))
@patch(
"salt.utils.thin.subprocess.Popen",
_popen(
None,
side_effect=[(bts("2.7"), bts("")), (bts('["/foo27", "/bar27"]'), bts(""))],
),
)
@patch("salt.utils.thin.tarfile", MagicMock())
@patch("salt.utils.thin.zipfile", MagicMock())
@patch("salt.utils.thin.os.getcwd", MagicMock())
@patch("salt.utils.thin.os.chdir", MagicMock())
@patch("salt.utils.thin.os.close", MagicMock())
@patch("salt.utils.thin.tempfile.mkdtemp", MagicMock())
@patch(
"salt.utils.thin.tempfile.mkstemp", MagicMock(return_value=(3, ".temporary"))
)
@patch("salt.utils.thin.shutil", MagicMock())
@patch("salt.utils.thin._six.PY3", True)
@patch("salt.utils.thin._six.PY2", False)
@patch("salt.utils.thin.sys.version_info", _version_info(None, 3, 6))
@patch("salt.utils.path.which", MagicMock(return_value="/usr/bin/python"))
def test_gen_thin_compression_fallback_py3(self):
"""
Test thin.gen_thin function if fallbacks to the gzip compression, once setup wrong.
NOTE: Py2 version of this test is not required, as code shares the same spot across the versions.
:return:
"""
thin.gen_thin("", compress="arj")
thin.log.warning.assert_called()
pt, msg = thin.log.warning.mock_calls[0][1]
assert (
pt % msg
== 'Unknown compression type: "arj". Falling back to "gzip" compression.'
)
thin.zipfile.ZipFile.assert_not_called()
thin.tarfile.open.assert_called()
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.makedirs", MagicMock())
@patch("salt.utils.files.fopen", MagicMock())
@patch("salt.utils.thin._get_salt_call", MagicMock())
@patch("salt.utils.thin._get_ext_namespaces", MagicMock())
@patch("salt.utils.thin.get_tops", MagicMock(return_value=["/foo3", "/bar3"]))
@patch("salt.utils.thin.get_ext_tops", MagicMock(return_value={}))
@patch("salt.utils.thin.os.path.isfile", MagicMock())
@patch("salt.utils.thin.os.path.isdir", MagicMock(return_value=False))
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.remove", MagicMock())
@patch("salt.utils.thin.os.path.exists", MagicMock())
@patch("salt.utils.path.os_walk", MagicMock(return_value=[]))
@patch(
"salt.utils.thin.subprocess.Popen",
_popen(
None,
side_effect=[(bts("2.7"), bts("")), (bts('["/foo27", "/bar27"]'), bts(""))],
),
)
@patch("salt.utils.thin.tarfile", MagicMock())
@patch("salt.utils.thin.zipfile", MagicMock())
@patch("salt.utils.thin.os.getcwd", MagicMock())
@patch("salt.utils.thin.os.chdir", MagicMock())
@patch("salt.utils.thin.os.close", MagicMock())
@patch("salt.utils.thin.tempfile.mkdtemp", MagicMock(return_value=""))
@patch(
"salt.utils.thin.tempfile.mkstemp", MagicMock(return_value=(3, ".temporary"))
)
@patch("salt.utils.thin.shutil", MagicMock())
@patch("salt.utils.thin._six.PY3", True)
@patch("salt.utils.thin._six.PY2", False)
@patch("salt.utils.thin.sys.version_info", _version_info(None, 3, 6))
@patch("salt.utils.path.which", MagicMock(return_value="/usr/bin/python"))
def test_gen_thin_control_files_written_py3(self):
"""
Test thin.gen_thin function if control files are written (version, salt-call etc).
NOTE: Py2 version of this test is not required, as code shares the same spot across the versions.
:return:
"""
thin.gen_thin("")
arc_name, arc_mode = thin.tarfile.method_calls[0][1]
self.assertEqual(arc_name, ".temporary")
self.assertEqual(arc_mode, "w:gz")
for idx, fname in enumerate(
["version", ".thin-gen-py-version", "salt-call", "supported-versions"]
):
name = thin.tarfile.open().method_calls[idx + 4][1][0]
self.assertEqual(name, fname)
thin.tarfile.open().close.assert_called()
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.makedirs", MagicMock())
@patch("salt.utils.files.fopen", MagicMock())
@patch("salt.utils.thin._get_salt_call", MagicMock())
@patch("salt.utils.thin._get_ext_namespaces", MagicMock())
@patch("salt.utils.thin.get_tops", MagicMock(return_value=["/salt", "/bar3"]))
@patch("salt.utils.thin.get_ext_tops", MagicMock(return_value={}))
@patch("salt.utils.thin.os.path.isfile", MagicMock())
@patch("salt.utils.thin.os.path.isdir", MagicMock(return_value=True))
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.remove", MagicMock())
@patch("salt.utils.thin.os.path.exists", MagicMock())
@patch(
"salt.utils.path.os_walk",
MagicMock(
return_value=(
("root", [], ["r1", "r2", "r3"]),
("root2", [], ["r4", "r5", "r6"]),
)
),
)
@patch(
"salt.utils.thin.subprocess.Popen",
_popen(
None,
side_effect=[(bts("2.7"), bts("")), (bts('["/foo27", "/bar27"]'), bts(""))],
),
)
@patch("salt.utils.thin.tarfile", _tarfile(None))
@patch("salt.utils.thin.zipfile", MagicMock())
@patch("salt.utils.thin.os.getcwd", MagicMock())
@patch("salt.utils.thin.os.chdir", MagicMock())
@patch("salt.utils.thin.os.close", MagicMock())
@patch("salt.utils.thin.tempfile.mkdtemp", MagicMock(return_value=""))
@patch(
"salt.utils.thin.tempfile.mkstemp", MagicMock(return_value=(3, ".temporary"))
)
@patch("salt.utils.thin.shutil", MagicMock())
@patch("salt.utils.thin._six.PY3", True)
@patch("salt.utils.thin._six.PY2", False)
@patch("salt.utils.thin.sys.version_info", _version_info(None, 3, 6))
@patch("salt.utils.hashutils.DigestCollector", MagicMock())
@patch("salt.utils.path.which", MagicMock(return_value="/usr/bin/python"))
def test_gen_thin_main_content_files_written_py3(self):
"""
Test thin.gen_thin function if main content files are written.
NOTE: Py2 version of this test is not required, as code shares the same spot across the versions.
:return:
"""
thin.gen_thin("")
files = []
for py in ("py2", "py2", "py3", "pyall"):
for i in range(1, 4):
files.append(os.path.join(py, "root", "r{0}".format(i)))
for i in range(4, 7):
files.append(os.path.join(py, "root2", "r{0}".format(i)))
for cl in thin.tarfile.open().method_calls[:-6]:
arcname = cl[2].get("arcname")
self.assertIn(arcname, files)
files.pop(files.index(arcname))
self.assertFalse(files)
@patch("salt.exceptions.SaltSystemExit", Exception)
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.makedirs", MagicMock())
@patch("salt.utils.files.fopen", MagicMock())
@patch("salt.utils.thin._get_salt_call", MagicMock())
@patch("salt.utils.thin._get_ext_namespaces", MagicMock())
@patch("salt.utils.thin.get_tops", MagicMock(return_value=[]))
@patch(
"salt.utils.thin.get_ext_tops",
MagicMock(
return_value={
"namespace": {
"py-version": [2, 7],
"path": "/opt/2015.8/salt",
"dependencies": ["/opt/certifi", "/opt/whatever"],
}
}
),
)
@patch("salt.utils.thin.os.path.isfile", MagicMock())
@patch("salt.utils.thin.os.path.isdir", MagicMock(return_value=True))
@patch("salt.utils.thin.log", MagicMock())
@patch("salt.utils.thin.os.remove", MagicMock())
@patch("salt.utils.thin.os.path.exists", MagicMock())
@patch(
"salt.utils.path.os_walk",
MagicMock(
return_value=(
("root", [], ["r1", "r2", "r3"]),
("root2", [], ["r4", "r5", "r6"]),
)
),
)
@patch(
"salt.utils.thin.subprocess.Popen",
_popen(
None,
side_effect=[(bts("2.7"), bts("")), (bts('["/foo27", "/bar27"]'), bts(""))],
),
)
@patch("salt.utils.thin.tarfile", _tarfile(None))
@patch("salt.utils.thin.zipfile", MagicMock())
@patch("salt.utils.thin.os.getcwd", MagicMock())
@patch("salt.utils.thin.os.chdir", MagicMock())
@patch("salt.utils.thin.os.close", MagicMock())
@patch("salt.utils.thin.tempfile.mkdtemp", MagicMock(return_value=""))
@patch(
"salt.utils.thin.tempfile.mkstemp", MagicMock(return_value=(3, ".temporary"))
)
@patch("salt.utils.thin.shutil", MagicMock())
@patch("salt.utils.thin._six.PY3", True)
@patch("salt.utils.thin._six.PY2", False)
@patch("salt.utils.thin.sys.version_info", _version_info(None, 3, 6))
@patch("salt.utils.hashutils.DigestCollector", MagicMock())
@patch("salt.utils.path.which", MagicMock(return_value="/usr/bin/python"))
def test_gen_thin_ext_alternative_content_files_written_py3(self):
"""
Test thin.gen_thin function if external alternative content files are written.
NOTE: Py2 version of this test is not required, as code shares the same spot across the versions.
:return:
"""
ext_conf = {
"namespace": {
"py-version": [2, 7],
"path": "/opt/2015.8/salt",
"dependencies": {
"certifi": "/opt/certifi",
"whatever": "/opt/whatever",
},
}
}
thin.gen_thin("", extended_cfg=ext_conf)
files = []
for py in ("pyall", "pyall", "py2"):
for i in range(1, 4):
files.append(os.path.join("namespace", py, "root", "r{0}".format(i)))
for i in range(4, 7):
files.append(os.path.join("namespace", py, "root2", "r{0}".format(i)))
for idx, cl in enumerate(thin.tarfile.open().method_calls[12:-6]):
arcname = cl[2].get("arcname")
self.assertIn(arcname, files)
files.pop(files.index(arcname))
self.assertFalse(files)
def test_get_supported_py_config_typecheck(self):
"""
Test collecting proper py-versions. Should return bytes type.
:return:
"""
tops = {}
ext_cfg = {}
out = thin._get_supported_py_config(tops=tops, extended_cfg=ext_cfg)
assert type(salt.utils.stringutils.to_bytes("")) == type(out)
def test_get_supported_py_config_base_tops(self):
"""
Test collecting proper py-versions. Should return proper base tops.
:return:
"""
tops = {"3": ["/groundkeepers", "/stole"], "2": ["/the-root", "/password"]}
ext_cfg = {}
out = (
salt.utils.stringutils.to_str(
thin._get_supported_py_config(tops=tops, extended_cfg=ext_cfg)
)
.strip()
.split(os.linesep)
)
self.assertEqual(len(out), 2)
for t_line in ["py3:3:0", "py2:2:7"]:
self.assertIn(t_line, out)
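    # Illustrative note (added for clarity, not part of the original suite):
    # thin._get_supported_py_config appears to emit one "<name>:<major>:<minor>"
    # entry per line, e.g.
    #
    #     py3:3:0
    #     py2:2:7
    #
    # which is the format the assertions above and below check against.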
def test_get_supported_py_config_ext_tops(self):
"""
Test collecting proper py-versions. Should return proper ext conf tops.
:return:
"""
tops = {}
ext_cfg = {
"solar-interference": {"py-version": [2, 6]},
"second-system-effect": {"py-version": [2, 7]},
}
out = (
salt.utils.stringutils.to_str(
thin._get_supported_py_config(tops=tops, extended_cfg=ext_cfg)
)
.strip()
.split(os.linesep)
)
for t_line in ["second-system-effect:2:7", "solar-interference:2:6"]:
self.assertIn(t_line, out)
def test_get_tops_python(self):
"""
test get_tops_python
"""
patch_proc = patch(
"salt.utils.thin.subprocess.Popen",
self._popen(
None,
side_effect=[
(bts("distro.py"), bts("")),
(bts("jinja2/__init__.py"), bts("")),
(bts("yaml/__init__.py"), bts("")),
(bts("tornado/__init__.py"), bts("")),
(bts("msgpack/__init__.py"), bts("")),
(bts("certifi/__init__.py"), bts("")),
(bts("singledispatch.py"), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
],
),
)
patch_os = patch("os.path.exists", return_value=True)
patch_which = patch("salt.utils.path.which", return_value=True)
with patch_proc, patch_os, patch_which:
with TstSuiteLoggingHandler() as log_handler:
ret = thin.get_tops_python("python2.7")
assert ret == self.exp_ret
assert (
"ERROR:Could not auto detect file location for module concurrent for python version python2.7"
in log_handler.messages
)
def test_get_tops_python_exclude(self):
"""
test get_tops_python when excluding modules
"""
patch_proc = patch(
"salt.utils.thin.subprocess.Popen",
self._popen(
None,
side_effect=[
(bts("distro.py"), bts("")),
(bts("tornado/__init__.py"), bts("")),
(bts("msgpack/__init__.py"), bts("")),
(bts("certifi/__init__.py"), bts("")),
(bts("singledispatch.py"), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
],
),
)
exp_ret = copy.deepcopy(self.exp_ret)
for lib in self.exc_libs:
exp_ret.pop(lib)
patch_os = patch("os.path.exists", return_value=True)
patch_which = patch("salt.utils.path.which", return_value=True)
with patch_proc, patch_os, patch_which:
ret = thin.get_tops_python("python2.7", exclude=self.exc_libs)
assert ret == exp_ret
def test_pack_alternatives_exclude(self):
"""
test pack_alternatives when mixing
manually set dependencies and auto
detecting other modules.
"""
patch_proc = patch(
"salt.utils.thin.subprocess.Popen",
self._popen(
None,
side_effect=[
(bts(self.fake_libs["distro"]), bts("")),
(bts(self.fake_libs["yaml"]), bts("")),
(bts(self.fake_libs["tornado"]), bts("")),
(bts(self.fake_libs["msgpack"]), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
(bts(""), bts("")),
],
),
)
patch_os = patch("os.path.exists", return_value=True)
ext_conf = copy.deepcopy(self.ext_conf)
ext_conf["test"]["auto_detect"] = True
for lib in self.fake_libs.values():
os.makedirs(lib)
with salt.utils.files.fopen(os.path.join(lib, "__init__.py"), "w+") as fp_:
fp_.write("test")
exp_files = self.exp_files.copy()
exp_files.extend(
[
os.path.join("yaml", "__init__.py"),
os.path.join("tornado", "__init__.py"),
os.path.join("msgpack", "__init__.py"),
]
)
patch_which = patch("salt.utils.path.which", return_value=True)
with patch_os, patch_proc, patch_which:
thin._pack_alternative(ext_conf, self.digest, self.tar)
calls = self.tar.mock_calls
for _file in exp_files:
assert [x for x in calls if "{}".format(_file) in x.args]
def test_pack_alternatives(self):
"""
test thin._pack_alternatives
"""
with patch("salt.utils.thin.get_ext_tops", MagicMock(return_value=self.tops)):
thin._pack_alternative(self.ext_conf, self.digest, self.tar)
calls = self.tar.mock_calls
for _file in self.exp_files:
assert [x for x in calls if "{}".format(_file) in x.args]
assert [
x
for x in calls
if os.path.join("test", "pyall", _file) in x.kwargs["arcname"]
]
def test_pack_alternatives_not_normalized(self):
"""
test thin._pack_alternatives when the path
is not normalized
"""
tops = copy.deepcopy(self.tops)
tops["test"]["dependencies"] = [self.jinja_fp + "/"]
with patch("salt.utils.thin.get_ext_tops", MagicMock(return_value=tops)):
thin._pack_alternative(self.ext_conf, self.digest, self.tar)
calls = self.tar.mock_calls
for _file in self.exp_files:
assert [x for x in calls if "{}".format(_file) in x.args]
assert [
x
for x in calls
if os.path.join("test", "pyall", _file) in x.kwargs["arcname"]
]
def test_pack_alternatives_path_doesnot_exist(self):
"""
test thin._pack_alternatives when the path
        does not exist. Check the error log message
        and expect that, because the directory
        does not exist, jinja2 does not get
added to the tar
"""
bad_path = os.path.join(tempfile.gettempdir(), "doesnotexisthere")
tops = copy.deepcopy(self.tops)
tops["test"]["dependencies"] = [bad_path]
with patch("salt.utils.thin.get_ext_tops", MagicMock(return_value=tops)):
with TstSuiteLoggingHandler() as log_handler:
thin._pack_alternative(self.ext_conf, self.digest, self.tar)
msg = "ERROR:File path {} does not exist. Unable to add to salt-ssh thin".format(
bad_path
)
assert msg in log_handler.messages
calls = self.tar.mock_calls
for _file in self.exp_files:
arg = [x for x in calls if "{}".format(_file) in x.args]
kwargs = [
x
for x in calls
if os.path.join("test", "pyall", _file) in x.kwargs["arcname"]
]
if "jinja2" in _file:
assert not arg
assert not kwargs
else:
assert arg
assert kwargs
def test_pack_alternatives_auto_detect(self):
"""
test thin._pack_alternatives when auto_detect
is enabled
"""
ext_conf = copy.deepcopy(self.ext_conf)
ext_conf["test"]["auto_detect"] = True
for lib in self.fake_libs.values():
os.makedirs(lib)
with salt.utils.files.fopen(os.path.join(lib, "__init__.py"), "w+") as fp_:
fp_.write("test")
patch_tops_py = patch(
"salt.utils.thin.get_tops_python", return_value=self.fake_libs
)
exp_files = self.exp_files.copy()
exp_files.extend(
[
os.path.join("yaml", "__init__.py"),
os.path.join("tornado", "__init__.py"),
os.path.join("msgpack", "__init__.py"),
]
)
with patch_tops_py:
thin._pack_alternative(ext_conf, self.digest, self.tar)
calls = self.tar.mock_calls
for _file in exp_files:
assert [x for x in calls if "{}".format(_file) in x.args]
def test_pack_alternatives_empty_dependencies(self):
"""
test _pack_alternatives when dependencies is not
set in the config.
"""
ext_conf = copy.deepcopy(self.ext_conf)
ext_conf["test"]["auto_detect"] = True
ext_conf["test"].pop("dependencies")
for lib in self.fake_libs.values():
os.makedirs(lib)
with salt.utils.files.fopen(os.path.join(lib, "__init__.py"), "w+") as fp_:
fp_.write("test")
patch_tops_py = patch(
"salt.utils.thin.get_tops_python", return_value=self.fake_libs
)
exp_files = self.exp_files.copy()
exp_files.extend(
[
os.path.join("yaml", "__init__.py"),
os.path.join("tornado", "__init__.py"),
os.path.join("msgpack", "__init__.py"),
]
)
with patch_tops_py:
thin._pack_alternative(ext_conf, self.digest, self.tar)
calls = self.tar.mock_calls
for _file in exp_files:
assert [x for x in calls if "{}".format(_file) in x.args]
| 37.184129 | 114 | 0.555102 |
0a8f9e8905b0c060162277ed3effe15ffd39c0ac | 442 | py | Python | App/migrations/0006_auto_20180526_1053.py | NeverLeft/DjangoAXF | 9bdf731e2b5a607cbcf0d4faebb7e616daa1a0b4 | [
"Apache-2.0"
] | null | null | null | App/migrations/0006_auto_20180526_1053.py | NeverLeft/DjangoAXF | 9bdf731e2b5a607cbcf0d4faebb7e616daa1a0b4 | [
"Apache-2.0"
] | null | null | null | App/migrations/0006_auto_20180526_1053.py | NeverLeft/DjangoAXF | 9bdf731e2b5a607cbcf0d4faebb7e616daa1a0b4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-05-26 10:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('App', '0005_auto_20180526_1052'),
]
operations = [
migrations.RenameField(
model_name='foodtype',
old_name='childypenames',
new_name='childtypenames',
),
]
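
# Illustrative note (added for clarity, not part of the generated migration):
# RenameField is reversible, so this migration can be applied and rolled back
# with the standard Django commands, e.g.
#
#   python manage.py migrate App                           # apply forwards
#   python manage.py migrate App 0005_auto_20180526_1052   # roll this rename back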
| 21.047619 | 49 | 0.617647 |
0d94c54bc997e9d31ff3e9896eddde832abc8cd9 | 1,336 | py | Python | cprint/cPrint.py | havelar/cPrintFormat | 4487481559fc78a34168550319ac4a02e7b84618 | [
"MIT"
] | null | null | null | cprint/cPrint.py | havelar/cPrintFormat | 4487481559fc78a34168550319ac4a02e7b84618 | [
"MIT"
] | null | null | null | cprint/cPrint.py | havelar/cPrintFormat | 4487481559fc78a34168550319ac4a02e7b84618 | [
"MIT"
] | null | null | null | from .src.cFormat import cFormat
def cprint(txt, color='black', bg=None, style='normal', end='\n', returning=False):
# Font Color process
if isinstance(color,str):
rgb = cFormat.colors.get(color.lower())
if rgb is None:
            raise ValueError('Unknown color.')
elif isinstance(color,tuple) and len(color)==3:
rgb = color
else:
raise ValueError('Color must be string or (R,G,B). Not: {0}'.format(type(color)))
curr_style = cFormat.styles.get(style.lower())
if curr_style is None:
raise ValueError("Invalid style value: '{0}' with type '{1}'".format(curr_style, type(curr_style)))
prefix = cFormat.font_structure.format(*rgb, curr_style)
# BackGround process
if bg:
if isinstance(bg,str):
bg_rgb = cFormat.colors.get(bg.lower())
if bg_rgb is None:
                raise ValueError('Unknown color.')
elif isinstance(bg,tuple) and len(bg)==3:
bg_rgb = bg
else:
raise ValueError('Color must be string or (R,G,B)')
if bg:
prefix = prefix + cFormat.bg_structure.format(*bg_rgb)
string = prefix + str(txt) + cFormat.end
    if returning: # Return formatted string to be used anywhere
return string
else: # Just print
print(string, end=end) | 35.157895 | 107 | 0.607784 |
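# Minimal usage sketch for cprint/cPrint.py above (added for illustration, not part
# of the original module). The import path and the 'red'/'bold' palette entries are
# assumptions; only the defaults ('black', 'normal') are guaranteed by the code above.
#
#   from cprint.cPrint import cprint
#   cprint("hello", color="red", style="bold")                            # print styled text
#   s = cprint("hello", color=(200, 30, 30), bg="black", returning=True)  # get the string back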
8472e43d1f80829ce45afd353982c548cc09d678 | 651 | py | Python | Aula 37/Controller/linguagem_backend_controller.py | Katakhan/TrabalhosPython2 | ab47af0ff3c00922857578e58a1a149d9e65e229 | [
"MIT"
] | null | null | null | Aula 37/Controller/linguagem_backend_controller.py | Katakhan/TrabalhosPython2 | ab47af0ff3c00922857578e58a1a149d9e65e229 | [
"MIT"
] | null | null | null | Aula 37/Controller/linguagem_backend_controller.py | Katakhan/TrabalhosPython2 | ab47af0ff3c00922857578e58a1a149d9e65e229 | [
"MIT"
] | null | null | null | import sys
sys.path.append(r'C:\Users\900132\Desktop\GitHub\TrabalhosPython2\Aula 37')
from Dao.linguagem_backend_dao import BackDao
from Model.linguagem_backend import LinguagemBackend
class LinguagemBackendController:
dao = BackDao()
def listar_todos(self):
return self.dao.listar_todos()
def buscar_por_id(self, id):
return self.dao.buscar_por_id(id)
def salvar(self, linguagemBackend:LinguagemBackend):
return self.dao.salvar(linguagemBackend)
def alterar(self, linguagemBackend:LinguagemBackend):
self.dao.alterar(linguagemBackend)
def deletar(self, id):
self.dao.deletar(id)
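
# Minimal usage sketch (added for illustration, not part of the original controller).
# It assumes the Dao layer and its database are reachable on this machine.
if __name__ == "__main__":
    controller = LinguagemBackendController()
    # nova = LinguagemBackend(...)   # constructor arguments depend on the Model class
    # controller.salvar(nova)
    for linguagem in controller.listar_todos():
        print(linguagem)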
| 28.304348 | 75 | 0.740399 |
0a31ca38e9ab8af82c929f39c27e42885c8ba17b | 476 | py | Python | tests/multi_lead_graphene_output/plot_bands.py | kmkolasinski/Bubel | dd0f66475c5d93060e48a6a46a8debf2f2a350d0 | [
"MIT"
] | null | null | null | tests/multi_lead_graphene_output/plot_bands.py | kmkolasinski/Bubel | dd0f66475c5d93060e48a6a46a8debf2f2a350d0 | [
"MIT"
] | null | null | null | tests/multi_lead_graphene_output/plot_bands.py | kmkolasinski/Bubel | dd0f66475c5d93060e48a6a46a8debf2f2a350d0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""
Created on Thu Mar 5 14:16:21 2015
@author: Krzysztof Kolasinski
"""
import numpy as np
import matplotlib.pyplot as plt
import csv
file = "bands.dat"
data = np.loadtxt(file)
no_lines = np.size(data[0,:])
x = data[:,0]
for i in range(no_lines-1):
plt.plot(x,data[:,i+1],c='k',ls='-')
#plt.ylim([-0.0003,0.001])
plt.xlabel("k [1/unit size]")
plt.ylabel("Energy [in atomic units]")
plt.savefig("bands.png")
#plt.show()
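
# Illustrative note (added for clarity, not part of the original script): "bands.dat"
# is assumed to be a whitespace-separated text file readable by np.loadtxt, with the
# k values in column 0 and one band per remaining column. A synthetic file for a quick
# smoke test could be generated with:
#
#   k = np.linspace(-np.pi, np.pi, 200)
#   bands = np.column_stack([k] + [0.01 * (i + 1) * np.cos(k) for i in range(4)])
#   np.savetxt("bands.dat", bands)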
| 19.04 | 45 | 0.617647 |
8c9a87a6619c9e4148e67b8b47f7bf949409a767 | 77,300 | py | Python | jina/flow/base.py | sakshamgurbhele/jina | 5565b55b233eaac9fc59e8361b3e18d78c4d9b15 | [
"Apache-2.0"
] | 1 | 2021-11-14T13:22:29.000Z | 2021-11-14T13:22:29.000Z | jina/flow/base.py | sakshamgurbhele/jina | 5565b55b233eaac9fc59e8361b3e18d78c4d9b15 | [
"Apache-2.0"
] | null | null | null | jina/flow/base.py | sakshamgurbhele/jina | 5565b55b233eaac9fc59e8361b3e18d78c4d9b15 | [
"Apache-2.0"
] | null | null | null | import argparse
import base64
import copy
import itertools
import json
import multiprocessing
import os
import re
import sys
import threading
import time
import uuid
from collections import OrderedDict
from contextlib import ExitStack
from typing import (
Optional,
Union,
Tuple,
List,
Set,
Dict,
overload,
Type,
TYPE_CHECKING,
)
from .builder import allowed_levels, _hanging_pods
from .. import __default_host__
from ..clients import Client
from ..clients.mixin import AsyncPostMixin, PostMixin
from ..enums import (
FlowBuildLevel,
PodRoleType,
FlowInspectType,
GatewayProtocolType,
InfrastructureType,
PollingType,
)
from ..excepts import (
FlowTopologyError,
FlowMissingPodError,
RoutingTableCyclicError,
RuntimeFailToStart,
)
from ..helper import (
colored,
get_public_ip,
get_internal_ip,
typename,
ArgNamespace,
download_mermaid_url,
CatchAllCleanupContextManager,
)
from ..jaml import JAMLCompatible
from ..logging.logger import JinaLogger
from ..parsers import set_gateway_parser, set_pod_parser, set_client_cli_parser
from ..parsers.flow import set_flow_parser
from ..peapods import CompoundPod, Pod
from ..peapods.networking import is_remote_local_connection
from ..peapods.pods.factory import PodFactory
from ..peapods.pods.k8s import K8sPod
from ..types.routing.table import RoutingTable
__all__ = ['Flow']
class FlowType(type(ExitStack), type(JAMLCompatible)):
"""Type of Flow, metaclass of :class:`BaseFlow`"""
pass
_regex_port = r'(.*?):([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$'
if TYPE_CHECKING:
from ..executors import BaseExecutor
from ..clients.base import BaseClient
from .asyncio import AsyncFlow
GATEWAY_NAME = 'gateway'
FALLBACK_PARSERS = [
set_gateway_parser(),
set_pod_parser(),
set_client_cli_parser(),
set_flow_parser(),
]
class Flow(PostMixin, JAMLCompatible, ExitStack, metaclass=FlowType):
"""Flow is how Jina streamlines and distributes Executors. """
class _FlowK8sInfraResourcesManager:
def __init__(self, k8s_namespace: str, k8s_custom_resource_dir: Optional[str]):
self.k8s_namespace = k8s_namespace
self.k8s_custom_resource_dir = k8s_custom_resource_dir
self.namespace_created = False
def __enter__(self):
from ..peapods.pods.k8slib import kubernetes_tools, kubernetes_client
client = kubernetes_client.K8sClients().core_v1
list_namespaces = [
item.metadata.name for item in client.list_namespace().items
]
if self.k8s_namespace not in list_namespaces:
with JinaLogger(f'create_{self.k8s_namespace}') as logger:
logger.info(f'🏝️\tCreate Namespace "{self.k8s_namespace}"')
kubernetes_tools.create(
'namespace',
{'name': self.k8s_namespace},
logger=logger,
custom_resource_dir=self.k8s_custom_resource_dir,
)
self.namespace_created = True
def __exit__(self, exc_type, exc_val, exc_tb):
from ..peapods.pods.k8slib import kubernetes_client
if self.namespace_created:
client = kubernetes_client.K8sClients().core_v1
client.delete_namespace(name=self.k8s_namespace)
# overload_inject_start_client_flow
@overload
def __init__(
self,
*,
asyncio: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
https: Optional[bool] = False,
port: Optional[int] = None,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina client` CLI.
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param https: If set, connect to gateway using https
:param port: The port of the Gateway, which the client should connect to.
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_client_flow
# overload_inject_start_gateway_flow
@overload
def __init__(
self,
*,
compress: Optional[str] = 'NONE',
compress_min_bytes: Optional[int] = 1024,
compress_min_ratio: Optional[float] = 1.1,
cors: Optional[bool] = False,
ctrl_with_ipc: Optional[bool] = True,
daemon: Optional[bool] = False,
default_swagger_ui: Optional[bool] = False,
description: Optional[str] = None,
env: Optional[dict] = None,
expose_endpoints: Optional[str] = None,
expose_public: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
host_out: Optional[str] = '0.0.0.0',
hosts_in_connect: Optional[List[str]] = None,
log_config: Optional[str] = None,
memory_hwm: Optional[int] = -1,
name: Optional[str] = 'gateway',
native: Optional[bool] = False,
no_crud_endpoints: Optional[bool] = False,
no_debug_endpoints: Optional[bool] = False,
on_error_strategy: Optional[str] = 'IGNORE',
port_ctrl: Optional[int] = None,
port_expose: Optional[int] = None,
port_in: Optional[int] = None,
port_out: Optional[int] = None,
prefetch: Optional[int] = 0,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
replicas: Optional[int] = 1,
runs_in_docker: Optional[bool] = False,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'GRPCRuntime',
shards: Optional[int] = 1,
socket_in: Optional[str] = 'PULL_CONNECT',
socket_out: Optional[str] = 'PUSH_CONNECT',
ssh_keyfile: Optional[str] = None,
ssh_password: Optional[str] = None,
ssh_server: Optional[str] = None,
static_routing_table: Optional[bool] = False,
timeout_ctrl: Optional[int] = 5000,
timeout_ready: Optional[int] = 600000,
title: Optional[str] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
uvicorn_kwargs: Optional[dict] = None,
workspace: Optional[str] = None,
zmq_identity: Optional[str] = None,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina gateway` CLI.
:param compress: The compress algorithm used over the entire Flow.
Note that this is not necessarily effective,
it depends on the settings of `--compress-min-bytes` and `compress-min-ratio`
:param compress_min_bytes: The original message size must be larger than this number to trigger the compress algorithm, -1 means disable compression.
:param compress_min_ratio: The compression ratio (uncompressed_size/compressed_size) must be higher than this number to trigger the compress algorithm.
:param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
:param ctrl_with_ipc: If set, use ipc protocol for control socket
        :param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true basically tells the Pea not to wait on the Runtime when closing
:param default_swagger_ui: If set, the default swagger ui is used for `/docs` endpoint.
:param description: The description of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param env: The map of environment variables that are available inside runtime
:param expose_endpoints: A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.
        :param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows accessing under the same network/subnet. Important to set this to true when the Pea will receive input connections from remote Peas
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for input, by default it is 0.0.0.0
:param host_out: The host address for output, by default it is 0.0.0.0
:param hosts_in_connect: The host address for input, by default it is 0.0.0.0
:param log_config: The YAML config of the logger used in this object.
:param memory_hwm: The memory high watermark of this pod in Gigabytes, pod will restart when this is reached. -1 means no restriction
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors is allowed, and the Executor is always run inside ZEDRuntime.
:param no_crud_endpoints: If set, /index, /search, /update, /delete endpoints are removed from HTTP interface.
Any executor that has `@requests(on=...)` bind with those values will receive data requests.
:param no_debug_endpoints: If set, /status /post endpoints are removed from HTTP interface.
:param on_error_strategy: The skip strategy on exceptions.
- IGNORE: Ignore it, keep running all Executors in the sequel flow
- SKIP_HANDLE: Skip all Executors in the sequel, only `pre_hook` and `post_hook` are called
- THROW_EARLY: Immediately throw the exception, the sequel flow will not be running at all
            Note, `IGNORE`, `SKIP_EXECUTOR` and `SKIP_HANDLE` do not guarantee successful execution in the sequel flow. If something
            is wrong upstream, it is hard to carry this exception forward without any side-effect.
:param port_ctrl: The port for controlling the runtime, default a random port between [49152, 65535]
:param port_expose: The port that the gateway exposes for clients for GRPC connections.
:param port_in: The port for input data, default a random port between [49152, 65535]
:param port_out: The port for output data, default a random port between [49152, 65535]
:param prefetch: Number of requests fetched from the client before feeding into the first Executor.
Used to control the speed of data input into a Flow. 0 disables prefetch (disabled by default)
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param py_modules: The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param replicas: The number of replicas in the pod, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param runs_in_docker: Informs a Pea that runs in a container. Important to properly set networking information
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param shards: The number of shards in the pod running at the same time, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary. For more details check https://docs.jina.ai/fundamentals/flow/topology/
:param socket_in: The socket type for input port
:param socket_out: The socket type for output port
:param ssh_keyfile: This specifies a key to be used in ssh login, default None. regular default ssh keys will be used without specifying this argument.
:param ssh_password: The ssh password to the ssh server.
:param ssh_server: The SSH server through which the tunnel will be created, can actually be a fully specified `user@server:port` ssh url.
:param static_routing_table: Defines if the routing table should be pre computed by the Flow. In this case it is statically defined for each Pod and not send on every data request. Can not be used in combination with external pods
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param title: The title of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When use it under Python, one can use the following values additionally:
- a Python dict that represents the config
- a text file stream has `.read()` interface
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:param zmq_identity: The identity of a ZMQRuntime. It is used for unique socket identification towards other ZMQRuntimes.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_gateway_flow
# overload_inject_start_flow
@overload
def __init__(
self,
*,
env: Optional[dict] = None,
inspect: Optional[str] = 'COLLECT',
log_config: Optional[str] = None,
name: Optional[str] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
static_routing_table: Optional[bool] = False,
uses: Optional[str] = None,
workspace: Optional[str] = './',
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina flow` CLI.
:param env: The map of environment variables that are available inside runtime
:param inspect: The strategy on those inspect pods in the flow.
If `REMOVE` is given then all inspect pods are removed when building the flow.
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param static_routing_table: Defines if the routing table should be pre computed by the Flow. In this case it is statically defined for each Pod and not send on every data request. Can not be used in combination with external pods
:param uses: The YAML file represents a flow
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_flow
def __init__(
self,
args: Optional['argparse.Namespace'] = None,
**kwargs,
):
super().__init__()
self._version = '1' #: YAML version number, this will be later overridden if YAML config says the other way
self._pod_nodes = OrderedDict() # type: Dict[str, Pod]
self._inspect_pods = {} # type: Dict[str, str]
self._endpoints_mapping = {} # type: Dict[str, Dict]
self._build_level = FlowBuildLevel.EMPTY
self._last_changed_pod = [
GATEWAY_NAME
] #: default first pod is gateway, will add when build()
self._update_args(args, **kwargs)
self.k8s_infrastructure_manager = None
if self.args.infrastructure == InfrastructureType.K8S:
self.k8s_infrastructure_manager = self._FlowK8sInfraResourcesManager(
k8s_namespace=self.args.k8s_namespace or self.args.name,
k8s_custom_resource_dir=getattr(
self.args, 'k8s_custom_resource_dir', None
),
)
if isinstance(self.args, argparse.Namespace):
self.logger = JinaLogger(
self.__class__.__name__, **vars(self.args), **self._common_kwargs
)
else:
self.logger = JinaLogger(self.__class__.__name__, **self._common_kwargs)
def _update_args(self, args, **kwargs):
from ..parsers.flow import set_flow_parser
from ..helper import ArgNamespace
_flow_parser = set_flow_parser()
if args is None:
args = ArgNamespace.kwargs2namespace(
kwargs, _flow_parser, True, fallback_parsers=FALLBACK_PARSERS
)
self.args = args
# common args should be the ones that can not be parsed by _flow_parser
known_keys = vars(args)
self._common_kwargs = {k: v for k, v in kwargs.items() if k not in known_keys}
self._kwargs = ArgNamespace.get_non_defaults_args(
args, _flow_parser
) #: for yaml dump
if self._common_kwargs.get('asyncio', False) and not isinstance(
self, AsyncPostMixin
):
from .asyncio import AsyncFlow
self.__class__ = AsyncFlow
@staticmethod
def _parse_endpoints(op_flow, pod_name, endpoint, connect_to_last_pod=False) -> Set:
# parsing needs
if isinstance(endpoint, str):
endpoint = [endpoint]
elif not endpoint:
if op_flow._last_changed_pod and connect_to_last_pod:
endpoint = [op_flow.last_pod]
else:
endpoint = []
if isinstance(endpoint, (list, tuple)):
for idx, s in enumerate(endpoint):
if s == pod_name:
raise FlowTopologyError(
                        'the input/output of a pod can not be itself'
)
else:
raise ValueError(f'endpoint={endpoint} is not parsable')
# if an endpoint is being inspected, then replace it with inspected Pod
endpoint = set(op_flow._inspect_pods.get(ep, ep) for ep in endpoint)
return endpoint
@property
def last_pod(self):
"""Last pod
.. # noqa: DAR401
.. # noqa: DAR201
"""
return self._last_changed_pod[-1]
@last_pod.setter
def last_pod(self, name: str):
"""
Set a Pod as the last Pod in the Flow, useful when modifying the Flow.
.. # noqa: DAR401
:param name: the name of the existing Pod
"""
if name not in self._pod_nodes:
raise FlowMissingPodError(f'{name} can not be found in this Flow')
if self._last_changed_pod and name == self.last_pod:
pass
else:
self._last_changed_pod.append(name)
# graph is now changed so we need to
# reset the build level to the lowest
self._build_level = FlowBuildLevel.EMPTY
@allowed_levels([FlowBuildLevel.EMPTY])
def _add_gateway(self, needs, **kwargs):
kwargs.update(
dict(
name=GATEWAY_NAME,
ctrl_with_ipc=True, # otherwise ctrl port would be conflicted
host=self.host,
protocol=self.protocol,
port_expose=self.port_expose,
pod_role=PodRoleType.GATEWAY,
expose_endpoints=json.dumps(self._endpoints_mapping),
k8s_namespace=self.args.k8s_namespace or self.args.name,
)
)
kwargs.update(self._common_kwargs)
args = ArgNamespace.kwargs2namespace(kwargs, set_gateway_parser())
args.k8s_namespace = self.args.k8s_namespace or self.args.name
args.connect_to_predecessor = False
args.noblock_on_start = True
self._pod_nodes[GATEWAY_NAME] = PodFactory.build_pod(
args, needs, self.args.infrastructure
)
@allowed_levels([FlowBuildLevel.EMPTY])
def needs(
self, needs: Union[Tuple[str], List[str]], name: str = 'joiner', *args, **kwargs
) -> 'Flow':
"""
        Add a blocker to the Flow, wait until all peas defined in **needs** have completed.
.. # noqa: DAR401
:param needs: list of service names to wait
:param name: the name of this joiner, by default is ``joiner``
:param args: additional positional arguments forwarded to the add function
:param kwargs: additional key value arguments forwarded to the add function
:return: the modified Flow
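
        Example (added for illustration; the pod names are placeholders):

        .. highlight:: python
        .. code-block:: python

            f = (
                Flow()
                .add(name='p1')
                .add(name='p2', needs='gateway')
                .needs(['p1', 'p2'], name='joiner')
            )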
"""
if len(needs) <= 1:
raise FlowTopologyError(
'no need to wait for a single service, need len(needs) > 1'
)
return self.add(
name=name, needs=needs, pod_role=PodRoleType.JOIN, *args, **kwargs
)
def needs_all(self, name: str = 'joiner', *args, **kwargs) -> 'Flow':
"""
        Collect all hanging Pods so far and add a blocker to the Flow; wait until all hanging peas have completed.
:param name: the name of this joiner (default is ``joiner``)
:param args: additional positional arguments which are forwarded to the add and needs function
:param kwargs: additional key value arguments which are forwarded to the add and needs function
:return: the modified Flow
"""
needs = _hanging_pods(self)
if len(needs) == 1:
return self.add(name=name, needs=needs, *args, **kwargs)
return self.needs(name=name, needs=needs, *args, **kwargs)
# overload_inject_start_pod
@overload
def add(
self,
*,
connect_to_predecessor: Optional[bool] = False,
ctrl_with_ipc: Optional[bool] = False,
daemon: Optional[bool] = False,
docker_kwargs: Optional[dict] = None,
entrypoint: Optional[str] = None,
env: Optional[dict] = None,
expose_public: Optional[bool] = False,
external: Optional[bool] = False,
force_update: Optional[bool] = False,
gpus: Optional[str] = None,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
host_out: Optional[str] = '0.0.0.0',
hosts_in_connect: Optional[List[str]] = None,
install_requirements: Optional[bool] = False,
log_config: Optional[str] = None,
memory_hwm: Optional[int] = -1,
name: Optional[str] = None,
native: Optional[bool] = False,
on_error_strategy: Optional[str] = 'IGNORE',
peas_hosts: Optional[List[str]] = None,
polling: Optional[str] = 'ANY',
port_ctrl: Optional[int] = None,
port_in: Optional[int] = None,
port_jinad: Optional[int] = 8000,
port_out: Optional[int] = None,
pull_latest: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
quiet_remote_logs: Optional[bool] = False,
replicas: Optional[int] = 1,
runs_in_docker: Optional[bool] = False,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'ZEDRuntime',
scheduling: Optional[str] = 'LOAD_BALANCE',
shards: Optional[int] = 1,
socket_in: Optional[str] = 'PULL_BIND',
socket_out: Optional[str] = 'PUSH_BIND',
ssh_keyfile: Optional[str] = None,
ssh_password: Optional[str] = None,
ssh_server: Optional[str] = None,
static_routing_table: Optional[bool] = False,
timeout_ctrl: Optional[int] = 5000,
timeout_ready: Optional[int] = 600000,
upload_files: Optional[List[str]] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_before: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
volumes: Optional[List[str]] = None,
workspace: Optional[str] = None,
zmq_identity: Optional[str] = None,
**kwargs,
) -> Union['Flow', 'AsyncFlow']:
"""Add an Executor to the current Flow object.
:param connect_to_predecessor: The head Pea of this Pod will connect to the TailPea of the predecessor Pod.
:param ctrl_with_ipc: If set, use ipc protocol for control socket
        :param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true basically tells the Pea not to wait on the Runtime when closing
:param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
        :param entrypoint: The entrypoint command overrides the ENTRYPOINT in Docker image. When not set, the Docker image ENTRYPOINT takes effect.
:param env: The map of environment variables that are available inside runtime
        :param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows accessing under the same network/subnet. Important to set this to true when the Pea will receive input connections from remote Peas
:param external: The Pod will be considered an external Pod that has been started independently from the Flow.This Pod will not be context managed by the Flow.
:param force_update: If set, always pull the latest Hub Executor bundle even it exists on local
:param gpus: This argument allows dockerized Jina executor discover local gpu devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
- To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for input, by default it is 0.0.0.0
:param host_out: The host address for output, by default it is 0.0.0.0
:param hosts_in_connect: The host address for input, by default it is 0.0.0.0
:param install_requirements: If set, install `requirements.txt` in the Hub Executor bundle to local
:param log_config: The YAML config of the logger used in this object.
:param memory_hwm: The memory high watermark of this pod in Gigabytes, pod will restart when this is reached. -1 means no restriction
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors is allowed, and the Executor is always run inside ZEDRuntime.
:param on_error_strategy: The skip strategy on exceptions.
- IGNORE: Ignore it, keep running all Executors in the sequel flow
- SKIP_HANDLE: Skip all Executors in the sequel, only `pre_hook` and `post_hook` are called
- THROW_EARLY: Immediately throw the exception, the sequel flow will not be running at all
            Note, `IGNORE`, `SKIP_EXECUTOR` and `SKIP_HANDLE` do not guarantee successful execution in the sequel flow. If something
            is wrong upstream, it is hard to carry this exception forward without any side-effect.
        :param peas_hosts: The hosts of the peas when shards is greater than 1.
Peas will be evenly distributed among the hosts. By default,
peas are running on host provided by the argument ``host``
:param polling: The polling strategy of the Pod (when `shards>1`)
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
:param port_ctrl: The port for controlling the runtime, default a random port between [49152, 65535]
:param port_in: The port for input data, default a random port between [49152, 65535]
:param port_jinad: The port of the remote machine for usage with JinaD.
:param port_out: The port for output data, default a random port between [49152, 65535]
:param pull_latest: Pull the latest image before running
:param py_modules: The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param quiet_remote_logs: Do not display the streaming of remote logs on local console
:param replicas: The number of replicas in the pod, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param runs_in_docker: Informs a Pea that runs in a container. Important to properly set networking information
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param scheduling: The strategy of scheduling workload among Peas
:param shards: The number of shards in the pod running at the same time, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary. For more details check https://docs.jina.ai/fundamentals/flow/topology/
:param socket_in: The socket type for input port
:param socket_out: The socket type for output port
:param ssh_keyfile: This specifies a key to be used in ssh login, default None. regular default ssh keys will be used without specifying this argument.
:param ssh_password: The ssh password to the ssh server.
:param ssh_server: The SSH server through which the tunnel will be created, can actually be a fully specified `user@server:port` ssh url.
:param static_routing_table: Defines if the routing table should be pre computed by the Flow. In this case it is statically defined for each Pod and not send on every data request. Can not be used in combination with external pods
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param upload_files: The files on the host to be uploaded to the remote
workspace. This can be useful when your Pod has more
file dependencies beyond a single YAML file, e.g.
Python files, data files.
Note,
            - currently only a flattened structure is supported, which means if you upload `[./foo/a.py, ./foo/b.pp, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies.
- by default, `--uses` YAML file is always uploaded.
- uploaded files are by default isolated across the runs. To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When use it under Python, one can use the following values additionally:
- a Python dict that represents the config
- a text file stream has `.read()` interface
:param uses_after: The executor attached after the Peas described by --uses, typically used for receiving from all shards, accepted type follows `--uses`
:param uses_before: The executor attached after the Peas described by --uses, typically before sending to all shards, accepted type follows `--uses`
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param volumes: The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split provided, then the basename of that directory will be mounted into container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:param zmq_identity: The identity of a ZMQRuntime. It is used for unique socket identification towards other ZMQRuntimes.
:return: a (new) Flow object with modification
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_pod
@allowed_levels([FlowBuildLevel.EMPTY])
def add(
self,
*,
needs: Optional[Union[str, Tuple[str], List[str]]] = None,
copy_flow: bool = True,
pod_role: 'PodRoleType' = PodRoleType.POD,
**kwargs,
) -> 'Flow':
"""
Add a Pod to the current Flow object and return the new modified Flow object.
The attribute of the Pod can be later changed with :py:meth:`set` or deleted with :py:meth:`remove`
.. # noqa: DAR401
:param needs: the name of the Pod(s) that this Pod receives data from.
One can also use 'gateway' to indicate the connection with the gateway.
:param pod_role: the role of the Pod, used for visualization and route planning
:param copy_flow: when set to true, then always copy the current Flow and do the modification on top of it then return, otherwise, do in-line modification
:param kwargs: other keyword-value arguments that the Pod CLI supports
:return: a (new) Flow object with modification
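
        Example (added for illustration; ``MyExecutor`` and the YAML path are placeholders):

        .. highlight:: python
        .. code-block:: python

            from jina import Executor

            class MyExecutor(Executor):
                pass

            f = Flow().add(uses=MyExecutor, name='encoder')
            f = f.add(uses='indexer.yml', needs='encoder')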
"""
op_flow = copy.deepcopy(self) if copy_flow else self
# pod naming logic
pod_name = kwargs.get('name', None)
if pod_name in op_flow._pod_nodes:
new_name = f'{pod_name}{len(op_flow._pod_nodes)}'
self.logger.debug(
f'"{pod_name}" is used in this Flow already! renamed it to "{new_name}"'
)
pod_name = new_name
if not pod_name:
pod_name = f'executor{len(op_flow._pod_nodes)}'
if not pod_name.isidentifier():
# hyphen - can not be used in the name
raise ValueError(
f'name: {pod_name} is invalid, please follow the python variable name conventions'
)
# needs logic
needs = op_flow._parse_endpoints(
op_flow, pod_name, needs, connect_to_last_pod=True
)
# set the kwargs inherit from `Flow(kwargs1=..., kwargs2=)`
for key, value in op_flow._common_kwargs.items():
if key not in kwargs:
kwargs[key] = value
# check if host is set to remote:port
if 'host' in kwargs:
m = re.match(_regex_port, kwargs['host'])
if (
kwargs.get('host', __default_host__) != __default_host__
and m
and 'port_jinad' not in kwargs
):
kwargs['port_jinad'] = m.group(2)
kwargs['host'] = m.group(1)
# update kwargs of this Pod
kwargs.update(dict(name=pod_name, pod_role=pod_role, num_part=len(needs)))
parser = set_pod_parser()
if pod_role == PodRoleType.GATEWAY:
parser = set_gateway_parser()
args = ArgNamespace.kwargs2namespace(
kwargs, parser, True, fallback_parsers=FALLBACK_PARSERS
)
# grpc data runtime does not support sharding at the moment
if (
args.grpc_data_requests
and kwargs.get('shards') is not None
and kwargs.get('shards', 1) > 1
and self.args.infrastructure != InfrastructureType.K8S
):
raise NotImplementedError("GRPC data runtime does not support sharding")
if args.grpc_data_requests and args.runtime_cls == 'ZEDRuntime':
args.runtime_cls = 'GRPCDataRuntime'
# pod workspace if not set then derive from flow workspace
args.workspace = os.path.abspath(args.workspace or self.workspace)
args.k8s_namespace = self.args.k8s_namespace or self.args.name
args.noblock_on_start = True
args.extra_search_paths = self.args.extra_search_paths
args.zmq_identity = None
# BACKWARDS COMPATIBILITY:
# We assume that this is used in a search Flow if replicas and shards are used
# Thus the polling type should be all
        # But don't override any user-provided polling
if args.replicas > 1 and args.shards > 1 and 'polling' not in kwargs:
args.polling = PollingType.ALL
op_flow._pod_nodes[pod_name] = PodFactory.build_pod(
args, needs, self.args.infrastructure
)
op_flow.last_pod = pod_name
return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def inspect(self, name: str = 'inspect', *args, **kwargs) -> 'Flow':
"""Add an inspection on the last changed Pod in the Flow
Internally, it adds two Pods to the Flow. But don't worry, the overhead is minimized and you
can remove them by simply using `Flow(inspect=FlowInspectType.REMOVE)` before using the Flow.
.. highlight:: bash
.. code-block:: bash
Flow -- PUB-SUB -- BasePod(_pass) -- Flow
|
-- PUB-SUB -- InspectPod (Hanging)
In this way, :class:`InspectPod` looks like a simple ``_pass`` from outside and
does not introduce side-effects (e.g. changing the socket type) to the original Flow.
The original incoming and outgoing socket types are preserved.
This function is very handy for introducing an Evaluator into the Flow.
.. seealso::
:meth:`gather_inspect`
:param name: name of the Pod
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the new instance of the Flow
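
        Example (added for illustration; the evaluator YAML is a placeholder):

        .. highlight:: python
        .. code-block:: python

            f = (
                Flow()
                .add(name='encoder')
                .inspect(name='evaluator', uses='my_evaluator.yml')
                .add(name='indexer')
            )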
"""
_last_pod = self.last_pod
op_flow = self.add(
name=name, needs=_last_pod, pod_role=PodRoleType.INSPECT, *args, **kwargs
)
# now remove uses and add an auxiliary Pod
if 'uses' in kwargs:
kwargs.pop('uses')
op_flow = op_flow.add(
name=f'_aux_{name}',
needs=_last_pod,
pod_role=PodRoleType.INSPECT_AUX_PASS,
*args,
**kwargs,
)
# register any future connection to _last_pod by the auxiliary Pod
op_flow._inspect_pods[_last_pod] = op_flow.last_pod
return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def gather_inspect(
self,
name: str = 'gather_inspect',
include_last_pod: bool = True,
*args,
**kwargs,
) -> 'Flow':
"""Gather all inspect Pods output into one Pod. When the Flow has no inspect Pod then the Flow itself
is returned.
.. note::
If ``--no-inspect`` is **not** given, then :meth:`gather_inspect` is auto called before :meth:`build`. So
in general you don't need to manually call :meth:`gather_inspect`.
:param name: the name of the gather Pod
:param include_last_pod: if to include the last modified Pod in the Flow
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the modified Flow or the copy of it
.. seealso::
:meth:`inspect`
"""
needs = [k for k, v in self._pod_nodes.items() if v.role == PodRoleType.INSPECT]
if needs:
if include_last_pod:
needs.append(self.last_pod)
return self.add(
name=name,
needs=needs,
pod_role=PodRoleType.JOIN_INSPECT,
*args,
**kwargs,
)
else:
# no inspect node is in the graph, return the current graph
return self
def _get_gateway_target(self, prefix):
gateway_pod = self._pod_nodes[GATEWAY_NAME]
return (
f'{prefix}-{GATEWAY_NAME}',
{
'host': gateway_pod.head_host,
'port': gateway_pod.head_port_in,
'expected_parts': 0,
},
)
# TODO needs to be refactored - deployment should not be a dictionary. Related Ticket:
# https://github.com/jina-ai/jina/issues/3280
def _get_routing_table(self) -> RoutingTable:
graph = RoutingTable()
for pod_id, pod in self._pod_nodes.items():
if pod_id == GATEWAY_NAME:
deployment = pod.deployments[0]
graph.add_pod(
f'start-{GATEWAY_NAME}',
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
graph.add_pod(
f'end-{GATEWAY_NAME}',
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
else:
for deployment in pod.deployments:
graph.add_pod(
deployment['name'],
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
for end, pod in self._pod_nodes.items():
if end == GATEWAY_NAME:
end = f'end-{GATEWAY_NAME}'
if pod.head_args.hosts_in_connect is None:
pod.head_args.hosts_in_connect = []
if isinstance(pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
end = kubernetes_deployment.to_dns_name(end)
if end not in graph.pods:
end = end + '_head'
if isinstance(pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
end = kubernetes_deployment.to_dns_name(end)
for start in pod.needs:
start_pod = self._pod_nodes[start]
if start == GATEWAY_NAME:
start = f'start-{GATEWAY_NAME}'
if isinstance(start_pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
start = kubernetes_deployment.to_dns_name(start)
if start not in graph.pods:
start = start + '_tail'
if isinstance(start_pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
start = kubernetes_deployment.to_dns_name(start)
start_pod = graph._get_target_pod(start)
if pod.connect_to_predecessor or is_remote_local_connection(
start_pod.host, pod.head_host
):
pod.head_args.hosts_in_connect.append(
graph._get_target_pod(start).full_out_address
)
graph.add_edge(start, end, True)
else:
graph.add_edge(start, end)
# In case of sharding, the head and the tail pea have to be connected to the shards
for end, pod in self._pod_nodes.items():
if len(pod.deployments) > 0:
deployments = pod.deployments
for deployment in deployments[1:-1]:
graph.add_edge(deployments[0]['name'], deployment['name'])
graph.add_edge(deployment['name'], deployments[-1]['name'])
graph.active_pod = f'start-{GATEWAY_NAME}'
return graph
def _set_initial_dynamic_routing_table(self):
routing_table = self._get_routing_table()
if not routing_table.is_acyclic():
raise RoutingTableCyclicError(
'The routing graph has a cycle. This would result in an infinite loop. Fix your Flow setup.'
)
for pod in self._pod_nodes:
routing_table_copy = RoutingTable()
routing_table_copy.proto.CopyFrom(routing_table.proto)
self._pod_nodes[
pod
].args.static_routing_table = self.args.static_routing_table
# The gateway always needs the routing table to be set
if pod == GATEWAY_NAME:
self._pod_nodes[pod].args.routing_table = routing_table_copy.json()
# For other pods we only set it if we are told do so
elif self.args.static_routing_table:
routing_table_copy.active_pod = pod
self._pod_nodes[pod].args.routing_table = routing_table_copy.json()
# dynamic routing does not apply to shards in a CompoundPod, only its tail
if not isinstance(self._pod_nodes[pod], CompoundPod):
self._pod_nodes[pod].update_pea_args()
else:
self._pod_nodes[pod].tail_args.routing_table = self._pod_nodes[
pod
].args.routing_table
self._pod_nodes[
pod
].tail_args.static_routing_table = self.args.static_routing_table
@allowed_levels([FlowBuildLevel.EMPTY])
def build(self, copy_flow: bool = False) -> 'Flow':
"""
Build the current Flow and make it ready to use
.. note::
No need to manually call it since 0.0.8. When using Flow with the
context manager, or using :meth:`start`, :meth:`build` will be invoked.
:param copy_flow: when set to true, then always copy the current Flow and do the modification on top of it then return, otherwise, do in-line modification
:return: the current Flow (by default)
.. note::
``copy_flow=True`` is recommended if you are building the same Flow multiple times in a row. e.g.
.. highlight:: python
.. code-block:: python
f = Flow()
with f:
f.index()
with f.build(copy_flow=True) as fl:
fl.search()
.. # noqa: DAR401
"""
op_flow = copy.deepcopy(self) if copy_flow else self
if op_flow.args.inspect == FlowInspectType.COLLECT:
op_flow.gather_inspect(copy_flow=False)
if GATEWAY_NAME not in op_flow._pod_nodes:
op_flow._add_gateway(needs={op_flow.last_pod})
# if set no_inspect then all inspect related nodes are removed
if op_flow.args.inspect == FlowInspectType.REMOVE:
op_flow._pod_nodes = {
k: v for k, v in op_flow._pod_nodes.items() if not v.role.is_inspect
}
reverse_inspect_map = {v: k for k, v in op_flow._inspect_pods.items()}
for end, pod in op_flow._pod_nodes.items():
# if an endpoint is being inspected, then replace it with inspected Pod
# but not those inspect related node
if op_flow.args.inspect.is_keep:
pod.needs = set(
ep if pod.role.is_inspect else op_flow._inspect_pods.get(ep, ep)
for ep in pod.needs
)
else:
pod.needs = set(reverse_inspect_map.get(ep, ep) for ep in pod.needs)
op_flow._set_initial_dynamic_routing_table()
hanging_pods = _hanging_pods(op_flow)
if hanging_pods:
op_flow.logger.warning(
f'{hanging_pods} are hanging in this flow with no pod receiving from them, '
f'you may want to double check if it is intentional or some mistake'
)
op_flow._build_level = FlowBuildLevel.GRAPH
return op_flow
def __call__(self, *args, **kwargs):
"""Builds the Flow
:param args: args for build
:param kwargs: kwargs for build
:return: the built Flow
"""
return self.build(*args, **kwargs)
def __enter__(self):
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, '_stop_event'):
self._stop_event.set()
super().__exit__(exc_type, exc_val, exc_tb)
# unset all envs to avoid any side-effect
if self.args.env:
for k in self.args.env.keys():
os.environ.pop(k, None)
        # not sure why, but removing these 2 lines makes 2 tests fail
if GATEWAY_NAME in self._pod_nodes:
self._pod_nodes.pop(GATEWAY_NAME)
self._build_level = FlowBuildLevel.EMPTY
self.logger.debug('Flow is closed!')
self.logger.close()
def start(self):
"""Start to run all Pods in this Flow.
Remember to close the Flow with :meth:`close`.
Note that this method has a timeout of ``timeout_ready`` set in CLI,
which is inherited all the way from :class:`jina.peapods.peas.BasePea`
.. # noqa: DAR401
:return: this instance
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
if self.k8s_infrastructure_manager is not None:
self.enter_context(self.k8s_infrastructure_manager)
# set env only before the Pod get started
if self.args.env:
for k, v in self.args.env.items():
os.environ[k] = str(v)
for k, v in self:
if not getattr(v.args, 'external', False):
self.enter_context(v)
self._wait_until_all_ready()
self._build_level = FlowBuildLevel.RUNNING
return self
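    # Illustrative life-cycle sketch (added for clarity; pod names/arguments are
    # assumptions, only the start/close behaviour shown comes from this class):
    #
    #     f = Flow().add(name='pod_a')
    #     with f:            # __enter__ -> start(): builds the graph, starts all Pods
    #         f.block()      # serve until the stop event is set or Ctrl-C
    #     # leaving the context manager closes the Pods again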
def _wait_until_all_ready(self):
results = {}
threads = []
def _wait_ready(_pod_name, _pod):
try:
if not getattr(_pod.args, 'external', False):
results[_pod_name] = 'pending'
_pod.wait_start_success()
results[_pod_name] = 'done'
except Exception as ex:
results[_pod_name] = repr(ex)
def _polling_status():
spinner = itertools.cycle(
['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']
)
while True:
num_all = len(results)
num_done = 0
pendings = []
for _k, _v in results.items():
sys.stdout.flush()
if _v == 'pending':
pendings.append(_k)
else:
num_done += 1
sys.stdout.write('\r{}\r'.format(' ' * 100))
pending_str = colored(' '.join(pendings)[:50], 'yellow')
sys.stdout.write(
f'{colored(next(spinner), "green")} {num_done}/{num_all} waiting {pending_str} to be ready...'
)
sys.stdout.flush()
if not pendings:
sys.stdout.write('\r{}\r'.format(' ' * 100))
break
time.sleep(0.1)
# kick off all pods wait-ready threads
for k, v in self:
t = threading.Thread(
target=_wait_ready,
args=(
k,
v,
),
daemon=True,
)
threads.append(t)
t.start()
# kick off spinner thread
t_m = threading.Thread(target=_polling_status, daemon=True)
t_m.start()
# kick off ip getter thread
addr_table = []
t_ip = None
if self.args.infrastructure != InfrastructureType.K8S:
t_ip = threading.Thread(
target=self._get_address_table, args=(addr_table,), daemon=True
)
t_ip.start()
for t in threads:
t.join()
if t_ip is not None:
t_ip.join()
t_m.join()
error_pods = [k for k, v in results.items() if v != 'done']
if error_pods:
self.logger.error(
                f'Flow is aborted because {error_pods} cannot be started.'
)
self.close()
raise RuntimeFailToStart
else:
if self.args.infrastructure == InfrastructureType.K8S:
success_msg = colored('🎉 Kubernetes Flow is ready to use!', 'green')
else:
success_msg = colored('🎉 Flow is ready to use!', 'green')
if addr_table:
self.logger.info(success_msg + '\n' + '\n'.join(addr_table))
self.logger.debug(
f'{self.num_pods} Pods (i.e. {self.num_peas} Peas) are running in this Flow'
)
@property
def num_pods(self) -> int:
"""Get the number of Pods in this Flow
.. # noqa: DAR201"""
return len(self._pod_nodes)
@property
def num_peas(self) -> int:
"""Get the number of peas (shards count) in this Flow
.. # noqa: DAR201"""
return sum(v.num_peas for v in self._pod_nodes.values())
def __eq__(self, other: 'Flow') -> bool:
"""
Compare the topology of a Flow with another Flow.
        Equality is defined by whether the two Flows share the same set of edges.
:param other: the second Flow object
:return: result of equality check
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow = copy.deepcopy(self)
a = op_flow.build()
else:
a = self
if other._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow_b = copy.deepcopy(other)
b = op_flow_b.build()
else:
b = other
return a._pod_nodes == b._pod_nodes
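    # Illustrative comparison sketch (pod names are assumptions): both Flows are built
    # on demand and their pod topologies are compared.
    #
    #     f1 = Flow().add(name='a').add(name='b')
    #     f2 = Flow().add(name='a').add(name='b')
    #     same_topology = (f1 == f2)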
@property
def client(self) -> 'BaseClient':
"""Return a :class:`BaseClient` object attach to this Flow.
.. # noqa: DAR201"""
kwargs = dict(
host=self.host,
port=self.port_expose,
protocol=self.protocol,
)
kwargs.update(self._common_kwargs)
return Client(**kwargs)
@property
def _mermaid_str(self):
mermaid_graph = [
'''
%%{init:{
"theme": "base",
"themeVariables": {
"primaryColor": "#fff",
"primaryBorderColor": "#fff",
"mainBkg": "#32C8CD",
"clusterBkg": "#EEEDE78C",
"secondaryBorderColor": "none",
"tertiaryBorderColor": "none",
"lineColor": "#a6d8da"
}
}}%%
'''.replace(
'\n', ''
),
'flowchart LR;',
]
pod_nodes = []
# plot subgraphs
for node, v in self._pod_nodes.items():
pod_nodes.append(v.name)
mermaid_graph.extend(v._mermaid_str)
for node, v in self._pod_nodes.items():
for need in sorted(v.needs):
need_print = need
if need == 'gateway':
need_print = 'gatewaystart[gateway]'
node_print = node
if node == 'gateway':
node_print = 'gatewayend[gateway]'
_s_role = self._pod_nodes[need].role
_e_role = self._pod_nodes[node].role
if getattr(self._pod_nodes[need].args, 'external', False):
_s_role = 'EXTERNAL'
if getattr(self._pod_nodes[node].args, 'external', False):
_e_role = 'EXTERNAL'
line_st = '-->'
if _s_role == PodRoleType.INSPECT or _e_role == PodRoleType.INSPECT:
line_st = '-.->'
mermaid_graph.append(
f'{need_print}:::{str(_s_role)} {line_st} {node_print}:::{str(_e_role)};'
)
mermaid_graph.append(f'classDef {str(PodRoleType.INSPECT)} stroke:#F29C9F')
mermaid_graph.append(f'classDef {str(PodRoleType.JOIN_INSPECT)} stroke:#F29C9F')
mermaid_graph.append(
f'classDef {str(PodRoleType.GATEWAY)} fill:none,color:#000,stroke:none'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.INSPECT_AUX_PASS)} stroke-dasharray: 2 2'
)
mermaid_graph.append(f'classDef HEADTAIL fill:#32C8CD1D')
mermaid_graph.append(f'\nclassDef EXTERNAL fill:#fff,stroke:#32C8CD')
return '\n'.join(mermaid_graph)
def plot(
self,
output: Optional[str] = None,
vertical_layout: bool = False,
inline_display: bool = False,
build: bool = True,
copy_flow: bool = True,
) -> 'Flow':
"""
Visualize the Flow up to the current point
If a file name is provided it will create a jpg image with that name,
otherwise it will display the URL for mermaid.
If called within IPython notebook, it will be rendered inline,
otherwise an image will be created.
Example,
.. highlight:: python
.. code-block:: python
flow = Flow().add(name='pod_a').plot('flow.svg')
:param output: a filename specifying the name of the image to be created,
the suffix svg/jpg determines the file type of the output image
:param vertical_layout: top-down or left-right layout
:param inline_display: show image directly inside the Jupyter Notebook
:param build: build the Flow first before plotting, gateway connection can be better showed
:param copy_flow: when set to true, then always copy the current Flow and
do the modification on top of it then return, otherwise, do in-line modification
:return: the Flow
"""
# deepcopy causes the below error while reusing a Flow in Jupyter
# 'Pickling an AuthenticationString object is disallowed for security reasons'
op_flow = copy.deepcopy(self) if copy_flow else self
if build:
op_flow.build(False)
mermaid_str = op_flow._mermaid_str
if vertical_layout:
mermaid_str = mermaid_str.replace('flowchart LR', 'flowchart TD')
image_type = 'svg'
if output and not output.endswith('svg'):
image_type = 'img'
url = op_flow._mermaid_to_url(mermaid_str, image_type)
showed = False
if inline_display:
try:
from IPython.display import display, Image
display(Image(url=url))
showed = True
except:
# no need to panic users
pass
if output:
download_mermaid_url(url, output)
elif not showed:
op_flow.logger.info(f'flow visualization: {url}')
return self
def _ipython_display_(self):
"""Displays the object in IPython as a side effect"""
self.plot(
inline_display=True, build=(self._build_level != FlowBuildLevel.GRAPH)
)
def _mermaid_to_url(self, mermaid_str: str, img_type: str) -> str:
"""
        Render the current Flow as a URL pointing to an SVG image. This requires an internet connection.
        :param mermaid_str: the mermaid representation
        :param img_type: image type (svg/jpg)
        :return: the URL pointing to the rendered SVG
"""
encoded_str = base64.b64encode(bytes(mermaid_str, 'utf-8')).decode('utf-8')
return f'https://mermaid.ink/{img_type}/{encoded_str}'
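    # Standalone sketch of the URL scheme used above: mermaid.ink renders a diagram whose
    # definition is base64-encoded directly into the URL path.
    #
    #     import base64
    #     definition = 'flowchart LR; gateway --> pod_a;'
    #     url = 'https://mermaid.ink/svg/' + base64.b64encode(definition.encode('utf-8')).decode('utf-8')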
@property
def port_expose(self) -> int:
"""Return the exposed port of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].args.port_expose
else:
return self._common_kwargs.get('port_expose', None)
@port_expose.setter
def port_expose(self, value: int):
"""Set the new exposed port of the Flow (affects Gateway and Client)
:param value: the new port to expose
"""
self._common_kwargs['port_expose'] = value
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.port_expose = self._common_kwargs['port_expose']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def host(self) -> str:
"""Return the local address of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].host
else:
return self._common_kwargs.get('host', __default_host__)
@host.setter
def host(self, value: str):
"""Set the new host of the Flow (affects Gateway and Client)
        :param value: the new host to set
"""
self._common_kwargs['host'] = value
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.host = self._common_kwargs['host']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
    def address_private(self) -> str:
        """Return the private IP address of the gateway, for connections from other machines on the same network
.. # noqa: DAR201"""
return get_internal_ip()
@property
    def address_public(self) -> str:
        """Return the public IP address of the gateway, for connections from other machines on the public network
.. # noqa: DAR201"""
return get_public_ip()
def __iter__(self):
return self._pod_nodes.items().__iter__()
def _get_address_table(self, address_table):
address_table.extend(
[
f'\t🔗 Protocol: \t\t{colored(self.protocol, attrs="bold")}',
f'\t🏠 Local access:\t'
+ colored(f'{self.host}:{self.port_expose}', 'cyan', attrs='underline'),
f'\t🔒 Private network:\t'
+ colored(
f'{self.address_private}:{self.port_expose}',
'cyan',
attrs='underline',
),
]
)
if self.address_public:
address_table.append(
f'\t🌐 Public address:\t'
+ colored(
f'{self.address_public}:{self.port_expose}',
'cyan',
attrs='underline',
)
)
if self.protocol == GatewayProtocolType.HTTP:
address_table.append(
f'\t💬 Swagger UI:\t\t'
+ colored(
f'http://localhost:{self.port_expose}/docs',
'cyan',
attrs='underline',
)
)
address_table.append(
f'\t📚 Redoc:\t\t'
+ colored(
f'http://localhost:{self.port_expose}/redoc',
'cyan',
attrs='underline',
)
)
return address_table
def block(
self, stop_event: Optional[Union[threading.Event, multiprocessing.Event]] = None
):
"""Block the Flow until `stop_event` is set or user hits KeyboardInterrupt
        :param stop_event: a threading or multiprocessing event that, once set, returns control
            to the main thread.
"""
try:
if stop_event is None:
self._stop_event = (
threading.Event()
) #: this allows `.close` to close the Flow from another thread/proc
self._stop_event.wait()
else:
stop_event.wait()
except KeyboardInterrupt:
pass
@property
def protocol(self) -> GatewayProtocolType:
"""Return the protocol of this Flow
:return: the protocol of this Flow
"""
v = self._common_kwargs.get('protocol', GatewayProtocolType.GRPC)
if isinstance(v, str):
v = GatewayProtocolType.from_string(v)
return v
@protocol.setter
def protocol(self, value: Union[str, GatewayProtocolType]):
"""Set the protocol of this Flow
:param value: the protocol to set
"""
if isinstance(value, str):
self._common_kwargs['protocol'] = GatewayProtocolType.from_string(value)
elif isinstance(value, GatewayProtocolType):
self._common_kwargs['protocol'] = value
else:
raise TypeError(f'{value} must be either `str` or `GatewayProtocolType`')
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.protocol = self._common_kwargs['protocol']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
def __getitem__(self, item):
if isinstance(item, str):
return self._pod_nodes[item]
elif isinstance(item, int):
return list(self._pod_nodes.values())[item]
else:
raise TypeError(f'{typename(item)} is not supported')
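    # Illustrative indexing sketch (pod name is an assumption): a built Flow can be
    # indexed by pod name or by insertion position.
    #
    #     gateway_pod = flow['gateway']   # by name
    #     first_pod = flow[0]             # by position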
@property
def workspace(self) -> str:
"""Return the workspace path of the flow.
.. # noqa: DAR201"""
return os.path.abspath(self.args.workspace or './')
@workspace.setter
def workspace(self, value: str):
"""set workspace dir for flow & all pods
:param value: workspace to be set
"""
self.args.workspace = value
for k, p in self:
p.args.workspace = value
@property
def workspace_id(self) -> Dict[str, str]:
"""Get all Pods' ``workspace_id`` values in a dict
.. # noqa: DAR201"""
return {
k: p.args.workspace_id for k, p in self if hasattr(p.args, 'workspace_id')
}
@workspace_id.setter
def workspace_id(self, value: str):
"""Set all Pods' ``workspace_id`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
for k, p in self:
if hasattr(p.args, 'workspace_id'):
p.args.workspace_id = value
args = getattr(p, 'peas_args', getattr(p, 'shards_args', None))
if args is None:
raise ValueError(
f'could not find "peas_args" or "shards_args" on {p}'
)
values = None
if isinstance(args, dict):
values = args.values()
elif isinstance(args, list):
values = args
for v in values:
if v and isinstance(v, argparse.Namespace):
v.workspace_id = value
if v and isinstance(v, List):
for i in v:
i.workspace_id = value
@property
def env(self) -> Optional[Dict]:
"""Get all envs to be set in the Flow
:return: envs as dict
"""
return self.args.env
@env.setter
def env(self, value: Dict[str, str]):
"""set env vars for flow & all pods.
This can be used by jinad to set envs for Flow and all child objects
:param value: value to be set
"""
self.args.env = value
for k, v in self:
v.args.env = value
@property
def identity(self) -> Dict[str, str]:
"""Get all Pods' ``identity`` values in a dict
.. # noqa: DAR201
"""
return {k: p.args.identity for k, p in self}
@identity.setter
def identity(self, value: str):
"""Set all Pods' ``identity`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
# Re-initiating logger with new identity
self.logger = JinaLogger(self.__class__.__name__, **vars(self.args))
for _, p in self:
p.args.identity = value
@overload
def expose_endpoint(self, exec_endpoint: str, path: Optional[str] = None):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param path: the HTTP endpoint string, when not given, it is `exec_endpoint`
"""
...
@overload
def expose_endpoint(
self,
exec_endpoint: str,
*,
path: Optional[str] = None,
status_code: int = 200,
tags: Optional[List[str]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = 'Successful Response',
deprecated: Optional[bool] = None,
methods: Optional[List[str]] = None,
operation_id: Optional[str] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
name: Optional[str] = None,
):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
Use this method to specify your HTTP endpoint with richer semantic and schema.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
"""
...
def expose_endpoint(self, exec_endpoint: str, **kwargs):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
# noqa: DAR102
"""
self._endpoints_mapping[exec_endpoint] = kwargs
# for backward support
join = needs
def rolling_update(
self,
pod_name: str,
dump_path: Optional[str] = None,
*,
uses_with: Optional[Dict] = None,
):
"""
Reload all replicas of a pod sequentially
:param pod_name: pod to update
:param dump_path: **backwards compatibility** This function was only accepting dump_path as the only potential arg to override
:param uses_with: a Dictionary of arguments to restart the executor with
"""
from ..helper import run_async
run_async(
self._pod_nodes[pod_name].rolling_update,
dump_path=dump_path,
uses_with=uses_with,
any_event_loop=True,
)
@property
def client_args(self) -> argparse.Namespace:
"""Get Client settings.
# noqa: DAR201
"""
if 'port_expose' in self._common_kwargs:
kwargs = copy.deepcopy(self._common_kwargs)
kwargs['port'] = self._common_kwargs['port_expose']
return ArgNamespace.kwargs2namespace(kwargs, set_client_cli_parser())
@property
def gateway_args(self) -> argparse.Namespace:
"""Get Gateway settings.
# noqa: DAR201
"""
return ArgNamespace.kwargs2namespace(self._common_kwargs, set_gateway_parser())
def update_network_interface(self, **kwargs):
"""Update the network interface of this Flow (affects Gateway & Client)
:param kwargs: new network settings
"""
self._common_kwargs.update(kwargs)
| 41.648707 | 283 | 0.612393 |
2366d3b29e85a30dbf84e0b96269af504eb4c167 | 1,494 | py | Python | copper_sdk/activities.py | Agilicus/copper-sdk | dfdecd4aa76bdd47661fdd4bfada7781f8eae835 | ["MIT"] | 4 | 2021-01-03T07:40:01.000Z | 2021-09-03T09:21:02.000Z | copper_sdk/activities.py | Agilicus/copper-sdk | dfdecd4aa76bdd47661fdd4bfada7781f8eae835 | ["MIT"] | 5 | 2020-09-03T17:28:13.000Z | 2021-10-04T22:47:23.000Z | copper_sdk/activities.py | Agilicus/copper-sdk | dfdecd4aa76bdd47661fdd4bfada7781f8eae835 | ["MIT"] | 3 | 2020-09-02T14:54:46.000Z | 2021-09-02T18:12:45.000Z |
from copper_sdk.base import BaseResource
class Activities(BaseResource):
def __init__(self, copper):
self.copper = copper
def get(self, id):
return self.copper.get(f'/activities/{id}')
def create(self, body=None):
if body is None:
body = {}
return self.copper.post('/activities', body)
def delete(self, id):
return self.copper.delete(f'/activities/{id}')
def list(self, body=None):
if body is None:
body = {}
default_body = {
# 'parent': {}, # hash A hash describing the resource to which activities must belong (footnote 1).
# 'activity_types': {}, # activity_type[] The activity types to filter results on (footnote 1). none
'page_number': 1, # number The page number (starting with 1) that you would like to view. 1
'page_size': 20, # number The number of entries included in a page of results 20
# 'minimum_activity_date': "", # number The Unix timestamp of the earliest activity date. none
# 'maximum_activity_date': "", # number The Unix timestamp of the latest activity date. none
'full_result': False, # boolean (Optional) If set to true, search performance improves but duplicate activity logs may be returned (footnote 3). false
}
return self.copper.post('/activities/search', { **default_body, **body})
def types(self):
return self.copper.get('/activity_types')
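# Minimal usage sketch (illustrative; not part of the original module). It assumes an
# already-authenticated Copper client exposing .get()/.post()/.delete(); how that client
# is constructed lives elsewhere in this SDK.
if __name__ == "__main__":
    copper_client = ...  # replace with a configured copper_sdk client
    activities = Activities(copper_client)
    print(activities.types())                 # available activity types
    print(activities.list({"page_size": 5}))  # first five activity log entries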
| 40.378378 | 162 | 0.630522 |
bc952f01badf96b7ea39053431fb4cfd9b7508ba | 7,553 | py | Python | OntoMapper.py | shadi-tabasi/IOPE | 330572bad72d2af92eb55388c65ae01b4a16b3fb | ["CC0-1.0"] | null | null | null | OntoMapper.py | shadi-tabasi/IOPE | 330572bad72d2af92eb55388c65ae01b4a16b3fb | ["CC0-1.0"] | null | null | null | OntoMapper.py | shadi-tabasi/IOPE | 330572bad72d2af92eb55388c65ae01b4a16b3fb | ["CC0-1.0"] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from rdflib.graph import Graph
from rdflib import URIRef
import Queries, OntoConcepts, UtilityFunctions
import unicodedata, os, time, sys
import random
def OntoMapper(AtelierURI, AtelierLabel):
StartTime = time.time()
# input parameters
OntoFileName = "onto.nt"
# QueryParameter = "samsei:PriseEnChargeUnArretCardioRespiratoire"
QueryParameter = AtelierURI
QueryParameterLabel = AtelierLabel
# QueryParameterLabel = "Prise en charge d'un arret cardio respiratoire"
    print("Mapping ontology for "+QueryParameter+" ...")
QueryParameterWithoutDots = QueryParameter.replace(":","")
DirName = "static/HTMLPages/Atelier_"+QueryParameterWithoutDots
try: os.mkdir(DirName)
except: pass
MyGraph = Graph()
MyGraph.parse(OntoFileName, format="nt")
Namespace = dict(owl="http://www.w3.org/2002/07/owl#",samsei="http://my.ontology.fr/sgm#")
HasValueConstraints = OntoConcepts.OntoQuery(Queries.HasValueParamQuery, QueryParameter, MyGraph, Namespace)
HasValueConstraintsResults = HasValueConstraints.RunQuery()
CardinalityMaxConstraints = OntoConcepts.OntoQuery(Queries.CardinalityMaxParamQuery, QueryParameter, MyGraph, Namespace)
CardinalityMaxResults = CardinalityMaxConstraints.RunQuery()
CardinalityConstraints = OntoConcepts.OntoQuery(Queries.CardinalityMinParamQuery, QueryParameter, MyGraph, Namespace)
CardinalityMinResults = CardinalityConstraints.RunQuery()
AlternativeValueConstraints = OntoConcepts.OntoQuery(Queries.AlternativeParamQuery, QueryParameter, MyGraph, Namespace)
AlternativeValueConstraintsResults = AlternativeValueConstraints.RunQuery()
OntoGlobal = OntoConcepts.OntoGlobalClass()
for ResultRow in HasValueConstraintsResults:
HasValueConstraint = OntoConcepts.HasValueConstraintClass()
HasValueConstraint.Fill(ResultRow)
OntoGlobal.Update(HasValueConstraint, "HasValue")
for ResultRow in CardinalityMaxResults:
CardinalityMaxConstraint = OntoConcepts.CardinalityConstraintClass("Max")
CardinalityMaxConstraint.Fill(ResultRow)
OntoGlobal.Update(CardinalityMaxConstraint, "Max")
OntoGlobal.UpdateMinMax(CardinalityMaxConstraint, MyGraph, Namespace)
for ResultRow in CardinalityMinResults:
CardinalityMinConstraint = OntoConcepts.CardinalityConstraintClass("Min")
CardinalityMinConstraint.Fill(ResultRow)
OntoGlobal.Update(CardinalityMinConstraint, "Min")
OntoGlobal.UpdateMinMax(CardinalityMinConstraint, MyGraph, Namespace)
for ResultRow in AlternativeValueConstraintsResults:
AlternativeConstraint = OntoConcepts.AlternativeConstraintClass()
AlternativeConstraint.Fill(ResultRow)
OntoGlobal.Update(AlternativeConstraint, "Alter")
OntoGlobal.UpdateAlter(AlternativeConstraint, "Alter")
# AtelierIndexPage = open(DirName+"/AtelierIndex.html","w")
# AtelierIndexPage.write("<html><head><meta charset='UTF-8'><meta name='viewport' content='width=device-width, initial-scale=1'><link rel='stylesheet' href='https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'><script src='https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js'></script><style>body {padding: 20px;} a {cursor: pointer;}</style></head><body><div class='container'><div class='jumbotron'><h2>Pages du formulaire décrivant l'atelier \""+ AtelierLabel + "\":</h2></div><div class='card'><h4 class='card-header'>Accéder à chaque page pour compléter et valider les différentes sections</h4><div class='card-body'><p><b>Pages vues jusqu'à présent: </b>{{ cookie_content }}</p>")
# AtelierIndexPage.write("{{ session_name }} <br>\n")
# HTML generation
count_pages = 0
for Parent in OntoGlobal.Parents:
count_pages += 1
print("page",count_pages)
SeenProperties = []
HTMLFile = OntoConcepts.HTMLFileClass(Parent,DirName,AtelierURI)
HTMLFile.Initialize(AtelierLabel)
ChildrenOfParent = OntoGlobal.GetParentAssociation(Parent)
for Child in ChildrenOfParent:
if UtilityFunctions.Is_Child_Valid(Child, SeenProperties) == False: continue
SeenProperties.append(Child)
HTMLElement = OntoConcepts.HTMLElementClass(Child, Parent, MyGraph, Namespace)
SubChildFlatRelations = HTMLElement.GetSubChildFlatRelations(Child, MyGraph, Namespace)
HTMLTreeID = random.randint(1,1000)
PropertyTree = UtilityFunctions.GetTreeStructure("Property",SubChildFlatRelations,Child,HTMLTreeID)
ChildMinNumber = OntoGlobal.GetMinNumber(Child)
HTMLElement.Initialize(PropertyTree,SubChildFlatRelations,ChildMinNumber)
if OntoGlobal.PropMarker[Child].find("HasValue") != -1:
ChildValueClassLabel = OntoGlobal.GetValueClassLabel(Child)
ChildValueLabel = OntoGlobal.GetValueLabel(Child)
HTMLElement.AddHasValue(ChildValueClassLabel,ChildValueLabel)
if OntoGlobal.PropMarker[Child].find("Max") != -1:
ChildValueLabelMax = OntoGlobal.GetValueLabelMax(Child)
ChildMaxNumber = OntoGlobal.GetMaxNumber(Child)
HTMLElement.MaxVisited(ChildValueLabelMax)
HTMLElement.AddMax(ChildValueLabelMax,ChildMaxNumber)
if OntoGlobal.PropMarker[Child].find("Min") != -1:
ChildValueLabelMin = OntoGlobal.GetValueLabelMin(Child)
if OntoGlobal.PropMarker[Child].find("Alter") != -1:
ChildValueAlterLabel = OntoGlobal.GetValueAlternative(Child)
if ChildValueAlterLabel != -1:
HTMLElement.AddAlternativeValue(ChildValueAlterLabel, Child, MyGraph, Namespace)
# ChildMinNumber = OntoGlobal.GetMinNumber(Child)
# HTMLElement.PropertyAsterisk(Child, ChildMinNumber)
HTMLElement.AddMin(ChildValueLabelMin,ChildMinNumber,HTMLFile.ConstraintCount,Child,MyGraph,Namespace)
HTMLElement.AddOrphanMax(Child,HTMLFile.ConstraintCount,MyGraph, Namespace)
HTMLElement.OtherTextBox(Child, MyGraph, Namespace)
HTMLFile.AddJS(HTMLElement.GetJSScript())
HTMLFile.AddHTMLTreeJS(HTMLElement.GetHTMLTreeJS())
HTMLFile.IncreaseConstraintCount()
if HTMLElement.HasContent():
HTMLFile.AddHTMLElement(HTMLElement) # attach HTMLElement to HTMLPage
ParentFileName = Parent.replace(" ","_")
ParentFileName = ParentFileName.decode("utf-8")
FileName = "Page_"+ParentFileName+".php"
# AtelierIndexPage.write("<a href=\"static/"+FileName.encode('ascii', 'xmlcharrefreplace')+"?session_name={{ session_name }}\">"+Parent+"</a><br>\n")
# AtelierIndexPage.write("<span style='color:green'>$$</span><a href=\"/SelectPage?page="+FileName.encode('ascii', 'xmlcharrefreplace')+"&session_name={{ session_name }}&atelier_URI={{ atelier_URI }}\">"+Parent+"</a><br>\n")
# AtelierIndexPage.write("<a href=\"/SelectPage?page="+FileName.encode('ascii', 'xmlcharrefreplace')+"&session_name={{ session_name }}&atelier_URI={{ atelier_URI }}\">"+Parent+"</a><br>\n")
HTMLFile.Finalize()
HTMLFile.WriteToFile()
# AtelierIndexPage.write("</div></div>")
# AtelierIndexPage.close()
EndTime = time.time()
Duration = round(EndTime - StartTime, 2)
    print("Generated HTML pages in "+str(Duration)+" seconds.")
print(count_pages)
| 56.789474 | 721 | 0.712962 |
721a3c28c6d44ac82f8a0131a17146a019a1f561 | 627 | py | Python | utility/hash_util.py | andradediego/blockchainpython | f1b08c93c226458b09dab2b4c5d76a8b4cd83bcf | ["MIT"] | null | null | null | utility/hash_util.py | andradediego/blockchainpython | f1b08c93c226458b09dab2b4c5d76a8b4cd83bcf | ["MIT"] | null | null | null | utility/hash_util.py | andradediego/blockchainpython | f1b08c93c226458b09dab2b4c5d76a8b4cd83bcf | ["MIT"] | null | null | null |
import hashlib as hl
import json
# __all__ = ['hash_string_256', 'hash_block']
def hash_string_256(string):
"""Create a SHA256 hash for a given input string.
Arguments:
:string: The string which should be hashed.
"""
return hl.sha256(string).hexdigest()
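# Example (illustrative): the caller must pass bytes, so encode the string first.
#     >>> hash_string_256('hello'.encode())
#     '2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824'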
def hash_block(block):
"""Hashes a block and returns a string representation of it.
Arguments:
:block: The block that should be hashed.
"""
hashable_block = block.__dict__.copy()
hashable_block['transactions'] = [tx.to_ordered_dict() for tx in hashable_block['transactions']]
    return hash_string_256(json.dumps(hashable_block, sort_keys=True).encode())
 | 27.26087 | 97 | 0.744817 |
6b792a85e194e54a157977988edcfc6e4bb7a3ba | 2,119 | py | Python | examples/py/asciichart.py | diwenshi61/ccxt | ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6 | ["MIT"] | 24,910 | 2017-10-27T21:41:59.000Z | 2022-03-31T23:08:57.000Z | examples/py/asciichart.py | diwenshi61/ccxt | ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6 | ["MIT"] | 8,201 | 2017-10-28T10:19:28.000Z | 2022-03-31T23:49:37.000Z | examples/py/asciichart.py | diwenshi61/ccxt | ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6 | ["MIT"] | 6,632 | 2017-10-28T02:53:24.000Z | 2022-03-31T23:20:14.000Z |
# -*- coding: utf-8 -*-
# This file is a copied implementation from my asciichart repository on GitHub
# https://github.com/kroitor/asciichart
from math import cos
# from math import sin
from math import pi
from math import floor
from math import ceil
def plot(series, cfg={}):
minimum = min(series)
maximum = max(series)
interval = abs(float(maximum) - float(minimum))
offset = cfg['offset'] if 'offset' in cfg else 3
# padding = cfg['padding'] if 'padding' in cfg else ' '
height = cfg['height'] if 'height' in cfg else interval
ratio = height / interval
# print(minimum,ratio,type(minimum))
min2 = floor(float(minimum) * ratio)
max2 = ceil(float(maximum) * ratio)
intmin2 = int(min2)
intmax2 = int(max2)
rows = abs(intmax2 - intmin2)
width = len(series) + offset
# format = cfg['format'] if 'format' in cfg else lambda x: (padding + '{:.2f}'.format(x))[:-len(padding)]
result = [[' '] * width for i in range(rows + 1)]
# axis and labels
for y in range(intmin2, intmax2 + 1):
label = '{:8.2f}'.format(float(maximum) - ((y - intmin2) * interval / rows))
result[y - intmin2][max(offset - len(label), 0)] = label
result[y - intmin2][offset - 1] = '┼' if y == 0 else '┤'
y0 = int(series[0] * ratio - min2)
result[rows - y0][offset - 1] = '┼' # first value
for x in range(0, len(series) - 1): # plot the line
y0 = int(round(series[x + 0] * ratio) - intmin2)
y1 = int(round(series[x + 1] * ratio) - intmin2)
if y0 == y1:
result[rows - y0][x + offset] = '─'
else:
result[rows - y1][x + offset] = '╰' if y0 > y1 else '╭'
result[rows - y0][x + offset] = '╮' if y0 > y1 else '╯'
start = min(y0, y1) + 1
end = max(y0, y1)
for y in range(start, end):
result[rows - y][x + offset] = '│'
return '\n'.join([''.join(row) for row in result])
if __name__ == '__main__':
width = 180
series = [15 * cos(i * ((pi * 4) / width)) for i in range(width)]
print(plot(series))
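    # A second, height-limited chart (illustrative): 'height' caps the number of rows,
    # 'offset' widens the left margin reserved for the axis labels.
    print(plot(series, {'height': 10, 'offset': 5}))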
| 32.6 | 109 | 0.563001 |
952b7c7c2a1d7e8d80506304efbdc9860d74e770 | 1,075 | py | Python | Day01-15/code/Day13/test2.py | EngrSaad2/Python-100-Days | ab0b26714b1df50d02a1433dc82f2a3fb025be5c | ["Apache-2.0"] | 6 | 2020-04-22T14:07:51.000Z | 2021-09-07T12:55:23.000Z | Day01-15/code/Day13/test2.py | 2462612540/Python-Language | a676d1274a04ff03f1aea0de9c58019d6ef8f5fe | ["Apache-2.0"] | 88 | 2019-10-31T12:30:02.000Z | 2020-08-14T12:17:12.000Z | Day01-15/code/Day13/test2.py | 2462612540/Python-Language | a676d1274a04ff03f1aea0de9c58019d6ef8f5fe | ["Apache-2.0"] | 4 | 2019-08-25T05:51:00.000Z | 2021-04-16T08:14:16.000Z |
import time
from threading import Thread, Lock
class Account(object):
def __init__(self, balance=0):
self._balance = balance
self._lock = Lock()
@property
def balance(self):
return self._balance
def deposit(self, money):
        # When several threads access the same resource concurrently, a race can leave the
        # resource in an inconsistent state. A resource shared by multiple threads is called
        # a critical resource, and access to it must be protected.
if money > 0:
self._lock.acquire()
try:
new_balance = self._balance + money
time.sleep(0.01)
self._balance = new_balance
finally:
self._lock.release()
class AddMoneyThread(Thread):
def __init__(self, account):
super().__init__()
self._account = account
def run(self):
self._account.deposit(1)
def main():
account = Account(1000)
tlist = []
for _ in range(100):
t = AddMoneyThread(account)
tlist.append(t)
t.start()
for t in tlist:
t.join()
    print('Account balance: %d yuan' % account.balance)
if __name__ == '__main__':
main()
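# Note (illustrative): the acquire/try/finally/release pattern in Account.deposit can also
# be written with the lock as a context manager, which releases it automatically:
#
#     with self._lock:
#         new_balance = self._balance + money
#         time.sleep(0.01)
#         self._balance = new_balance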
| 20.673077 | 51 | 0.567442 |
dffba646dfd75b93f251e8e785b419c52cef6312 | 11,522 | py | Python | homeassistant/components/homematicip_cloud/climate.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 4 | 2021-07-11T09:11:00.000Z | 2022-02-27T14:43:50.000Z | homeassistant/components/homematicip_cloud/climate.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 277 | 2021-10-04T06:39:33.000Z | 2021-12-28T22:04:17.000Z | homeassistant/components/homematicip_cloud/climate.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 3 | 2022-01-02T18:49:54.000Z | 2022-01-25T02:03:54.000Z |
"""Support for HomematicIP Cloud climate devices."""
from __future__ import annotations
from typing import Any
from homematicip.aio.device import AsyncHeatingThermostat, AsyncHeatingThermostatCompact
from homematicip.aio.group import AsyncHeatingGroup
from homematicip.base.enums import AbsenceType
from homematicip.device import Switch
from homematicip.functionalHomes import IndoorClimateHome
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_ECO,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import DOMAIN as HMIPC_DOMAIN, HomematicipGenericEntity
from .hap import HomematicipHAP
HEATING_PROFILES = {"PROFILE_1": 0, "PROFILE_2": 1, "PROFILE_3": 2}
COOLING_PROFILES = {"PROFILE_4": 3, "PROFILE_5": 4, "PROFILE_6": 5}
ATTR_PRESET_END_TIME = "preset_end_time"
PERMANENT_END_TIME = "permanent"
HMIP_AUTOMATIC_CM = "AUTOMATIC"
HMIP_MANUAL_CM = "MANUAL"
HMIP_ECO_CM = "ECO"
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the HomematicIP climate from a config entry."""
hap = hass.data[HMIPC_DOMAIN][config_entry.unique_id]
entities = []
for device in hap.home.groups:
if isinstance(device, AsyncHeatingGroup):
entities.append(HomematicipHeatingGroup(hap, device))
if entities:
async_add_entities(entities)
class HomematicipHeatingGroup(HomematicipGenericEntity, ClimateEntity):
"""Representation of the HomematicIP heating group.
Heat mode is supported for all heating devices incl. their defined profiles.
Boost is available for radiator thermostats only.
Cool mode is only available for floor heating systems, if basically enabled in the hmip app.
"""
def __init__(self, hap: HomematicipHAP, device: AsyncHeatingGroup) -> None:
"""Initialize heating group."""
device.modelType = "HmIP-Heating-Group"
super().__init__(hap, device)
self._simple_heating = None
if device.actualTemperature is None:
self._simple_heating = self._first_radiator_thermostat
@property
def device_info(self) -> DeviceInfo:
"""Return device specific attributes."""
return DeviceInfo(
identifiers={(HMIPC_DOMAIN, self._device.id)},
manufacturer="eQ-3",
model=self._device.modelType,
name=self._device.label,
via_device=(HMIPC_DOMAIN, self._device.homeId),
)
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_PRESET_MODE | SUPPORT_TARGET_TEMPERATURE
@property
def target_temperature(self) -> float:
"""Return the temperature we try to reach."""
return self._device.setPointTemperature
@property
def current_temperature(self) -> float:
"""Return the current temperature."""
if self._simple_heating:
return self._simple_heating.valveActualTemperature
return self._device.actualTemperature
@property
def current_humidity(self) -> int:
"""Return the current humidity."""
return self._device.humidity
@property
    def hvac_mode(self) -> str:
        """Return the current hvac operation mode (heat, cool, auto or off)."""
if self._disabled_by_cooling_mode and not self._has_switch:
return HVAC_MODE_OFF
if self._device.boostMode:
return HVAC_MODE_HEAT
if self._device.controlMode == HMIP_MANUAL_CM:
return HVAC_MODE_HEAT if self._heat_mode_enabled else HVAC_MODE_COOL
return HVAC_MODE_AUTO
@property
def hvac_modes(self) -> list[str]:
"""Return the list of available hvac operation modes."""
if self._disabled_by_cooling_mode and not self._has_switch:
return [HVAC_MODE_OFF]
return (
[HVAC_MODE_AUTO, HVAC_MODE_HEAT]
if self._heat_mode_enabled
else [HVAC_MODE_AUTO, HVAC_MODE_COOL]
)
@property
def hvac_action(self) -> str | None:
"""
Return the current hvac_action.
This is only relevant for radiator thermostats.
"""
if (
self._device.floorHeatingMode == "RADIATOR"
and self._has_radiator_thermostat
and self._heat_mode_enabled
):
return (
CURRENT_HVAC_HEAT if self._device.valvePosition else CURRENT_HVAC_IDLE
)
return None
@property
def preset_mode(self) -> str | None:
"""Return the current preset mode."""
if self._device.boostMode:
return PRESET_BOOST
if self.hvac_mode in (HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF):
return PRESET_NONE
if self._device.controlMode == HMIP_ECO_CM:
if self._indoor_climate.absenceType == AbsenceType.VACATION:
return PRESET_AWAY
if self._indoor_climate.absenceType in [
AbsenceType.PARTY,
AbsenceType.PERIOD,
AbsenceType.PERMANENT,
]:
return PRESET_ECO
return (
self._device.activeProfile.name
if self._device.activeProfile.name in self._device_profile_names
else None
)
@property
def preset_modes(self) -> list[str]:
"""Return a list of available preset modes incl. hmip profiles."""
# Boost is only available if a radiator thermostat is in the room,
# and heat mode is enabled.
profile_names = self._device_profile_names
presets = []
if (
self._heat_mode_enabled and self._has_radiator_thermostat
) or self._has_switch:
if not profile_names:
presets.append(PRESET_NONE)
presets.append(PRESET_BOOST)
presets.extend(profile_names)
return presets
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return self._device.minTemperature
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return self._device.maxTemperature
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
if (temperature := kwargs.get(ATTR_TEMPERATURE)) is None:
return
if self.min_temp <= temperature <= self.max_temp:
await self._device.set_point_temperature(temperature)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
if hvac_mode not in self.hvac_modes:
return
if hvac_mode == HVAC_MODE_AUTO:
await self._device.set_control_mode(HMIP_AUTOMATIC_CM)
else:
await self._device.set_control_mode(HMIP_MANUAL_CM)
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if preset_mode not in self.preset_modes:
return
if self._device.boostMode and preset_mode != PRESET_BOOST:
await self._device.set_boost(False)
if preset_mode == PRESET_BOOST:
await self._device.set_boost()
if preset_mode in self._device_profile_names:
profile_idx = self._get_profile_idx_by_name(preset_mode)
if self._device.controlMode != HMIP_AUTOMATIC_CM:
await self.async_set_hvac_mode(HVAC_MODE_AUTO)
await self._device.set_active_profile(profile_idx)
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return the state attributes of the access point."""
state_attr = super().extra_state_attributes
if self._device.controlMode == HMIP_ECO_CM:
if self._indoor_climate.absenceType in [
AbsenceType.PARTY,
AbsenceType.PERIOD,
AbsenceType.VACATION,
]:
state_attr[ATTR_PRESET_END_TIME] = self._indoor_climate.absenceEndTime
elif self._indoor_climate.absenceType == AbsenceType.PERMANENT:
state_attr[ATTR_PRESET_END_TIME] = PERMANENT_END_TIME
return state_attr
@property
def _indoor_climate(self) -> IndoorClimateHome:
"""Return the hmip indoor climate functional home of this group."""
return self._home.get_functionalHome(IndoorClimateHome)
@property
def _device_profiles(self) -> list[Any]:
"""Return the relevant profiles."""
return [
profile
for profile in self._device.profiles
if profile.visible
and profile.name != ""
and profile.index in self._relevant_profile_group
]
@property
def _device_profile_names(self) -> list[str]:
"""Return a collection of profile names."""
return [profile.name for profile in self._device_profiles]
def _get_profile_idx_by_name(self, profile_name: str) -> int:
"""Return a profile index by name."""
relevant_index = self._relevant_profile_group
index_name = [
profile.index
for profile in self._device_profiles
if profile.name == profile_name
]
return relevant_index[index_name[0]]
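    # Illustrative resolution using the module-level tables defined above:
    #
    #     HEATING_PROFILES['PROFILE_2']  # -> 1, used while heating is enabled
    #     COOLING_PROFILES['PROFILE_5']  # -> 4, used while cooling is active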
@property
def _heat_mode_enabled(self) -> bool:
"""Return, if heating mode is enabled."""
return not self._device.cooling
@property
def _disabled_by_cooling_mode(self) -> bool:
"""Return, if group is disabled by the cooling mode."""
return self._device.cooling and (
self._device.coolingIgnored or not self._device.coolingAllowed
)
@property
def _relevant_profile_group(self) -> dict[str, int]:
"""Return the relevant profile groups."""
if self._disabled_by_cooling_mode:
return {}
return HEATING_PROFILES if self._heat_mode_enabled else COOLING_PROFILES
@property
def _has_switch(self) -> bool:
"""Return, if a switch is in the hmip heating group."""
for device in self._device.devices:
if isinstance(device, Switch):
return True
return False
@property
def _has_radiator_thermostat(self) -> bool:
"""Return, if a radiator thermostat is in the hmip heating group."""
return bool(self._first_radiator_thermostat)
@property
def _first_radiator_thermostat(
self,
) -> AsyncHeatingThermostat | AsyncHeatingThermostatCompact | None:
"""Return the first radiator thermostat from the hmip heating group."""
for device in self._device.devices:
if isinstance(
device, (AsyncHeatingThermostat, AsyncHeatingThermostatCompact)
):
return device
return None
| 33.988201 | 96 | 0.658827 |
c949ca6b847923abbfa491c94050bab2e2263710 | 2,665 | py | Python | project_euler/001-050/08.py | floppp/programming_challenges | 42df1b72faf5ddf907296f90e9b14e014d2ea13b | ["MIT"] | null | null | null | project_euler/001-050/08.py | floppp/programming_challenges | 42df1b72faf5ddf907296f90e9b14e014d2ea13b | ["MIT"] | null | null | null | project_euler/001-050/08.py | floppp/programming_challenges | 42df1b72faf5ddf907296f90e9b14e014d2ea13b | ["MIT"] | null | null | null |
'''
The four adjacent digits in the 1000-digit number that have the greatest
product are 9 × 9 × 8 × 9 = 5832.
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
Find the thirteen adjacent digits in the 1000-digit number that have the
greatest product. What is the value of this product?
'''
from numpy import *
a = 7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450
list_a = [int(x) for x in str(a)]
def product_adjacent(num: "iterable<int>", adjacent: "int") -> "int":
res = 1
    for i in range(len(num) - adjacent + 1):  # include the final window
        aux = 1
        for j in range(i, i + adjacent):
            aux *= num[j]  # use the parameter rather than the global list_a
if aux > res:
res = aux
return res
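# Equivalent sliding-window formulation (illustrative alternative, same result): multiply
# each window of `adjacent` digits with functools.reduce and keep the maximum.
def product_adjacent_reduce(num, adjacent):
    from functools import reduce
    return max(
        reduce(lambda x, y: x * y, num[i:i + adjacent])
        for i in range(len(num) - adjacent + 1)
    )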
print(product_adjacent(list_a, 13))
# Solution: 23514624000
 | 55.520833 | 1,004 | 0.899812 |
7d4d02b7c7f85aebb5a24805f3e96d2c2247d0e6 | 1,843 | py | Python | stt.py | Goneiross/Yumiakui | 594f4445847112c5bb8e76ac63fb1b7c9b798902 | ["MIT"] | null | null | null | stt.py | Goneiross/Yumiakui | 594f4445847112c5bb8e76ac63fb1b7c9b798902 | ["MIT"] | null | null | null | stt.py | Goneiross/Yumiakui | 594f4445847112c5bb8e76ac63fb1b7c9b798902 | ["MIT"] | null | null | null |
import json
import os
from os.path import join, dirname
from ibm_watson import SpeechToTextV1
from ibm_watson.websocket import RecognizeCallback, AudioSource
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
STT_NAME = "GOOGLE"
def stt_init():
"""
    Initialize the chosen STT backend.
Supports IBM Watson and Google Cloud.
Returns: stt (SpeechToText)
"""
if (STT_NAME == "IBM"):
f = open(join(dirname(__file__), "data/", "credentials/", "IBM_STT_key"), "r")
IMB_KEY = f.readline()
f.close()
f = open(join(dirname(__file__), "data/", "credentials/", "IBM_STT_url"), "r")
IMB_URL = f.readline()
f.close()
authenticator = IAMAuthenticator(IMB_KEY)
speech_to_text = SpeechToTextV1(authenticator=authenticator)
speech_to_text.set_service_url(IMB_URL)
return speech_to_text
elif (STT_NAME == "GOOGLE_CLOUD"):
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]=join(dirname(__file__), "data/", "credentials/", "GOOGLE_STT_key.JSON")
elif (STT_NAME == "GOOGLE"):
pass
else:
print("ERROR - WRONG STT NAME")
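# Minimal end-to-end sketch (illustrative; assumes the IBM credential files exist and that
# `audio` exposes get_wav_data(), e.g. audio captured with the speech_recognition package):
#
#     stt = stt_init()
#     text = stt_transcript(stt, audio)
#     print(text)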
def stt_transcript(stt, audioSource):
"""
Recognizes the voice to return a text.
Parameters: audioSource(Audio)
Returns: text (string)
"""
if (STT_NAME == "IBM"):
results = stt.recognize(audio=audioSource.get_wav_data(), content_type='audio/wav').get_result()
print(results)
r = ""
try:
r = results.get('results').pop().get('alternatives').pop().get('transcript')
pass
except:
print("ERROR")
pass
print(r)
return (r)
elif (STT_NAME == "GOOGLE_CLOUD"):
return stt.recognize_google_cloud(audioSource, language = 'en-US')
else:
        print("ERROR - WRONG STT NAME")
 | 32.333333 | 124 | 0.627238 |
5da3b6368220dffa8429831e9000ad7b94a2faf2 | 756 | py | Python | src/modules/encoder.py | andompesta/omnitext | da6467b6cd9086b2278f7a1560596261f125800e | ["MIT"] | null | null | null | src/modules/encoder.py | andompesta/omnitext | da6467b6cd9086b2278f7a1560596261f125800e | ["MIT"] | null | null | null | src/modules/encoder.py | andompesta/omnitext | da6467b6cd9086b2278f7a1560596261f125800e | ["MIT"] | null | null | null |
from torch import nn, Tensor
from typing import Optional
from src.config import BaseConf
from src.modules import (
EncoderLayer
)
class Encoder(nn.Module):
def __init__(self, conf: BaseConf):
super(Encoder, self).__init__()
self.layer = nn.ModuleList([EncoderLayer(conf) for _ in range(conf.num_hidden_layers)])
def forward(
self,
hidden_states: Tensor,
attention_mask: Optional[Tensor] = None,
**kwargs
) -> Tensor:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
**kwargs
)
return hidden_states
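# Minimal forward-pass sketch (illustrative; the exact fields BaseConf/EncoderLayer expect
# are defined elsewhere -- only num_hidden_layers is read directly in this file):
#
#     conf = BaseConf(...)                    # must provide num_hidden_layers etc.
#     encoder = Encoder(conf)
#     hidden = torch.randn(2, 16, 768)        # (batch, seq_len, hidden_size)
#     out = encoder(hidden_states=hidden, attention_mask=None)
#     assert out.shape == hidden.shape        # shape is preserved layer by layer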
| 28 | 95 | 0.612434 |
3f780cc185e57aaf36a1d1174e5ac2c39d8e642d | 10,996 | py | Python | DjangoUeditor/views.py | niuwenju/minicms | 745f34304abbe30c88eba9f3878f70332b8deba3 | ["MIT"] | null | null | null | DjangoUeditor/views.py | niuwenju/minicms | 745f34304abbe30c88eba9f3878f70332b8deba3 | ["MIT"] | null | null | null | DjangoUeditor/views.py | niuwenju/minicms | 745f34304abbe30c88eba9f3878f70332b8deba3 | ["MIT"] | null | null | null |
#coding:utf-8
from importlib import import_module
from django.http import HttpResponse
import settings as USettings
import os
import json
from django.views.decorators.csrf import csrf_exempt
import datetime,random
import urllib
def get_path_format_vars():
return {
"year":datetime.datetime.now().strftime("%Y"),
"month":datetime.datetime.now().strftime("%m"),
"day":datetime.datetime.now().strftime("%d"),
"date": datetime.datetime.now().strftime("%Y%m%d"),
"time":datetime.datetime.now().strftime("%H%M%S"),
"datetime":datetime.datetime.now().strftime("%Y%m%d%H%M%S"),
"rnd":random.randrange(100,999)
}
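# Illustrative expansion (the template string is an assumption): UEditor path settings use
# old-style %(key)s placeholders filled from this dict; callers later add basename/extname/
# filename before formatting.
#
#     "upload/%(year)s/%(month)s/%(day)s/" % get_path_format_vars()
#     # -> e.g. "upload/2025/01/31/"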
# Save the uploaded file to disk
def save_upload_file(PostFile,FilePath):
try:
f = open(FilePath, 'wb')
for chunk in PostFile.chunks():
f.write(chunk)
except Exception,E:
f.close()
return u"写入文件错误:"+ E.message
f.close()
return u"SUCCESS"
@csrf_exempt
def get_ueditor_settings(request):
return HttpResponse(json.dumps(USettings.UEditorUploadSettings,ensure_ascii=False), content_type="application/javascript")
@csrf_exempt
def get_ueditor_controller(request):
    """Dispatch a UEditor backend request to the handler for the requested action."""
action=request.GET.get("action","")
    responseAction={
"config":get_ueditor_settings,
"uploadimage":UploadFile,
"uploadscrawl":UploadFile,
"uploadvideo":UploadFile,
"uploadfile":UploadFile,
"catchimage":catcher_remote_image,
"listimage":list_files,
"listfile":list_files
}
    return responseAction[action](request)
@csrf_exempt
def list_files(request):
    """List files for the UEditor file/image manager."""
if request.method!="GET":
return HttpResponse(json.dumps(u"{'state:'ERROR'}") ,content_type="application/javascript")
    # Get the requested action
action=request.GET.get("action","listimage")
allowFiles={
"listfile":USettings.UEditorUploadSettings.get("fileManagerAllowFiles",[]),
"listimage":USettings.UEditorUploadSettings.get("imageManagerAllowFiles",[])
}
listSize={
"listfile":USettings.UEditorUploadSettings.get("fileManagerListSize",""),
"listimage":USettings.UEditorUploadSettings.get("imageManagerListSize","")
}
listpath={
"listfile":USettings.UEditorUploadSettings.get("fileManagerListPath",""),
"listimage":USettings.UEditorUploadSettings.get("imageManagerListPath","")
}
    # Read the paging parameters
list_size=long(request.GET.get("size",listSize[action]))
list_start=long(request.GET.get("start",0))
files=[]
root_path=os.path.join(USettings.gSettings.MEDIA_ROOT,listpath[action]).replace("\\","/")
files=get_files(root_path,root_path,allowFiles[action])
if (len(files)==0):
return_info={
"state":u"未找到匹配文件!",
"list":[],
"start":list_start,
"total":0
}
else:
return_info={
"state":"SUCCESS",
"list":files[list_start:list_start+list_size],
"start":list_start,
"total":len(files)
}
return HttpResponse(json.dumps(return_info),content_type="application/javascript")
def get_files(root_path,cur_path, allow_types=[]):
files = []
items = os.listdir(cur_path)
for item in items:
item=unicode(item)
item_fullname = os.path.join(root_path,cur_path, item).replace("\\", "/")
if os.path.isdir(item_fullname):
files.extend(get_files(root_path,item_fullname, allow_types))
else:
ext = os.path.splitext(item_fullname)[1]
is_allow_list= (len(allow_types)==0) or (ext in allow_types)
if is_allow_list:
files.append({
"url":urllib.basejoin(USettings.gSettings.MEDIA_URL ,os.path.join(os.path.relpath(cur_path,root_path),item).replace("\\","/" )),
"mtime":os.path.getmtime(item_fullname)
})
return files
@csrf_exempt
def UploadFile(request):
    """Handle a UEditor upload (file, image, video or scrawl)."""
if not request.method=="POST":
return HttpResponse(json.dumps(u"{'state:'ERROR'}"),content_type="application/javascript")
state="SUCCESS"
action=request.GET.get("action")
    # Field-name setting for each upload action
upload_field_name={
"uploadfile":"fileFieldName","uploadimage":"imageFieldName",
"uploadscrawl":"scrawlFieldName","catchimage":"catcherFieldName",
"uploadvideo":"videoFieldName",
}
UploadFieldName=request.GET.get(upload_field_name[action],USettings.UEditorUploadSettings.get(action,"upfile"))
    # Scrawl uploads arrive base64-encoded and are handled separately
if action=="uploadscrawl":
upload_file_name="scrawl.png"
upload_file_size=0
else:
        # Get the uploaded file
file=request.FILES.get(UploadFieldName,None)
if file is None:return HttpResponse(json.dumps(u"{'state:'ERROR'}") ,content_type="application/javascript")
upload_file_name=file.name
upload_file_size=file.size
    # Original name of the uploaded file
upload_original_name,upload_original_ext=os.path.splitext(upload_file_name)
    # Validate the file type
upload_allow_type={
"uploadfile":"fileAllowFiles",
"uploadimage":"imageAllowFiles",
"uploadvideo":"videoAllowFiles"
}
if upload_allow_type.has_key(action):
allow_type= list(request.GET.get(upload_allow_type[action],USettings.UEditorUploadSettings.get(upload_allow_type[action],"")))
if not upload_original_ext in allow_type:
state=u"服务器不允许上传%s类型的文件。" % upload_original_ext
    # Validate the file size
upload_max_size={
"uploadfile":"filwMaxSize",
"uploadimage":"imageMaxSize",
"uploadscrawl":"scrawlMaxSize",
"uploadvideo":"videoMaxSize"
}
max_size=long(request.GET.get(upload_max_size[action],USettings.UEditorUploadSettings.get(upload_max_size[action],0)))
if max_size!=0:
from utils import FileSize
MF=FileSize(max_size)
if upload_file_size>MF.size:
state=u"上传文件大小不允许超过%s。" % MF.FriendValue
    # Make sure the output directory exists; create it if necessary
upload_path_format={
"uploadfile":"filePathFormat",
"uploadimage":"imagePathFormat",
"uploadscrawl":"scrawlPathFormat",
"uploadvideo":"videoPathFormat"
}
path_format_var=get_path_format_vars()
path_format_var.update({
"basename":upload_original_name,
"extname":upload_original_ext[1:],
"filename":upload_file_name,
})
    # Compute the output file path
OutputPathFormat,OutputPath,OutputFile=get_output_path(request,upload_path_format[action],path_format_var)
    # Write the file once all checks have passed
if state=="SUCCESS":
if action=="uploadscrawl":
state=save_scrawl_file(request, os.path.join(OutputPath,OutputFile))
else:
            # Save to disk; on failure the error message becomes the returned state
upload_module_name = USettings.UEditorUploadSettings.get("upload_module", None)
if upload_module_name:
mod = import_module(upload_module_name)
state = mod.upload(file, OutputPathFormat)
else:
state = save_upload_file(file, os.path.join(OutputPath, OutputFile))
    # Build the response data
    return_info = {
        'url': urllib.basejoin(USettings.gSettings.MEDIA_URL , OutputPathFormat) , # URL of the saved file
        'original': upload_file_name, # original file name
        'type': upload_original_ext,
        'state': state, # upload state; UEditor expects SUCCESS, any other value is shown as-is in the upload dialog
        'size': upload_file_size
    }
return HttpResponse(json.dumps(return_info,ensure_ascii=False),content_type="application/javascript")
@csrf_exempt
def catcher_remote_image(request):
    """Catch remote images: when catchRemoteImageEnable is true and an image URL inserted in
    the editor is on a different domain than this site, download the image to local storage.
    """
if not request.method=="POST":
return HttpResponse(json.dumps( u"{'state:'ERROR'}"),content_type="application/javascript")
state="SUCCESS"
allow_type= list(request.GET.get("catcherAllowFiles",USettings.UEditorUploadSettings.get("catcherAllowFiles","")))
max_size=long(request.GET.get("catcherMaxSize",USettings.UEditorUploadSettings.get("catcherMaxSize",0)))
remote_urls=request.POST.getlist("source[]",[])
catcher_infos=[]
path_format_var=get_path_format_vars()
for remote_url in remote_urls:
        # Original name of the remote file
remote_file_name=os.path.basename(remote_url)
remote_original_name,remote_original_ext=os.path.splitext(remote_file_name)
        # Validate the file type
if remote_original_ext in allow_type:
path_format_var.update({
"basename":remote_original_name,
"extname":remote_original_ext[1:],
"filename":remote_original_name
})
            # Compute the file name to save as
o_path_format,o_path,o_file=get_output_path(request,"catcherPathFormat",path_format_var)
o_filename=os.path.join(o_path,o_file).replace("\\","/")
            # Fetch the remote image
try:
remote_image=urllib.urlopen(remote_url)
                # Write the fetched data to a local file
try:
f = open(o_filename, 'wb')
f.write(remote_image.read())
f.close()
state="SUCCESS"
                except Exception as E:
state=u"写入抓取图片文件错误:%s" % E.message
            except Exception as E:
state=u"抓取图片错误:%s" % E.message
catcher_infos.append({
"state":state,
"url":urllib.basejoin(USettings.gSettings.MEDIA_URL , o_path_format),
"size":os.path.getsize(o_filename),
"title":os.path.basename(o_file),
"original":remote_file_name,
"source":remote_url
})
return_info={
"state":"SUCCESS" if len(catcher_infos) >0 else "ERROR",
"list":catcher_infos
}
return HttpResponse(json.dumps(return_info,ensure_ascii=False),content_type="application/javascript")
def get_output_path(request,path_format,path_format_var):
    # Get the output file path
OutputPathFormat=(request.GET.get(path_format,USettings.UEditorSettings["defaultPathFormat"]) % path_format_var).replace("\\","/")
    # Split OutputPathFormat into directory and file name
OutputPath,OutputFile=os.path.split(OutputPathFormat)
OutputPath=os.path.join(USettings.gSettings.MEDIA_ROOT,OutputPath)
    if not OutputFile:  # If OutputFile is empty, OutputPathFormat has no file name, so fall back to the default
OutputFile=USettings.UEditorSettings["defaultPathFormat"] % path_format_var
OutputPathFormat=os.path.join(OutputPathFormat,OutputFile)
if not os.path.exists(OutputPath):
os.makedirs(OutputPath)
return ( OutputPathFormat,OutputPath,OutputFile)
# Handle scrawl (doodle) uploads
@csrf_exempt
def save_scrawl_file(request,filename):
import base64
try:
content=request.POST.get(USettings.UEditorUploadSettings.get("scrawlFieldName","upfile"))
f = open(filename, 'wb')
f.write(base64.decodestring(content))
f.close()
state="SUCCESS"
    except Exception as E:
state="写入图片文件错误:%s" % E.message
return state
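# Editor's note (illustrative, not part of the original module): a successful
# upload handled above answers with JSON of this shape, which the UEditor
# front end consumes directly; the concrete values below are placeholders.
#   {"url": "upload/abc.png", "original": "abc.png", "type": ".png",
#    "state": "SUCCESS", "size": 10240}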
| 35.81759 | 148 | 0.651328 |
95d939e07728975669a564ebdc8daa88df65363e | 4,795 | py | Python | tests/test_api_interface.py | CSugarPrince/Historical-Forex-Predictor | f50eb317852e540c3dfb3199769ce0ba250bf741 | [
"MIT"
] | null | null | null | tests/test_api_interface.py | CSugarPrince/Historical-Forex-Predictor | f50eb317852e540c3dfb3199769ce0ba250bf741 | [
"MIT"
] | null | null | null | tests/test_api_interface.py | CSugarPrince/Historical-Forex-Predictor | f50eb317852e540c3dfb3199769ce0ba250bf741 | [
"MIT"
] | null | null | null | import unittest
from unittest.mock import patch, Mock
# appending to sys.path enables scripts in test folder to import code from
# parent directory where src files are located
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import api_interface
""" unittest design notes:
The two functions tested do two things:
1. they make an http request to a server
    2. they download and parse the response (which is a json file)
I am using a mock object to impersonate the 'requests.get' function so that
the unittest code doesn't actually make a http request.
The return value of the mocked 'requests.get' changes based on what I am testing for
The things I have tested for include:
    1. The function makes the request and gets a response containing the desired info
2. The function makes the request but it doesn't get a response from the server (requests.get returns None)
3. The function makes the request and gets a response, but it is an error message. When the function receives an error message
it should print out the error message (will be changed to a log) and it will return a None object
"""
class TestApiInterface(unittest.TestCase):
def setUp(self):
print('setUp')
self.sample_date_1 = '2002-04-14'
def tearDown(self):
pass
def test_get_fixer_historical_rates(self):
print('test_get_fixer_historical_rates')
with patch('api_interface.requests.get') as mocked_get:
# test that a successful call yields a successful response
mocked_get.return_value = Mock()
mocked_get.return_value.json.return_value = {'success': True, 'info': 'foo'}
function_output = api_interface.get_fixer_historical_rates(self.sample_date_1)
self.assertEqual(function_output['success'], True)
self.assertEqual(function_output['source'], 'http://data.fixer.io/api/')
# test that an unsuccessful call causes the function to return None
# case 1: something wrong on server side (internet is down, etc...)
mocked_get.return_value = None
function_output = api_interface.get_fixer_historical_rates(self.sample_date_1)
self.assertEqual(function_output, None)
# case 2: get a response, but the response is an error message
mocked_get.return_value = Mock()
parsed_error_message = {'success': False,
'error': {
'code': 999,
'type': 'mock error'
}
}
mocked_get.return_value.json.return_value = parsed_error_message
function_output = api_interface.get_fixer_historical_rates(self.sample_date_1)
self.assertEqual(function_output, None)
def test_get_openex_historical_rates(self):
print('test_get_openex_historical_rates')
with patch('api_interface.requests.get') as mocked_get:
# test that a successful call yields a successful response
mocked_get.return_value = Mock()
mocked_get.return_value.json.return_value = {'success': True, 'info': 'foo'}
function_output = api_interface.get_openex_historical_rates(self.sample_date_1)
self.assertEqual(function_output['success'], True)
self.assertEqual(function_output['date'], self.sample_date_1)
self.assertEqual(function_output['source'], 'https://openexchangerates.org/api/')
# test that an unsuccessful call causes the function to return None
# case 1: something wrong on server side (internet is down, etc...)
mocked_get.return_value = None
function_output = api_interface.get_openex_historical_rates(self.sample_date_1)
self.assertEqual(function_output, None)
# case 2: get a response, but the response is an error message
mocked_get.return_value = Mock()
parsed_error_message = {'success' : False,
'error' : True,
'status' : 999,
'message' : 'not_available',
'description': 'mock error description'
}
mocked_get.return_value.json.return_value = parsed_error_message
function_output = api_interface.get_openex_historical_rates(self.sample_date_1)
self.assertEqual(function_output, None)
if __name__ == "__main__":
unittest.main() | 43.198198 | 126 | 0.63024 |
0b6e6a15e784a4fecbaacd2d7686ab8e1b4a8ffc | 1,801 | py | Python | 1_Crawler/1_newscrawler/newscrawler/spiders/konya.py | bap-project/SMM | e65e27b985a4aa108fdb5236bc63e70f423661bf | [
"MIT"
] | null | null | null | 1_Crawler/1_newscrawler/newscrawler/spiders/konya.py | bap-project/SMM | e65e27b985a4aa108fdb5236bc63e70f423661bf | [
"MIT"
] | null | null | null | 1_Crawler/1_newscrawler/newscrawler/spiders/konya.py | bap-project/SMM | e65e27b985a4aa108fdb5236bc63e70f423661bf | [
"MIT"
] | null | null | null | from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import HtmlXPathSelector
from datetime import datetime
from ..items import NewsItem
import pandas as pd
import re
class KonyaSpider(CrawlSpider):
name = "konya"
allowed_domains = ["https://www.bulurum.com"]
def __init__(self,end='', *args, **kwargs):
#super(CumhuriyetSpider, self).__init__(*args, **kwargs)
self.start_urls = ["https://www.bulurum.com/dir/eczaneler/konya/?page=%s" % d for d in range(0,9)]
rules = (
Rule(LinkExtractor(allow=(),deny=('.*/video/.*',), restrict_xpaths=('//*[@id="divMap"]',)), callback="parse_items", follow= True),
)
def parse_items(self, response):
hxs = HtmlXPathSelector(response)
item = NewsItem()
item["link"] = response.request.url
item["lang"] = "tr"
item["source"] = "konya"
category = hxs.xpath("/html/body/div[6]/div[2]/div[1]/div[4]/div/div[2]/div[2]/div/div[1]/div[1]/div/h2").extract()
date_time = hxs.xpath("").extract()
item["author"] = ""
title = hxs.xpath("/html/body/div[6]/div[2]/div[1]/div[4]/div/div[2]/div[2]/div/div[1]/div[2]/div[1]/div[2]").extract()
intro = hxs.xpath("//*[@id='phoneDetails_0']").extract()
new_content = ""
#
# Processing outputs
item["intro"] = ' '.join(intro)
item["title"] = ' '.join(title)
new_content = ' '.join(new_content)
new_content = re.sub('\n',' ',new_content)
item["content"] = re.sub('\s{2,}',' ',new_content)
item["category"] = '|'.join(category)
item["date_time"] = " ".join(date_time)
return(item)
| 39.152174 | 138 | 0.57357 |
abb82ec2cbeafd64de91decca87cd91a9365520f | 42,033 | py | Python | irc/client.py | larsks/irc | 187e6b4dc4cce60c7bf972d53f54723189751711 | [
"MIT"
] | null | null | null | irc/client.py | larsks/irc | 187e6b4dc4cce60c7bf972d53f54723189751711 | [
"MIT"
] | null | null | null | irc/client.py | larsks/irc | 187e6b4dc4cce60c7bf972d53f54723189751711 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Internet Relay Chat (IRC) protocol client library.
This library is intended to encapsulate the IRC protocol in Python.
It provides an event-driven IRC client framework. It has
a fairly thorough support for the basic IRC protocol, CTCP, and DCC chat.
To best understand how to make an IRC client, the reader more
or less must understand the IRC specifications. They are available
here: [IRC specifications].
The main features of the IRC client framework are:
* Abstraction of the IRC protocol.
* Handles multiple simultaneous IRC server connections.
* Handles server PONGing transparently.
* Messages to the IRC server are done by calling methods on an IRC
connection object.
* Messages from an IRC server triggers events, which can be caught
by event handlers.
* Reading from and writing to IRC server sockets are normally done
by an internal select() loop, but the select()ing may be done by
an external main loop.
* Functions can be registered to execute at specified times by the
event-loop.
* Decodes CTCP tagging correctly (hopefully); I haven't seen any
other IRC client implementation that handles the CTCP
specification subtleties.
* A kind of simple, single-server, object-oriented IRC client class
that dispatches events to instance methods is included.
Current limitations:
* Data is not written asynchronously to the server, i.e. the write()
may block if the TCP buffers are stuffed.
* DCC file transfers are not supported.
* RFCs 2810, 2811, 2812, and 2813 have not been considered.
Notes:
* connection.quit() only sends QUIT to the server.
* ERROR from the server triggers the error event and the disconnect event.
* dropping of the connection triggers the disconnect event.
.. [IRC specifications] http://www.irchelp.org/irchelp/rfc/
"""
import bisect
import re
import select
import socket
import time
import struct
import logging
import threading
import abc
import collections
import functools
import itertools
import contextlib
import warnings
import jaraco.functools
from jaraco.functools import Throttler
from jaraco.stream import buffer
from more_itertools import consume, always_iterable, repeatfunc
from . import connection
from . import events
from . import features
from . import ctcp
from . import message
from . import schedule
log = logging.getLogger(__name__)
class IRCError(Exception):
"An IRC exception"
class InvalidCharacters(ValueError):
"Invalid characters were encountered in the message"
class MessageTooLong(ValueError):
"Message is too long"
class Connection(metaclass=abc.ABCMeta):
"""
Base class for IRC connections.
"""
transmit_encoding = 'utf-8'
"encoding used for transmission"
@abc.abstractproperty
def socket(self):
"The socket for this connection"
def __init__(self, reactor):
self.reactor = reactor
def encode(self, msg):
"""Encode a message for transmission."""
return msg.encode(self.transmit_encoding)
class ServerConnectionError(IRCError):
pass
class ServerNotConnectedError(ServerConnectionError):
pass
class ServerConnection(Connection):
"""
An IRC server connection.
ServerConnection objects are instantiated by calling the server
method on a Reactor object.
"""
buffer_class = buffer.DecodingLineBuffer
socket = None
connected = False
def __init__(self, reactor):
super(ServerConnection, self).__init__(reactor)
self.features = features.FeatureSet()
# save the method args to allow for easier reconnection.
@jaraco.functools.save_method_args
def connect(
self,
server,
port,
nickname,
password=None,
username=None,
ircname=None,
connect_factory=connection.Factory(),
):
"""Connect/reconnect to a server.
Arguments:
* server - Server name
* port - Port number
* nickname - The nickname
* password - Password (if any)
* username - The username
* ircname - The IRC name ("realname")
* server_address - The remote host/port of the server
* connect_factory - A callable that takes the server address and
returns a connection (with a socket interface)
This function can be called to reconnect a closed connection.
Returns the ServerConnection object.
"""
log.debug(
"connect(server=%r, port=%r, nickname=%r, ...)", server, port, nickname
)
if self.connected:
self.disconnect("Changing servers")
self.buffer = self.buffer_class()
self.handlers = {}
self.real_server_name = ""
self.real_nickname = nickname
self.server = server
self.port = port
self.server_address = (server, port)
self.nickname = nickname
self.username = username or nickname
self.ircname = ircname or nickname
self.password = password
self.connect_factory = connect_factory
try:
self.socket = self.connect_factory(self.server_address)
except socket.error as ex:
raise ServerConnectionError("Couldn't connect to socket: %s" % ex)
self.connected = True
self.reactor._on_connect(self.socket)
# Log on...
if self.password:
self.pass_(self.password)
self.nick(self.nickname)
self.user(self.username, self.ircname)
return self
def reconnect(self):
"""
Reconnect with the last arguments passed to self.connect()
"""
self.connect(*self._saved_connect.args, **self._saved_connect.kwargs)
def close(self):
"""Close the connection.
This method closes the connection permanently; after it has
been called, the object is unusable.
"""
# Without this thread lock, there is a window during which
# select() can find a closed socket, leading to an EBADF error.
with self.reactor.mutex:
self.disconnect("Closing object")
self.reactor._remove_connection(self)
def get_server_name(self):
"""Get the (real) server name.
This method returns the (real) server name, or, more
specifically, what the server calls itself.
"""
return self.real_server_name or ""
def get_nickname(self):
"""Get the (real) nick name.
This method returns the (real) nickname. The library keeps
track of nick changes, so it might not be the nick name that
was passed to the connect() method.
"""
return self.real_nickname
@contextlib.contextmanager
def as_nick(self, name):
"""
Set the nick for the duration of the context.
"""
orig = self.get_nickname()
self.nick(name)
try:
yield orig
finally:
self.nick(orig)
def process_data(self):
"read and process input from self.socket"
try:
reader = getattr(self.socket, 'read', self.socket.recv)
new_data = reader(2 ** 14)
except socket.error:
# The server hung up.
self.disconnect("Connection reset by peer")
return
if not new_data:
# Read nothing: connection must be down.
self.disconnect("Connection reset by peer")
return
self.buffer.feed(new_data)
# process each non-empty line after logging all lines
for line in self.buffer:
log.debug("FROM SERVER: %s", line)
if not line:
continue
self._process_line(line)
def _process_line(self, line):
event = Event("all_raw_messages", self.get_server_name(), None, [line])
self._handle_event(event)
grp = _rfc_1459_command_regexp.match(line).group
source = NickMask.from_group(grp("prefix"))
command = self._command_from_group(grp("command"))
arguments = message.Arguments.from_group(grp('argument'))
tags = message.Tag.from_group(grp('tags'))
if source and not self.real_server_name:
self.real_server_name = source
if command == "nick":
if source.nick == self.real_nickname:
self.real_nickname = arguments[0]
elif command == "welcome":
# Record the nickname in case the client changed nick
# in a nicknameinuse callback.
self.real_nickname = arguments[0]
elif command == "featurelist":
self.features.load(arguments)
handler = (
self._handle_message
if command in ["privmsg", "notice"]
else self._handle_other
)
handler(arguments, command, source, tags)
def _handle_message(self, arguments, command, source, tags):
target, msg = arguments[:2]
messages = ctcp.dequote(msg)
if command == "privmsg":
if is_channel(target):
command = "pubmsg"
else:
if is_channel(target):
command = "pubnotice"
else:
command = "privnotice"
for m in messages:
if isinstance(m, tuple):
if command in ["privmsg", "pubmsg"]:
command = "ctcp"
else:
command = "ctcpreply"
m = list(m)
log.debug(
"command: %s, source: %s, target: %s, " "arguments: %s, tags: %s",
command,
source,
target,
m,
tags,
)
event = Event(command, source, target, m, tags)
self._handle_event(event)
if command == "ctcp" and m[0] == "ACTION":
event = Event("action", source, target, m[1:], tags)
self._handle_event(event)
else:
log.debug(
"command: %s, source: %s, target: %s, " "arguments: %s, tags: %s",
command,
source,
target,
[m],
tags,
)
event = Event(command, source, target, [m], tags)
self._handle_event(event)
def _handle_other(self, arguments, command, source, tags):
target = None
if command == "quit":
arguments = [arguments[0]]
elif command == "ping":
target = arguments[0]
else:
target = arguments[0] if arguments else None
arguments = arguments[1:]
if command == "mode":
if not is_channel(target):
command = "umode"
log.debug(
"command: %s, source: %s, target: %s, " "arguments: %s, tags: %s",
command,
source,
target,
arguments,
tags,
)
event = Event(command, source, target, arguments, tags)
self._handle_event(event)
@staticmethod
def _command_from_group(group):
command = group.lower()
# Translate numerics into more readable strings.
return events.numeric.get(command, command)
def _handle_event(self, event):
"""[Internal]"""
self.reactor._handle_event(self, event)
if event.type in self.handlers:
for fn in self.handlers[event.type]:
fn(self, event)
def is_connected(self):
"""Return connection status.
Returns true if connected, otherwise false.
"""
return self.connected
def add_global_handler(self, *args):
"""Add global handler.
See documentation for IRC.add_global_handler.
"""
self.reactor.add_global_handler(*args)
def remove_global_handler(self, *args):
"""Remove global handler.
See documentation for IRC.remove_global_handler.
"""
self.reactor.remove_global_handler(*args)
def action(self, target, action):
"""Send a CTCP ACTION command."""
self.ctcp("ACTION", target, action)
def admin(self, server=""):
"""Send an ADMIN command."""
self.send_items('ADMIN', server)
def cap(self, subcommand, *args):
"""
Send a CAP command according to `the spec
<http://ircv3.atheme.org/specification/capability-negotiation-3.1>`_.
Arguments:
subcommand -- LS, LIST, REQ, ACK, CLEAR, END
args -- capabilities, if required for given subcommand
Example:
.cap('LS')
.cap('REQ', 'multi-prefix', 'sasl')
.cap('END')
"""
cap_subcommands = set('LS LIST REQ ACK NAK CLEAR END'.split())
client_subcommands = set(cap_subcommands) - {'NAK'}
assert subcommand in client_subcommands, "invalid subcommand"
def _multi_parameter(args):
"""
According to the spec::
If more than one capability is named, the RFC1459 designated
sentinel (:) for a multi-parameter argument must be present.
It's not obvious where the sentinel should be present or if it
must be omitted for a single parameter, so follow convention and
only include the sentinel prefixed to the first parameter if more
than one parameter is present.
"""
if len(args) > 1:
return (':' + args[0],) + args[1:]
return args
self.send_items('CAP', subcommand, *_multi_parameter(args))
def ctcp(self, ctcptype, target, parameter=""):
"""Send a CTCP command."""
ctcptype = ctcptype.upper()
tmpl = "\001{ctcptype} {parameter}\001" if parameter else "\001{ctcptype}\001"
self.privmsg(target, tmpl.format(**vars()))
def ctcp_reply(self, target, parameter):
"""Send a CTCP REPLY command."""
self.notice(target, "\001%s\001" % parameter)
def disconnect(self, message=""):
"""Hang up the connection.
Arguments:
message -- Quit message.
"""
try:
del self.connected
except AttributeError:
return
self.quit(message)
try:
self.socket.shutdown(socket.SHUT_WR)
self.socket.close()
except socket.error:
pass
del self.socket
self._handle_event(Event("disconnect", self.server, "", [message]))
def globops(self, text):
"""Send a GLOBOPS command."""
self.send_items('GLOBOPS', ':' + text)
def info(self, server=""):
"""Send an INFO command."""
self.send_items('INFO', server)
def invite(self, nick, channel):
"""Send an INVITE command."""
self.send_items('INVITE', nick, channel)
def ison(self, nicks):
"""Send an ISON command.
Arguments:
nicks -- List of nicks.
"""
self.send_items('ISON', *tuple(nicks))
def join(self, channel, key=""):
"""Send a JOIN command."""
self.send_items('JOIN', channel, key)
def kick(self, channel, nick, comment=""):
"""Send a KICK command."""
self.send_items('KICK', channel, nick, comment and ':' + comment)
def links(self, remote_server="", server_mask=""):
"""Send a LINKS command."""
self.send_items('LINKS', remote_server, server_mask)
def list(self, channels=None, server=""):
"""Send a LIST command."""
self.send_items('LIST', ','.join(always_iterable(channels)), server)
def lusers(self, server=""):
"""Send a LUSERS command."""
self.send_items('LUSERS', server)
def mode(self, target, command):
"""Send a MODE command."""
self.send_items('MODE', target, command)
def motd(self, server=""):
"""Send an MOTD command."""
self.send_items('MOTD', server)
def names(self, channels=None):
"""Send a NAMES command."""
self.send_items('NAMES', ','.join(always_iterable(channels)))
def nick(self, newnick):
"""Send a NICK command."""
self.send_items('NICK', newnick)
def notice(self, target, text):
"""Send a NOTICE command."""
# Should limit len(text) here!
self.send_items('NOTICE', target, ':' + text)
def oper(self, nick, password):
"""Send an OPER command."""
self.send_items('OPER', nick, password)
def part(self, channels, message=""):
"""Send a PART command."""
self.send_items('PART', ','.join(always_iterable(channels)), message)
def pass_(self, password):
"""Send a PASS command."""
self.send_items('PASS', password)
def ping(self, target, target2=""):
"""Send a PING command."""
self.send_items('PING', target, target2)
def pong(self, target, target2=""):
"""Send a PONG command."""
self.send_items('PONG', target, target2)
def privmsg(self, target, text):
"""Send a PRIVMSG command."""
self.send_items('PRIVMSG', target, ':' + text)
def privmsg_many(self, targets, text):
"""Send a PRIVMSG command to multiple targets."""
target = ','.join(targets)
return self.privmsg(target, text)
def quit(self, message=""):
"""Send a QUIT command."""
# Note that many IRC servers don't use your QUIT message
# unless you've been connected for at least 5 minutes!
self.send_items('QUIT', message and ':' + message)
def _prep_message(self, string):
# The string should not contain any carriage return other than the
# one added here.
if '\n' in string:
msg = "Carriage returns not allowed in privmsg(text)"
raise InvalidCharacters(msg)
bytes = self.encode(string) + b'\r\n'
# According to the RFC http://tools.ietf.org/html/rfc2812#page-6,
# clients should not transmit more than 512 bytes.
if len(bytes) > 512:
msg = "Messages limited to 512 bytes including CR/LF"
raise MessageTooLong(msg)
return bytes
def send_items(self, *items):
"""
Send all non-empty items, separated by spaces.
"""
self.send_raw(' '.join(filter(None, items)))
def send_raw(self, string):
"""Send raw string to the server.
The string will be padded with appropriate CR LF.
"""
if self.socket is None:
raise ServerNotConnectedError("Not connected.")
sender = getattr(self.socket, 'write', self.socket.send)
try:
sender(self._prep_message(string))
log.debug("TO SERVER: %s", string)
except socket.error:
# Ouch!
self.disconnect("Connection reset by peer.")
def squit(self, server, comment=""):
"""Send an SQUIT command."""
self.send_items('SQUIT', server, comment and ':' + comment)
def stats(self, statstype, server=""):
"""Send a STATS command."""
self.send_items('STATS', statstype, server)
def time(self, server=""):
"""Send a TIME command."""
self.send_items('TIME', server)
def topic(self, channel, new_topic=None):
"""Send a TOPIC command."""
self.send_items('TOPIC', channel, new_topic and ':' + new_topic)
def trace(self, target=""):
"""Send a TRACE command."""
self.send_items('TRACE', target)
def user(self, username, realname):
"""Send a USER command."""
cmd = 'USER {username} 0 * :{realname}'.format(**locals())
self.send_raw(cmd)
def userhost(self, nicks):
"""Send a USERHOST command."""
self.send_items('USERHOST', ",".join(nicks))
def users(self, server=""):
"""Send a USERS command."""
self.send_items('USERS', server)
def version(self, server=""):
"""Send a VERSION command."""
self.send_items('VERSION', server)
def wallops(self, text):
"""Send a WALLOPS command."""
self.send_items('WALLOPS', ':' + text)
def who(self, target="", op=""):
"""Send a WHO command."""
self.send_items('WHO', target, op and 'o')
def whois(self, targets):
"""Send a WHOIS command."""
self.send_items('WHOIS', ",".join(always_iterable(targets)))
def whowas(self, nick, max="", server=""):
"""Send a WHOWAS command."""
self.send_items('WHOWAS', nick, max, server)
def set_rate_limit(self, frequency):
"""
Set a `frequency` limit (messages per second) for this connection.
Any attempts to send faster than this rate will block.
"""
self.send_raw = Throttler(self.send_raw, frequency)
def set_keepalive(self, interval):
"""
Set a keepalive to occur every `interval` on this `ServerConnection`.
:param interval: `int` in seconds, or `datetime.timedelta`
"""
pinger = functools.partial(self.ping, 'keep-alive')
self.reactor.scheduler.execute_every(period=interval, func=pinger)
class PrioritizedHandler(collections.namedtuple('Base', ('priority', 'callback'))):
def __lt__(self, other):
"when sorting prioritized handlers, only use the priority"
return self.priority < other.priority
class Reactor:
"""
Processes events from one or more IRC server connections.
This class implements a reactor in the style of the `reactor pattern
<http://en.wikipedia.org/wiki/Reactor_pattern>`_.
When a Reactor object has been instantiated, it can be used to create
Connection objects that represent the IRC connections. The
responsibility of the reactor object is to provide an event-driven
framework for the connections and to keep the connections alive.
It runs a select loop to poll each connection's TCP socket and
hands over the sockets with incoming data for processing by the
corresponding connection.
The methods of most interest for an IRC client writer are server,
add_global_handler, remove_global_handler,
process_once, and process_forever.
    This is functionally an event-loop which can either use its own
internal polling loop, or tie into an external event-loop, by
having the external event-system periodically call `process_once`
on the instantiated reactor class. This will allow the reactor
to process any queued data and/or events.
Calling `process_forever` will hand off execution to the reactor's
internal event-loop, which will not return for the life of the
reactor.
Here is an example:
client = irc.client.Reactor()
server = client.server()
server.connect("irc.some.where", 6667, "my_nickname")
server.privmsg("a_nickname", "Hi there!")
client.process_forever()
This will connect to the IRC server irc.some.where on port 6667
using the nickname my_nickname and send the message "Hi there!"
to the nickname a_nickname.
The methods of this class are thread-safe; accesses to and modifications
of its internal lists of connections, handlers, and delayed commands
are guarded by a mutex.
"""
scheduler_class = schedule.DefaultScheduler
connection_class = ServerConnection
def __do_nothing(*args, **kwargs):
pass
def __init__(self, on_connect=__do_nothing, on_disconnect=__do_nothing):
"""Constructor for Reactor objects.
on_connect: optional callback invoked when a new connection
is made.
on_disconnect: optional callback invoked when a socket is
disconnected.
The arguments mainly exist to be able to use an external
main loop (for example Tkinter's or PyGTK's main app loop)
instead of calling the process_forever method.
An alternative is to just call ServerConnection.process_once()
once in a while.
"""
self._on_connect = on_connect
self._on_disconnect = on_disconnect
scheduler = self.scheduler_class()
assert isinstance(scheduler, schedule.IScheduler)
self.scheduler = scheduler
self.connections = []
self.handlers = {}
# Modifications to these shared lists and dict need to be thread-safe
self.mutex = threading.RLock()
self.add_global_handler("ping", _ping_ponger, -42)
def server(self):
"""Creates and returns a ServerConnection object."""
conn = self.connection_class(self)
with self.mutex:
self.connections.append(conn)
return conn
def process_data(self, sockets):
"""Called when there is more data to read on connection sockets.
Arguments:
sockets -- A list of socket objects.
See documentation for Reactor.__init__.
"""
with self.mutex:
log.log(logging.DEBUG - 2, "process_data()")
for sock, conn in itertools.product(sockets, self.connections):
if sock == conn.socket:
conn.process_data()
def process_timeout(self):
"""Called when a timeout notification is due.
See documentation for Reactor.__init__.
"""
with self.mutex:
self.scheduler.run_pending()
@property
def sockets(self):
with self.mutex:
return [
conn.socket
for conn in self.connections
if conn is not None and conn.socket is not None
]
def process_once(self, timeout=0):
"""Process data from connections once.
Arguments:
timeout -- How long the select() call should wait if no
data is available.
This method should be called periodically to check and process
incoming data, if there are any. If that seems boring, look
at the process_forever method.
"""
log.log(logging.DEBUG - 2, "process_once()")
sockets = self.sockets
if sockets:
in_, out, err = select.select(sockets, [], [], timeout)
self.process_data(in_)
else:
time.sleep(timeout)
self.process_timeout()
def process_forever(self, timeout=0.2):
"""Run an infinite loop, processing data from connections.
This method repeatedly calls process_once.
Arguments:
timeout -- Parameter to pass to process_once.
"""
# This loop should specifically *not* be mutex-locked.
# Otherwise no other thread would ever be able to change
# the shared state of a Reactor object running this function.
log.debug("process_forever(timeout=%s)", timeout)
one = functools.partial(self.process_once, timeout=timeout)
consume(repeatfunc(one))
def disconnect_all(self, message=""):
"""Disconnects all connections."""
with self.mutex:
for conn in self.connections:
conn.disconnect(message)
def add_global_handler(self, event, handler, priority=0):
"""Adds a global handler function for a specific event type.
Arguments:
event -- Event type (a string). Check the values of
numeric_events for possible event types.
handler -- Callback function taking 'connection' and 'event'
parameters.
priority -- A number (the lower number, the higher priority).
The handler function is called whenever the specified event is
triggered in any of the connections. See documentation for
the Event class.
The handler functions are called in priority order (lowest
number is highest priority). If a handler function returns
"NO MORE", no more handlers will be called.
"""
handler = PrioritizedHandler(priority, handler)
with self.mutex:
event_handlers = self.handlers.setdefault(event, [])
bisect.insort(event_handlers, handler)
def remove_global_handler(self, event, handler):
"""Removes a global handler function.
Arguments:
event -- Event type (a string).
handler -- Callback function.
Returns 1 on success, otherwise 0.
"""
with self.mutex:
if event not in self.handlers:
return 0
for h in self.handlers[event]:
if handler == h.callback:
self.handlers[event].remove(h)
return 1
def dcc(self, dcctype="chat"):
"""Creates and returns a DCCConnection object.
Arguments:
dcctype -- "chat" for DCC CHAT connections or "raw" for
DCC SEND (or other DCC types). If "chat",
incoming data will be split in newline-separated
chunks. If "raw", incoming data is not touched.
"""
with self.mutex:
conn = DCCConnection(self, dcctype)
self.connections.append(conn)
return conn
def _handle_event(self, connection, event):
"""
Handle an Event event incoming on ServerConnection connection.
"""
with self.mutex:
matching_handlers = sorted(
self.handlers.get("all_events", []) + self.handlers.get(event.type, [])
)
for handler in matching_handlers:
result = handler.callback(connection, event)
if result == "NO MORE":
return
def _remove_connection(self, connection):
"""[Internal]"""
with self.mutex:
self.connections.remove(connection)
self._on_disconnect(connection.socket)
_cmd_pat = (
"^(@(?P<tags>[^ ]*) )?(:(?P<prefix>[^ ]+) +)?"
"(?P<command>[^ ]+)( *(?P<argument> .+))?"
)
_rfc_1459_command_regexp = re.compile(_cmd_pat)
class DCCConnectionError(IRCError):
pass
class DCCConnection(Connection):
"""
A DCC (Direct Client Connection).
DCCConnection objects are instantiated by calling the dcc
method on a Reactor object.
"""
socket = None
connected = False
passive = False
peeraddress = None
peerport = None
def __init__(self, reactor, dcctype):
super(DCCConnection, self).__init__(reactor)
self.dcctype = dcctype
def connect(self, address, port):
"""Connect/reconnect to a DCC peer.
Arguments:
address -- Host/IP address of the peer.
port -- The port number to connect to.
Returns the DCCConnection object.
"""
self.peeraddress = socket.gethostbyname(address)
self.peerport = port
self.buffer = buffer.LineBuffer()
self.handlers = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.connect((self.peeraddress, self.peerport))
except socket.error as x:
raise DCCConnectionError("Couldn't connect to socket: %s" % x)
self.connected = True
self.reactor._on_connect(self.socket)
return self
def listen(self, addr=None):
"""Wait for a connection/reconnection from a DCC peer.
Returns the DCCConnection object.
The local IP address and port are available as
self.localaddress and self.localport. After connection from a
peer, the peer address and port are available as
self.peeraddress and self.peerport.
"""
self.buffer = buffer.LineBuffer()
self.handlers = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.passive = True
default_addr = socket.gethostbyname(socket.gethostname()), 0
try:
self.socket.bind(addr or default_addr)
self.localaddress, self.localport = self.socket.getsockname()
self.socket.listen(10)
except socket.error as x:
raise DCCConnectionError("Couldn't bind socket: %s" % x)
return self
def disconnect(self, message=""):
"""Hang up the connection and close the object.
Arguments:
message -- Quit message.
"""
try:
del self.connected
except AttributeError:
return
try:
self.socket.shutdown(socket.SHUT_WR)
self.socket.close()
except socket.error:
pass
del self.socket
self.reactor._handle_event(
self, Event("dcc_disconnect", self.peeraddress, "", [message])
)
self.reactor._remove_connection(self)
def process_data(self):
"""[Internal]"""
if self.passive and not self.connected:
conn, (self.peeraddress, self.peerport) = self.socket.accept()
self.socket.close()
self.socket = conn
self.connected = True
log.debug("DCC connection from %s:%d", self.peeraddress, self.peerport)
self.reactor._handle_event(
self, Event("dcc_connect", self.peeraddress, None, None)
)
return
try:
new_data = self.socket.recv(2 ** 14)
except socket.error:
# The server hung up.
self.disconnect("Connection reset by peer")
return
if not new_data:
# Read nothing: connection must be down.
self.disconnect("Connection reset by peer")
return
if self.dcctype == "chat":
self.buffer.feed(new_data)
chunks = list(self.buffer)
if len(self.buffer) > 2 ** 14:
# Bad peer! Naughty peer!
log.info(
"Received >16k from a peer without a newline; " "disconnecting."
)
self.disconnect()
return
else:
chunks = [new_data]
command = "dccmsg"
prefix = self.peeraddress
target = None
for chunk in chunks:
log.debug("FROM PEER: %s", chunk)
arguments = [chunk]
log.debug(
"command: %s, source: %s, target: %s, arguments: %s",
command,
prefix,
target,
arguments,
)
event = Event(command, prefix, target, arguments)
self.reactor._handle_event(self, event)
def privmsg(self, text):
"""
Send text to DCC peer.
The text will be padded with a newline if it's a DCC CHAT session.
"""
if self.dcctype == 'chat':
text += '\n'
return self.send_bytes(self.encode(text))
def send_bytes(self, bytes):
"""
Send data to DCC peer.
"""
try:
self.socket.send(bytes)
log.debug("TO PEER: %r\n", bytes)
except socket.error:
self.disconnect("Connection reset by peer.")
class SimpleIRCClient:
"""A simple single-server IRC client class.
This is an example of an object-oriented wrapper of the IRC
framework. A real IRC client can be made by subclassing this
class and adding appropriate methods.
The method on_join will be called when a "join" event is created
    (which is done when the server sends a JOIN message/command),
on_privmsg will be called for "privmsg" events, and so on. The
handler methods get two arguments: the connection object (same as
self.connection) and the event object.
Functionally, any of the event names in `events.py` may be subscribed
to by prefixing them with `on_`, and creating a function of that
name in the child-class of `SimpleIRCClient`. When the event of
`event_name` is received, the appropriately named method will be
called (if it exists) by runtime class introspection.
See `_dispatcher()`, which takes the event name, postpends it to
    `on_`, and then attempts to look up the class member function by
name and call it.
Instance attributes that can be used by sub classes:
reactor -- The Reactor instance.
connection -- The ServerConnection instance.
dcc_connections -- A list of DCCConnection instances.
"""
reactor_class = Reactor
def __init__(self):
self.reactor = self.reactor_class()
self.connection = self.reactor.server()
self.dcc_connections = []
self.reactor.add_global_handler("all_events", self._dispatcher, -10)
self.reactor.add_global_handler("dcc_disconnect", self._dcc_disconnect, -10)
def _dispatcher(self, connection, event):
"""
Dispatch events to on_<event.type> method, if present.
"""
log.debug("_dispatcher: %s", event.type)
def do_nothing(connection, event):
return None
method = getattr(self, "on_" + event.type, do_nothing)
method(connection, event)
def _dcc_disconnect(self, connection, event):
self.dcc_connections.remove(connection)
def connect(self, *args, **kwargs):
"""Connect using the underlying connection"""
self.connection.connect(*args, **kwargs)
def dcc(self, *args, **kwargs):
"""Create and associate a new DCCConnection object.
Use the returned object to listen for or connect to
a DCC peer.
"""
dcc = self.reactor.dcc(*args, **kwargs)
self.dcc_connections.append(dcc)
return dcc
def dcc_connect(self, address, port, dcctype="chat"):
"""Connect to a DCC peer.
Arguments:
address -- IP address of the peer.
port -- Port to connect to.
Returns a DCCConnection instance.
"""
warnings.warn("Use self.dcc(type).connect()", DeprecationWarning)
return self.dcc(dcctype).connect(address, port)
def dcc_listen(self, dcctype="chat"):
"""Listen for connections from a DCC peer.
Returns a DCCConnection instance.
"""
warnings.warn("Use self.dcc(type).listen()", DeprecationWarning)
return self.dcc(dcctype).listen()
def start(self):
"""Start the IRC client."""
self.reactor.process_forever()
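# Illustrative sketch (not part of the original library): a minimal subclass of
# SimpleIRCClient showing the on_<event.type> dispatch described in the class
# docstring. The channel and event handling below are placeholders.
class _ExampleEchoClient(SimpleIRCClient):
    """Joins a channel on welcome and logs public messages (documentation only)."""
    def __init__(self, channel="#example"):
        super().__init__()
        self.channel = channel
    def on_welcome(self, connection, event):
        # "welcome" fires once the server accepts the registration.
        connection.join(self.channel)
    def on_pubmsg(self, connection, event):
        # event.arguments[0] holds the message text for pubmsg events.
        log.info("%s said: %s", NickMask(event.source).nick, event.arguments[0])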
class Event:
"""
An IRC event.
>>> print(Event('privmsg', '@somebody', '#channel'))
type: privmsg, source: @somebody, target: #channel, arguments: [], tags: []
"""
def __init__(self, type, source, target, arguments=None, tags=None):
"""
Initialize an Event.
Arguments:
type -- A string describing the event.
source -- The originator of the event (a nick mask or a server).
target -- The target of the event (a nick or a channel).
arguments -- Any event-specific arguments.
"""
self.type = type
self.source = source
self.target = target
if arguments is None:
arguments = []
self.arguments = arguments
if tags is None:
tags = []
self.tags = tags
def __str__(self):
tmpl = (
"type: {type}, "
"source: {source}, "
"target: {target}, "
"arguments: {arguments}, "
"tags: {tags}"
)
return tmpl.format(**vars(self))
def is_channel(string):
"""Check if a string is a channel name.
Returns true if the argument is a channel name, otherwise false.
"""
return string and string[0] in "#&+!"
def ip_numstr_to_quad(num):
"""
Convert an IP number as an integer given in ASCII
representation to an IP address string.
>>> ip_numstr_to_quad('3232235521')
'192.168.0.1'
>>> ip_numstr_to_quad(3232235521)
'192.168.0.1'
"""
packed = struct.pack('>L', int(num))
bytes = struct.unpack('BBBB', packed)
return ".".join(map(str, bytes))
def ip_quad_to_numstr(quad):
"""
Convert an IP address string (e.g. '192.168.0.1') to an IP
number as a base-10 integer given in ASCII representation.
>>> ip_quad_to_numstr('192.168.0.1')
'3232235521'
"""
bytes = map(int, quad.split("."))
packed = struct.pack('BBBB', *bytes)
return str(struct.unpack('>L', packed)[0])
class NickMask(str):
"""
A nickmask (the source of an Event)
>>> nm = NickMask('pinky!username@example.com')
>>> nm.nick
'pinky'
>>> nm.host
'example.com'
>>> nm.user
'username'
>>> isinstance(nm, str)
True
>>> nm = NickMask('красный!red@yahoo.ru')
>>> isinstance(nm.nick, str)
True
Some messages omit the userhost. In that case, None is returned.
>>> nm = NickMask('irc.server.net')
>>> nm.nick
'irc.server.net'
>>> nm.userhost
>>> nm.host
>>> nm.user
"""
@classmethod
def from_params(cls, nick, user, host):
return cls('{nick}!{user}@{host}'.format(**vars()))
@property
def nick(self):
nick, sep, userhost = self.partition("!")
return nick
@property
def userhost(self):
nick, sep, userhost = self.partition("!")
return userhost or None
@property
def host(self):
nick, sep, userhost = self.partition("!")
user, sep, host = userhost.partition('@')
return host or None
@property
def user(self):
nick, sep, userhost = self.partition("!")
user, sep, host = userhost.partition('@')
return user or None
@classmethod
def from_group(cls, group):
return cls(group) if group else None
def _ping_ponger(connection, event):
"A global handler for the 'ping' event"
connection.pong(event.target)
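# Illustrative sketch (not part of the original module): how the pieces above
# are typically wired together, including the throttling and keepalive helpers.
# Host, port and nickname are placeholders; nothing here runs on import.
def _example_usage():  # pragma: no cover - documentation only
    reactor = Reactor()
    conn = reactor.server().connect("irc.example.net", 6667, "example_nick")
    conn.set_rate_limit(2)  # at most two outgoing lines per second
    conn.set_keepalive(60)  # send a keep-alive PING every 60 seconds
    reactor.process_forever()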
| 31.135556 | 87 | 0.59872 |
e509f364e68d3eb4ba81036644ea86291d966415 | 338 | py | Python | aws_mock/requests/describe_key_pairs.py | enaydanov/aws_mock | 4ad3dca270ad164693e85741d5e92f845c34aa01 | [
"Apache-2.0"
] | null | null | null | aws_mock/requests/describe_key_pairs.py | enaydanov/aws_mock | 4ad3dca270ad164693e85741d5e92f845c34aa01 | [
"Apache-2.0"
] | 1 | 2021-10-21T21:06:29.000Z | 2021-10-21T21:06:29.000Z | aws_mock/requests/describe_key_pairs.py | bentsi/aws_mock | d6c1b963e02b4cd3602722e7135f4d65f6a71d3e | [
"Apache-2.0"
] | 1 | 2021-11-08T14:20:36.000Z | 2021-11-08T14:20:36.000Z | from aws_mock.lib import get_aws_mock_db, aws_response
@aws_response
def describe_key_pairs(key_name: str) -> dict | tuple[str, dict, int]:
if key_pair := get_aws_mock_db()["key"].find_one({"name": key_name}):
return {"items": [key_pair]}
return "responses/describe_key_pairs_not_found.xml", {"key_name": key_name}, 400
| 37.555556 | 84 | 0.718935 |
cbabcf3465df8f07bcc5fb6b9af57058b78523cc | 15,398 | py | Python | bloop.py | bondscripting/bloop | 7ee0bc16458670c7689dbc8aeed012c1fb9f307a | [
"MIT"
] | null | null | null | bloop.py | bondscripting/bloop | 7ee0bc16458670c7689dbc8aeed012c1fb9f307a | [
"MIT"
] | null | null | null | bloop.py | bondscripting/bloop | 7ee0bc16458670c7689dbc8aeed012c1fb9f307a | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import math;
from PIL import Image, ImageDraw;
import sys;
import xml.etree.ElementTree as ET;
class BloopException(Exception):
pass;
def main():
try:
inputFileName = None;
outputFileName = "bloop.png";
args = {};
geometry = None;
numSamples = 1;
i = 1;
numArgs = len(sys.argv);
while (i < numArgs):
arg = sys.argv[i];
i += 1;
if (arg == "-i"):
if (i < numArgs):
inputFileName = sys.argv[i];
i += 1;
else:
raise BloopException("Missing argument to '-i'.");
elif (arg == "-g"):
if (i < numArgs):
geometry = tuple(int(v) for v in sys.argv[i].split(","));
i += 1;
if ((len(geometry) != 2) and (len(geometry) != 4)):
raise BloopException("Geometry passed to -g must have 2 or 4 arguments.");
else:
raise BloopException("Missing argument to '-g'.");
elif (arg == "-o"):
if (i < numArgs):
outputFileName = sys.argv[i];
i += 1;
else:
raise BloopException("Missing argument to '-o'.");
elif (arg == "-s"):
if (i < numArgs):
numSamples = int(sys.argv[i]);
if (numSamples <= 0):
raise BloopException("Argument passed to -s must be 1 or greater.");
i += 1;
else:
raise BloopException("Missing argument to '-s'.");
else:
if (i < numArgs):
args[arg] = sys.argv[i];
i += 1;
else:
raise BloopException("Missing argument to '{0}'.".format(arg));
if (not inputFileName):
raise BloopException("Missing input file.");
if (not geometry):
raise BloopException("Missing geometry.");
ProcessInputFile(inputFileName, outputFileName, args, geometry, numSamples);
except BloopException as e:
print(str(e));
exit(1);
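# Illustrative usage note (not part of the original script): the argument
# parsing above accepts an input XML scene (-i), a geometry of "width,height"
# or "width,height,xoffset,yoffset" (-g), an optional sample count (-s) and
# output file (-o), plus arbitrary "name value" pairs that become scene
# arguments, e.g.:
#   ./bloop.py -i scene.xml -g 640,480 -s 4 -o out.png radius 25
# (the scene file name and the "radius" parameter are placeholders).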
def ProcessInputFile(inputFileName, outputFileName, args, geometry, numSamples):
tree = ET.parse(inputFileName);
rootElement = tree.getroot();
resolvedArgs, sceneFactory = ParseScene(rootElement, args);
scene = sceneFactory.CreateObject(resolvedArgs);
backColor = ColorFromRGBA(resolvedArgs["color"]);
DrawImage(outputFileName, scene, backColor, geometry, numSamples);
def ParseScene(element, args):
parsedDefinitions = False;
parsers = {**CORE_PARSERS};
parsedParams = False;
params = {**SCENE_PARAMS};
objectFactory = None;
for child in element:
if (child.tag == "params"):
if (parsedParams or objectFactory):
raise BloopException("Unexpected element <params>.".format(child.tag));
ParseParameters(child, params);
parsedParams = True;
elif (child.tag == "define"):
if (parsedDefinitions or objectFactory):
raise BloopException("Unexpected element <define>.".format(child.tag));
ParseDefinitions(child, parsers);
parsedDefinitions = True;
else:
if (objectFactory):
raise BloopException("Unexpected element <{0}>.".format(child.tag));
objectFactory = ParseObject(child, parsers);
if (not objectFactory):
raise BloopException("No object in scene.");
ValidateArguments(args, params, element.tag);
resolvedArgs = ResolveArgs(args, {});
return resolvedArgs, objectFactory;
def ParseDefinitions(element, parsers):
for child in element:
ParseDefinition(child, parsers);
def ParseDefinition(element, parsers):
parsedParams = False;
params = {**CORE_OBJECT_PARAMS};
childFactory = None;
if (element.tag in parsers):
raise BloopException("Duplicate definition of object type '{0}'.".format(element.tag));
for child in element:
if (child.tag == "params"):
if (parsedParams or childFactory):
raise BloopException("Unexpected element <params>.".format(child.tag));
ParseParameters(child, params);
parsedParams = True;
else:
if (childFactory):
raise BloopException("Object of type '{0}' can have at most one child.".format(element.tag));
childFactory = ParseObject(child, parsers);
if (not childFactory):
raise BloopException("No child in user defined object type '{0}'.".format(element.tag));
parsers[element.tag] = ObjectParser(BaseObject, params, PreparsedChildParser(childFactory));
def ParseParameters(element, params):
for child in element:
ParseParameter(child, params);
def ParseParameter(element, params):
name = None;
defaultValue = None;
for k, v in element.attrib.items():
if (k == "name"):
name = v;
elif (k == "default"):
defaultValue = v;
else:
raise BloopException("Unexpected attribute '{0}'.".format(k));
if (not name):
raise BloopException("Parameter name is not defined.");
if name in params:
raise BloopException("Duplicate parameter name '{0}'.".format(name));
params[name] = defaultValue;
def ParseObjects(element, parsers):
objectFactories = [ParseObject(child, parsers) for child in element];
return objectFactories;
def ParseObject(element, parsers):
parser = parsers.get(element.tag);
if (not parser):
raise BloopException("Unknown object type '{0}'".format(element.tag));
factory = parser.Parse(element, parsers);
return factory;
def ValidateArguments(args, params, T):
for k in args:
if (not k in params):
raise BloopException("Object type '{0}' does not have a parameter named '{1}'.".format(T, k));
for k, v in params.items():
if (not k in args):
if (not v):
raise BloopException("Object of type '{0}' is missing argument for parameter '{1}' with no default value.".format(T, k));
args[k] = v;
def ResolveArgs(args, parentArgs):
return {k: eval(v, {}, parentArgs) for k, v in args.items()};
class ObjectParser:
def __init__(self, T, params, childParser):
self.T = T;
self.params = params;
self.childParser = childParser;
def Parse(self, element, parsers):
args = {};
childFactories = None;
for k, v in element.attrib.items():
args[k] = v;
ValidateArguments(args, self.params, element.tag);
childFactories = self.childParser.Parse(element, parsers);
factory = ObjectFactory(self.T, args, childFactories);
return factory;
class NullChildParser:
def Parse(self, element, parsers):
if (len(element) > 0):
raise BloopException("Object of type '{0}' cannot have children.".format(element.tag));
return [];
class SingleChildParser:
def Parse(self, element, parsers):
if (len(element) > 1):
raise BloopException("Object of type '{0}' can have at most one child.".format(element.tag));
childFactories = ParseObjects(element, parsers);
return childFactories;
class ListChildParser:
def Parse(self, element, parsers):
childFactories = ParseObjects(element, parsers);
return childFactories;
class PreparsedChildParser:
def __init__(self, childFactory):
self.childFactory = childFactory;
def Parse(self, element, parsers):
if (len(element) > 0):
raise BloopException("Object of type '{0}' cannot have children.".format(element.tag));
return [self.childFactory];
class ObjectFactory:
def __init__(self, T, args, childFactories):
self.T = T;
self.args = args;
self.childFactories = childFactories;
def CreateObject(self, parentArgs):
resolvedArgs = ResolveArgs(self.args, parentArgs);
unifiedArgs = {**parentArgs, **resolvedArgs};
children = [factory.CreateObject(unifiedArgs) for factory in self.childFactories];
return self.T(resolvedArgs, children);
class BaseObject:
def __init__(self, args, children):
self.x = args["x"];
self.y = args["y"];
self.rgba = args["color"];
self.color = ColorFromRGBA(self.rgba);
self.children = children;
pass;
def ToLocalCoordinates(self, x, y):
return x - self.x, y - self.y;
def ProbeHiResWithDefault(self, x, y, defaultColor, numSamples):
offset = 1 / (numSamples * 2);
colorHistogram = {};
for j in range(numSamples):
yj = j / numSamples;
for i in range(numSamples):
xi = i / numSamples;
color = self.ProbeWithDefault(x + offset + xi, y + offset + yj, defaultColor);
if (color in colorHistogram):
colorHistogram[color] += 1;
else:
colorHistogram[color] = 1;
sortedColors = sorted(colorHistogram.items(), key=lambda x: x[1], reverse=True);
color = BlendColors(sortedColors);
return color;
def ProbeWithDefault(self, x, y, defaultColor):
color = self.Probe(x, y);
if (not color):
color = defaultColor;
return color;
def Probe(self, x, y):
x, y = self.ToLocalCoordinates(x, y);
for child in self.children:
return child.Probe(x, y);
return None;
class Circle(BaseObject):
def __init__(self, args, children):
super().__init__(args, children);
self.radius = args["radius"];
def Probe(self, x, y):
x, y = self.ToLocalCoordinates(x, y);
if (((x * x) + (y * y)) < (self.radius * self.radius)):
return self.color;
return None;
class Ellipse(BaseObject):
def __init__(self, args, children):
super().__init__(args, children);
width = args["width"];
height = args["height"];
if ((width <= 0) or (height <= 0)):
self.radius = 0;
self.scx = 1;
self.scy = 1;
elif (width >= height):
self.radius = width / 2;
self.scx = 1;
self.scy = height / width;
else:
self.radius = height / 2;
self.scx = width / height;
self.scy = 1;
def ToLocalCoordinates(self, x, y):
x, y = super().ToLocalCoordinates(x, y);
return (x - self.radius) / self.scx, (y - self.radius) / self.scy;
def Probe(self, x, y):
x, y = self.ToLocalCoordinates(x, y);
if (((x * x) + (y * y)) < (self.radius * self.radius)):
return self.color;
return None;
class Rectangle(BaseObject):
def __init__(self, args, children):
super().__init__(args, children);
self.width = args["width"];
self.height = args["height"];
def Probe(self, x, y):
x, y = self.ToLocalCoordinates(x, y);
if ((x >= 0) and (x < self.width) and (y >= 0) and (y < self.height)):
return self.color;
return None;
class Union(BaseObject):
def __init__(self, args, children):
super().__init__(args, children);
def Probe(self, x, y):
x, y = self.ToLocalCoordinates(x, y);
for child in reversed(self.children):
result = child.Probe(x, y);
if (result):
return result;
return None;
class Intersect(BaseObject):
def __init__(self, args, children):
super().__init__(args, children);
def Probe(self, x, y):
x, y = self.ToLocalCoordinates(x, y);
result = None;
for child in self.children:
result = child.Probe(x, y);
if (result == None):
return None;
return result;
class Subtract(BaseObject):
def __init__(self, args, children):
super().__init__(args, children);
def Probe(self, x, y):
result = None;
if (len(self.children) > 0):
x, y = self.ToLocalCoordinates(x, y);
result = self.children[0].Probe(x, y);
for i in range(1, len(self.children)):
if (self.children[i].Probe(x, y) != None):
return None;
return result;
class Rotate(BaseObject):
def __init__(self, args, children):
super().__init__(args, children);
self.angle = math.radians(args["angle"]);
def ToLocalCoordinates(self, x, y):
x, y = super().ToLocalCoordinates(x, y);
cs = math.cos(-self.angle);
sn = math.sin(-self.angle);
return (x * cs) - (y * sn), (x * sn) + (y * cs);
class Scale(BaseObject):
def __init__(self, args, children):
super().__init__(args, children);
self.scx = args["scx"];
self.scy = args["scy"];
def ToLocalCoordinates(self, x, y):
x, y = super().ToLocalCoordinates(x, y);
return x / self.scx, y / self.scy;
class Shear(BaseObject):
def __init__(self, args, children):
super().__init__(args, children);
self.scx = args["shx"];
self.scy = args["shy"];
def ToLocalCoordinates(self, x, y):
x, y = super().ToLocalCoordinates(x, y);
return x - (self.scx * y), y - (self.scy * x);
def DrawImage(outputFileName, scene, backColor, geometry, numSamples):
width = geometry[0];
height = geometry[1];
xOffset = 0;
yOffset = 0;
if (len(geometry) == 4):
xOffset = geometry[2];
yOffset = geometry[3];
image = Image.new("RGBA", (width, height), backColor);
draw = ImageDraw.Draw(image);
class CacheElement:
def init(self):
self.color = None;
self.processed = False;
prevRow = tuple(CacheElement() for x in range(width));
currentRow = tuple(CacheElement() for x in range(width));
for y in range(height):
for x in range(width):
color = scene.ProbeWithDefault(x + xOffset + 0.5, y + yOffset + 0.5, backColor);
current = currentRow[x];
current.color = color;
current.processed = False;
process = False;
if ((y > 0) and (numSamples > 1)):
j = y - 1;
for i in range(max(x - 1, 0), min(x + 2, width)):
prev = prevRow[i];
if (prev.color != color):
process = True;
if (not prev.processed):
c = scene.ProbeHiResWithDefault(i + xOffset, j + yOffset, backColor, numSamples);
draw.point((i, j), c);
prev.processed = True;
if ((x > 0) and (numSamples > 1)):
i = x - 1;
prev = currentRow[i];
if (prev.color != color):
process = True;
if (not prev.processed):
c = scene.ProbeHiResWithDefault(i + xOffset, y + yOffset, backColor, numSamples);
draw.point((i, y), c);
prev.processed = True;
if (process):
color = scene.ProbeHiResWithDefault(x + xOffset, y + yOffset, backColor, numSamples);
current.processed = True;
draw.point((x, y), color);
prevRow, currentRow = currentRow, prevRow;
image.save(outputFileName, "PNG");
def ColorFromRGBA(rgba):
if (type(rgba) is tuple):
return rgba;
r = (rgba >> 24) & 0xff;
g = (rgba >> 16) & 0xff;
b = (rgba >> 8) & 0xff;
a = rgba & 0xff;
return (r, g, b, a);
def BlendColors(colorsAndWeights):
result = (0, 0, 0, 0);
totalRGBWeight = 0;
totalAlphaWeight = 0;
for color, weight in colorsAndWeights:
rgbWeight = weight * color[3];
totalRGBWeight += rgbWeight;
totalAlphaWeight += weight;
rgbT = rgbWeight / max(totalRGBWeight, SMALL_FLOAT);
alphaT = weight / max(totalAlphaWeight, SMALL_FLOAT);
result = InterpolateColors(result, color, rgbT, alphaT);
return result;
def InterpolateColors(colorA, colorB, rgbT, alphaT):
return (
round(math.sqrt(Lerp(colorA[0]**2, colorB[0]**2, rgbT))),
round(math.sqrt(Lerp(colorA[1]**2, colorB[1]**2, rgbT))),
round(math.sqrt(Lerp(colorA[2]**2, colorB[2]**2, rgbT))),
round(Lerp(colorA[3], colorB[3], alphaT))
);
def Lerp(a, b, t):
return ((1 - t) * a) + (t * b);
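# Worked example (editor's note, not in the original): blending opaque black and
# white with equal weights. The RGB channels are averaged in squared space, so
# BlendColors([((0, 0, 0, 255), 1), ((255, 255, 255, 255), 1)]) yields roughly
# (180, 180, 180, 255) rather than (128, 128, 128, 255), since
# sqrt(255**2 / 2) ~= 180; the alpha channel is interpolated linearly.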
SCENE_PARAMS = {
"color": "0xffffffff"
};
CORE_OBJECT_PARAMS = {
"x": "0",
"y": "0",
"color": "color"
};
CIRCLE_PARAMS = {
**CORE_OBJECT_PARAMS,
"radius": None
};
ELLIPSE_PARAMS = {
**CORE_OBJECT_PARAMS,
"width": None,
"height": None
};
RECTANGLE_PARAMS = {
**CORE_OBJECT_PARAMS,
"width": None,
"height": None
};
ROTATE_PARAMS = {
**CORE_OBJECT_PARAMS,
"angle": "0"
};
SCALE_PARAMS = {
**CORE_OBJECT_PARAMS,
"scx": "1",
"scy": "1"
};
SHEAR_PARAMS = {
**CORE_OBJECT_PARAMS,
"shx": "0",
"shy": "0"
};
CORE_PARSERS = {
"circle": ObjectParser(Circle, CIRCLE_PARAMS, NullChildParser()),
"ellipse": ObjectParser(Ellipse, ELLIPSE_PARAMS, NullChildParser()),
"rectangle": ObjectParser(Rectangle, RECTANGLE_PARAMS, NullChildParser()),
"union": ObjectParser(Union, CORE_OBJECT_PARAMS, ListChildParser()),
"intersect": ObjectParser(Intersect, CORE_OBJECT_PARAMS, ListChildParser()),
"subtract": ObjectParser(Subtract, CORE_OBJECT_PARAMS, ListChildParser()),
"rotate": ObjectParser(Rotate, ROTATE_PARAMS, SingleChildParser()),
"scale": ObjectParser(Scale, SCALE_PARAMS, SingleChildParser()),
"shear": ObjectParser(Shear, SHEAR_PARAMS, SingleChildParser())
};
SMALL_FLOAT = 1.0 / 2**16;
if __name__ == "__main__":
main();
| avg_line_length: 25.706177 | max_line_length: 125 | alphanum_fraction: 0.663723 |
fd680d3e7cd14b5027be1c9d05b9350a6bc76107 | 30,078 | py | Python | flax/wallet/trade_manager.py | ReadyNeutron/shitcoin-blockchain | 80add4e545ad22a317244f7fd958d118a5a75c5d | ["Apache-2.0"] | stars: 174 | issues: 49 | forks: 80
import logging
import time
import traceback
from pathlib import Path
from secrets import token_bytes
from typing import Any, Dict, List, Optional, Tuple
from blspy import AugSchemeMPL
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.spend_bundle import SpendBundle
from flax.types.coin_spend import CoinSpend
from flax.util.byte_types import hexstr_to_bytes
from flax.util.db_wrapper import DBWrapper
from flax.util.hash import std_hash
from flax.util.ints import uint32, uint64
from flax.wallet.cc_wallet import cc_utils
from flax.wallet.cc_wallet.cc_utils import CC_MOD, SpendableCC, spend_bundle_for_spendable_ccs, uncurry_cc
from flax.wallet.cc_wallet.cc_wallet import CCWallet
from flax.wallet.puzzles.genesis_by_coin_id_with_0 import genesis_coin_id_for_genesis_coin_checker
from flax.wallet.trade_record import TradeRecord
from flax.wallet.trading.trade_status import TradeStatus
from flax.wallet.trading.trade_store import TradeStore
from flax.wallet.transaction_record import TransactionRecord
from flax.wallet.util.trade_utils import (
get_discrepancies_for_spend_bundle,
get_output_amount_for_puzzle_and_solution,
get_output_discrepancy_for_puzzle_and_solution,
)
from flax.wallet.util.transaction_type import TransactionType
from flax.wallet.util.wallet_types import WalletType
from flax.wallet.wallet import Wallet
from flax.wallet.wallet_coin_record import WalletCoinRecord
class TradeManager:
wallet_state_manager: Any
log: logging.Logger
trade_store: TradeStore
@staticmethod
async def create(
wallet_state_manager: Any,
db_wrapper: DBWrapper,
name: str = None,
):
self = TradeManager()
if name:
self.log = logging.getLogger(name)
else:
self.log = logging.getLogger(__name__)
self.wallet_state_manager = wallet_state_manager
self.trade_store = await TradeStore.create(db_wrapper)
return self
async def get_offers_with_status(self, status: TradeStatus) -> List[TradeRecord]:
records = await self.trade_store.get_trade_record_with_status(status)
return records
async def get_coins_of_interest(
self,
) -> Tuple[Dict[bytes32, Coin], Dict[bytes32, Coin]]:
"""
        Returns a list of coins we want to check for inclusion in the filter.
        These include coins that belong to us and coins on the other side of the trade.
"""
all_pending = []
pending_accept = await self.get_offers_with_status(TradeStatus.PENDING_ACCEPT)
pending_confirm = await self.get_offers_with_status(TradeStatus.PENDING_CONFIRM)
pending_cancel = await self.get_offers_with_status(TradeStatus.PENDING_CANCEL)
all_pending.extend(pending_accept)
all_pending.extend(pending_confirm)
all_pending.extend(pending_cancel)
removals = {}
additions = {}
for trade in all_pending:
for coin in trade.removals:
removals[coin.name()] = coin
for coin in trade.additions:
additions[coin.name()] = coin
return removals, additions
async def get_trade_by_coin(self, coin: Coin) -> Optional[TradeRecord]:
all_trades = await self.get_all_trades()
for trade in all_trades:
if trade.status == TradeStatus.CANCELED.value:
continue
if coin in trade.removals:
return trade
if coin in trade.additions:
return trade
return None
async def coins_of_interest_farmed(self, removals: List[Coin], additions: List[Coin], height: uint32):
"""
        If both our coins and the other party's coins in the trade were removed, the trade executed successfully.
        If coins from the other side of the trade were farmed without ours, the trade failed because either someone
        else completed the trade or the other party canceled it by doing a spend.
        If our coins were farmed but the other side's coins were not, we successfully canceled the trade by spending our inputs.
"""
removal_dict = {}
addition_dict = {}
checked: Dict[bytes32, Coin] = {}
for coin in removals:
removal_dict[coin.name()] = coin
for coin in additions:
addition_dict[coin.name()] = coin
all_coins = []
all_coins.extend(removals)
all_coins.extend(additions)
for coin in all_coins:
if coin.name() in checked:
continue
trade = await self.get_trade_by_coin(coin)
if trade is None:
self.log.error(f"Coin: {Coin}, not in any trade")
continue
# Check if all coins that are part of the trade got farmed
# If coin is missing, trade failed
failed = False
for removed_coin in trade.removals:
if removed_coin.name() not in removal_dict:
self.log.error(f"{removed_coin} from trade not removed")
failed = True
checked[removed_coin.name()] = removed_coin
for added_coin in trade.additions:
if added_coin.name() not in addition_dict:
self.log.error(f"{added_coin} from trade not added")
failed = True
checked[coin.name()] = coin
if failed is False:
# Mark this trade as successful
await self.trade_store.set_status(trade.trade_id, TradeStatus.CONFIRMED, True, height)
self.log.info(f"Trade with id: {trade.trade_id} confirmed at height: {height}")
else:
# Either we canceled this trade or this trade failed
if trade.status == TradeStatus.PENDING_CANCEL.value:
await self.trade_store.set_status(trade.trade_id, TradeStatus.CANCELED, True)
self.log.info(f"Trade with id: {trade.trade_id} canceled at height: {height}")
elif trade.status == TradeStatus.PENDING_CONFIRM.value:
await self.trade_store.set_status(trade.trade_id, TradeStatus.FAILED, True)
self.log.warning(f"Trade with id: {trade.trade_id} failed at height: {height}")
async def get_locked_coins(self, wallet_id: int = None) -> Dict[bytes32, WalletCoinRecord]:
"""Returns a dictionary of confirmed coins that are locked by a trade."""
all_pending = []
pending_accept = await self.get_offers_with_status(TradeStatus.PENDING_ACCEPT)
pending_confirm = await self.get_offers_with_status(TradeStatus.PENDING_CONFIRM)
pending_cancel = await self.get_offers_with_status(TradeStatus.PENDING_CANCEL)
all_pending.extend(pending_accept)
all_pending.extend(pending_confirm)
all_pending.extend(pending_cancel)
if len(all_pending) == 0:
return {}
result = {}
for trade_offer in all_pending:
if trade_offer.tx_spend_bundle is None:
locked = await self.get_locked_coins_in_spend_bundle(trade_offer.spend_bundle)
else:
locked = await self.get_locked_coins_in_spend_bundle(trade_offer.tx_spend_bundle)
for name, record in locked.items():
if wallet_id is None or record.wallet_id == wallet_id:
result[name] = record
return result
async def get_all_trades(self):
all: List[TradeRecord] = await self.trade_store.get_all_trades()
return all
async def get_trade_by_id(self, trade_id: bytes) -> Optional[TradeRecord]:
record = await self.trade_store.get_trade_record(trade_id)
return record
async def get_locked_coins_in_spend_bundle(self, bundle: SpendBundle) -> Dict[bytes32, WalletCoinRecord]:
"""Returns a list of coin records that are used in this SpendBundle"""
result = {}
removals = bundle.removals()
for coin in removals:
coin_record = await self.wallet_state_manager.coin_store.get_coin_record(coin.name())
if coin_record is None:
continue
result[coin_record.name()] = coin_record
return result
async def cancel_pending_offer(self, trade_id: bytes32):
await self.trade_store.set_status(trade_id, TradeStatus.CANCELED, False)
async def cancel_pending_offer_safely(self, trade_id: bytes32):
"""This will create a transaction that includes coins that were offered"""
self.log.info(f"Secure-Cancel pending offer with id trade_id {trade_id.hex()}")
trade = await self.trade_store.get_trade_record(trade_id)
if trade is None:
return None
all_coins = trade.removals
for coin in all_coins:
wallet = await self.wallet_state_manager.get_wallet_for_coin(coin.name())
if wallet is None:
continue
new_ph = await wallet.get_new_puzzlehash()
if wallet.type() == WalletType.COLOURED_COIN.value:
tx = await wallet.generate_signed_transaction(
[coin.amount], [new_ph], 0, coins={coin}, ignore_max_send_amount=True
)
else:
tx = await wallet.generate_signed_transaction(
coin.amount, new_ph, 0, coins={coin}, ignore_max_send_amount=True
)
await self.wallet_state_manager.add_pending_transaction(tx_record=tx)
await self.trade_store.set_status(trade_id, TradeStatus.PENDING_CANCEL, False)
return None
async def save_trade(self, trade: TradeRecord):
await self.trade_store.add_trade_record(trade, False)
async def create_offer_for_ids(
self, offer: Dict[int, int], file_name: str
) -> Tuple[bool, Optional[TradeRecord], Optional[str]]:
success, trade_offer, error = await self._create_offer_for_ids(offer)
if success is True and trade_offer is not None:
self.write_offer_to_disk(Path(file_name), trade_offer)
await self.save_trade(trade_offer)
return success, trade_offer, error
async def _create_offer_for_ids(self, offer: Dict[int, int]) -> Tuple[bool, Optional[TradeRecord], Optional[str]]:
"""
Offer is dictionary of wallet ids and amount
"""
spend_bundle = None
try:
for id in offer.keys():
amount = offer[id]
wallet_id = uint32(int(id))
wallet = self.wallet_state_manager.wallets[wallet_id]
if isinstance(wallet, CCWallet):
balance = await wallet.get_confirmed_balance()
if balance < abs(amount) and amount < 0:
raise Exception(f"insufficient funds in wallet {wallet_id}")
if amount > 0:
if spend_bundle is None:
to_exclude: List[Coin] = []
else:
to_exclude = spend_bundle.removals()
zero_spend_bundle: SpendBundle = await wallet.generate_zero_val_coin(False, to_exclude)
if spend_bundle is None:
spend_bundle = zero_spend_bundle
else:
spend_bundle = SpendBundle.aggregate([spend_bundle, zero_spend_bundle])
additions = zero_spend_bundle.additions()
removals = zero_spend_bundle.removals()
zero_val_coin: Optional[Coin] = None
for add in additions:
if add not in removals and add.amount == 0:
zero_val_coin = add
new_spend_bundle = await wallet.create_spend_bundle_relative_amount(amount, zero_val_coin)
else:
new_spend_bundle = await wallet.create_spend_bundle_relative_amount(amount)
elif isinstance(wallet, Wallet):
if spend_bundle is None:
to_exclude = []
else:
to_exclude = spend_bundle.removals()
new_spend_bundle = await wallet.create_spend_bundle_relative_flax(amount, to_exclude)
else:
return False, None, "unsupported wallet type"
if new_spend_bundle is None or new_spend_bundle.removals() == []:
raise Exception(f"Wallet {id} was unable to create offer.")
if spend_bundle is None:
spend_bundle = new_spend_bundle
else:
spend_bundle = SpendBundle.aggregate([spend_bundle, new_spend_bundle])
if spend_bundle is None:
return False, None, None
now = uint64(int(time.time()))
trade_offer: TradeRecord = TradeRecord(
confirmed_at_index=uint32(0),
accepted_at_time=None,
created_at_time=now,
my_offer=True,
sent=uint32(0),
spend_bundle=spend_bundle,
tx_spend_bundle=None,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
trade_id=std_hash(spend_bundle.name() + bytes(now)),
status=uint32(TradeStatus.PENDING_ACCEPT.value),
sent_to=[],
)
return True, trade_offer, None
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Error with creating trade offer: {type(e)}{tb}")
return False, None, str(e)
def write_offer_to_disk(self, file_path: Path, offer: TradeRecord):
if offer is not None:
file_path.write_text(bytes(offer).hex())
async def get_discrepancies_for_offer(self, file_path: Path) -> Tuple[bool, Optional[Dict], Optional[Exception]]:
self.log.info(f"trade offer: {file_path}")
trade_offer_hex = file_path.read_text()
trade_offer = TradeRecord.from_bytes(bytes.fromhex(trade_offer_hex))
return get_discrepancies_for_spend_bundle(trade_offer.spend_bundle)
async def get_inner_puzzle_for_puzzle_hash(self, puzzle_hash) -> Program:
info = await self.wallet_state_manager.puzzle_store.get_derivation_record_for_puzzle_hash(puzzle_hash)
assert info is not None
puzzle = self.wallet_state_manager.main_wallet.puzzle_for_pk(bytes(info.pubkey))
return puzzle
async def maybe_create_wallets_for_offer(self, file_path: Path) -> bool:
success, result, error = await self.get_discrepancies_for_offer(file_path)
if not success or result is None:
return False
for key, value in result.items():
wsm = self.wallet_state_manager
wallet: Wallet = wsm.main_wallet
if key == "flax":
continue
self.log.info(f"value is {key}")
exists = await wsm.get_wallet_for_colour(key)
if exists is not None:
continue
await CCWallet.create_wallet_for_cc(wsm, wallet, key)
return True
async def respond_to_offer(self, file_path: Path) -> Tuple[bool, Optional[TradeRecord], Optional[str]]:
has_wallets = await self.maybe_create_wallets_for_offer(file_path)
if not has_wallets:
return False, None, "Unknown Error"
trade_offer = None
try:
trade_offer_hex = file_path.read_text()
trade_offer = TradeRecord.from_bytes(hexstr_to_bytes(trade_offer_hex))
except Exception as e:
return False, None, f"Error: {e}"
if trade_offer is not None:
offer_spend_bundle: SpendBundle = trade_offer.spend_bundle
coinsols: List[CoinSpend] = [] # [] of CoinSpends
cc_coinsol_outamounts: Dict[bytes32, List[Tuple[CoinSpend, int]]] = dict()
aggsig = offer_spend_bundle.aggregated_signature
cc_discrepancies: Dict[bytes32, int] = dict()
flax_discrepancy = None
wallets: Dict[bytes32, Any] = dict() # colour to wallet dict
for coinsol in offer_spend_bundle.coin_spends:
puzzle: Program = Program.from_bytes(bytes(coinsol.puzzle_reveal))
solution: Program = Program.from_bytes(bytes(coinsol.solution))
# work out the deficits between coin amount and expected output for each
r = cc_utils.uncurry_cc(puzzle)
if r:
# Calculate output amounts
mod_hash, genesis_checker, inner_puzzle = r
colour = bytes(genesis_checker).hex()
if colour not in wallets:
wallets[colour] = await self.wallet_state_manager.get_wallet_for_colour(colour)
unspent = await self.wallet_state_manager.get_spendable_coins_for_wallet(wallets[colour].id())
if coinsol.coin in [record.coin for record in unspent]:
return False, None, "can't respond to own offer"
innersol = solution.first()
total = get_output_amount_for_puzzle_and_solution(inner_puzzle, innersol)
if colour in cc_discrepancies:
cc_discrepancies[colour] += coinsol.coin.amount - total
else:
cc_discrepancies[colour] = coinsol.coin.amount - total
# Store coinsol and output amount for later
if colour in cc_coinsol_outamounts:
cc_coinsol_outamounts[colour].append((coinsol, total))
else:
cc_coinsol_outamounts[colour] = [(coinsol, total)]
else:
# standard flax coin
unspent = await self.wallet_state_manager.get_spendable_coins_for_wallet(1)
if coinsol.coin in [record.coin for record in unspent]:
return False, None, "can't respond to own offer"
if flax_discrepancy is None:
flax_discrepancy = get_output_discrepancy_for_puzzle_and_solution(coinsol.coin, puzzle, solution)
else:
flax_discrepancy += get_output_discrepancy_for_puzzle_and_solution(coinsol.coin, puzzle, solution)
coinsols.append(coinsol)
flax_spend_bundle: Optional[SpendBundle] = None
if flax_discrepancy is not None:
flax_spend_bundle = await self.wallet_state_manager.main_wallet.create_spend_bundle_relative_flax(
flax_discrepancy, []
)
if flax_spend_bundle is not None:
for coinsol in coinsols:
flax_spend_bundle.coin_spends.append(coinsol)
zero_spend_list: List[SpendBundle] = []
spend_bundle = None
# create coloured coin
self.log.info(cc_discrepancies)
for colour in cc_discrepancies.keys():
if cc_discrepancies[colour] < 0:
my_cc_spends = await wallets[colour].select_coins(abs(cc_discrepancies[colour]))
else:
if flax_spend_bundle is None:
to_exclude: List = []
else:
to_exclude = flax_spend_bundle.removals()
my_cc_spends = await wallets[colour].select_coins(0)
if my_cc_spends is None or my_cc_spends == set():
zero_spend_bundle: SpendBundle = await wallets[colour].generate_zero_val_coin(False, to_exclude)
if zero_spend_bundle is None:
return (
False,
None,
"Unable to generate zero value coin. Confirm that you have flax available",
)
zero_spend_list.append(zero_spend_bundle)
additions = zero_spend_bundle.additions()
removals = zero_spend_bundle.removals()
my_cc_spends = set()
for add in additions:
if add not in removals and add.amount == 0:
my_cc_spends.add(add)
if my_cc_spends == set() or my_cc_spends is None:
return False, None, "insufficient funds"
# Create SpendableCC list and innersol_list with both my coins and the offered coins
# Firstly get the output coin
my_output_coin = my_cc_spends.pop()
spendable_cc_list = []
innersol_list = []
genesis_id = genesis_coin_id_for_genesis_coin_checker(Program.from_bytes(bytes.fromhex(colour)))
# Make the rest of the coins assert the output coin is consumed
for coloured_coin in my_cc_spends:
inner_solution = self.wallet_state_manager.main_wallet.make_solution(consumed=[my_output_coin.name()])
inner_puzzle = await self.get_inner_puzzle_for_puzzle_hash(coloured_coin.puzzle_hash)
assert inner_puzzle is not None
sigs = await wallets[colour].get_sigs(inner_puzzle, inner_solution, coloured_coin.name())
sigs.append(aggsig)
aggsig = AugSchemeMPL.aggregate(sigs)
lineage_proof = await wallets[colour].get_lineage_proof_for_coin(coloured_coin)
spendable_cc_list.append(SpendableCC(coloured_coin, genesis_id, inner_puzzle, lineage_proof))
innersol_list.append(inner_solution)
# Create SpendableCC for each of the coloured coins received
for cc_coinsol_out in cc_coinsol_outamounts[colour]:
cc_coinsol = cc_coinsol_out[0]
puzzle = Program.from_bytes(bytes(cc_coinsol.puzzle_reveal))
solution = Program.from_bytes(bytes(cc_coinsol.solution))
r = uncurry_cc(puzzle)
if r:
mod_hash, genesis_coin_checker, inner_puzzle = r
inner_solution = solution.first()
lineage_proof = solution.rest().rest().first()
spendable_cc_list.append(SpendableCC(cc_coinsol.coin, genesis_id, inner_puzzle, lineage_proof))
innersol_list.append(inner_solution)
# Finish the output coin SpendableCC with new information
newinnerpuzhash = await wallets[colour].get_new_inner_hash()
outputamount = sum([c.amount for c in my_cc_spends]) + cc_discrepancies[colour] + my_output_coin.amount
inner_solution = self.wallet_state_manager.main_wallet.make_solution(
primaries=[{"puzzlehash": newinnerpuzhash, "amount": outputamount}]
)
inner_puzzle = await self.get_inner_puzzle_for_puzzle_hash(my_output_coin.puzzle_hash)
assert inner_puzzle is not None
lineage_proof = await wallets[colour].get_lineage_proof_for_coin(my_output_coin)
spendable_cc_list.append(SpendableCC(my_output_coin, genesis_id, inner_puzzle, lineage_proof))
innersol_list.append(inner_solution)
sigs = await wallets[colour].get_sigs(inner_puzzle, inner_solution, my_output_coin.name())
sigs.append(aggsig)
aggsig = AugSchemeMPL.aggregate(sigs)
if spend_bundle is None:
spend_bundle = spend_bundle_for_spendable_ccs(
CC_MOD,
Program.from_bytes(bytes.fromhex(colour)),
spendable_cc_list,
innersol_list,
[aggsig],
)
else:
new_spend_bundle = spend_bundle_for_spendable_ccs(
CC_MOD,
Program.from_bytes(bytes.fromhex(colour)),
spendable_cc_list,
innersol_list,
[aggsig],
)
spend_bundle = SpendBundle.aggregate([spend_bundle, new_spend_bundle])
# reset sigs and aggsig so that they aren't included next time around
sigs = []
aggsig = AugSchemeMPL.aggregate(sigs)
my_tx_records = []
if zero_spend_list is not None and spend_bundle is not None:
zero_spend_list.append(spend_bundle)
spend_bundle = SpendBundle.aggregate(zero_spend_list)
if spend_bundle is None:
return False, None, "spend_bundle missing"
# Add transaction history for this trade
now = uint64(int(time.time()))
if flax_spend_bundle is not None:
spend_bundle = SpendBundle.aggregate([spend_bundle, flax_spend_bundle])
if flax_discrepancy < 0:
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=now,
to_puzzle_hash=token_bytes(),
amount=uint64(abs(flax_discrepancy)),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=flax_spend_bundle,
additions=flax_spend_bundle.additions(),
removals=flax_spend_bundle.removals(),
wallet_id=uint32(1),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.OUTGOING_TRADE.value),
name=flax_spend_bundle.name(),
)
else:
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=token_bytes(),
amount=uint64(abs(flax_discrepancy)),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=flax_spend_bundle,
additions=flax_spend_bundle.additions(),
removals=flax_spend_bundle.removals(),
wallet_id=uint32(1),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.INCOMING_TRADE.value),
name=flax_spend_bundle.name(),
)
my_tx_records.append(tx_record)
for colour, amount in cc_discrepancies.items():
wallet = wallets[colour]
if flax_discrepancy > 0:
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=token_bytes(),
amount=uint64(abs(amount)),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=wallet.id(),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.OUTGOING_TRADE.value),
name=spend_bundle.name(),
)
else:
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=token_bytes(),
amount=uint64(abs(amount)),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=wallet.id(),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.INCOMING_TRADE.value),
name=token_bytes(),
)
my_tx_records.append(tx_record)
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=token_bytes(),
amount=uint64(0),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=uint32(0),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.OUTGOING_TRADE.value),
name=spend_bundle.name(),
)
now = uint64(int(time.time()))
trade_record: TradeRecord = TradeRecord(
confirmed_at_index=uint32(0),
accepted_at_time=now,
created_at_time=now,
my_offer=False,
sent=uint32(0),
spend_bundle=offer_spend_bundle,
tx_spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
trade_id=std_hash(spend_bundle.name() + bytes(now)),
status=uint32(TradeStatus.PENDING_CONFIRM.value),
sent_to=[],
)
await self.save_trade(trade_record)
await self.wallet_state_manager.add_pending_transaction(tx_record)
for tx in my_tx_records:
await self.wallet_state_manager.add_transaction(tx)
return True, trade_record, None
| avg_line_length: 46.131902 | max_line_length: 118 | alphanum_fraction: 0.60649 |
e764df28cf084a1afbbdb01417d393ab1012da3a | 8,256 | py | Python | ahds/tests/test_ahds.py | paulkorir/ahds | 813dd745f76165893d1dd43477a7653b5c02cdc0 | ["Apache-2.0"] | stars: 6 | issues: 6 | forks: 1
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import re
import sys
import ahds
from . import Py23FixTestCase, TEST_DATA_PATH
from ..ahds import parse_args, get_debug, get_literal, get_paths, set_file_and_paths, get_amira_file
from ..core import _str, _print
import numpy
def _parse_with_shlex(cmd):
import shlex
import sys
sys.argv = shlex.split(cmd)
args = parse_args()
return args
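# Hedged illustration (added, not part of the original test module): shlex.split
# tokenizes the command string the way a POSIX shell would before it is handed
# to parse_args via sys.argv.  The helper name below is illustrative only.
def _shlex_split_example():
    import shlex
    assert shlex.split("ahds -d file.am") == ["ahds", "-d", "file.am"]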
class TestArgs(Py23FixTestCase):
"""Tests for the main ahds entry point options"""
def test_file_only(self):
"""Test command with file only"""
args = _parse_with_shlex("ahds file.am")
self.assertFalse(args.debug)
self.assertEqual(args.file, ['file.am'])
self.assertFalse(args.literal)
self.assertFalse(args.load_streams)
def test_file_with_path(self):
"""Test command with file and path"""
args = _parse_with_shlex("ahds file.am header.Parameters")
self.assertFalse(args.debug)
self.assertEqual(args.file, ['file.am', 'header.Parameters'])
self.assertFalse(args.literal)
self.assertFalse(args.load_streams)
def test_debug(self):
"""Test debug option"""
args = _parse_with_shlex("ahds -d file.am")
self.assertTrue(args.debug)
def test_literal(self):
"""Test literal option"""
args = _parse_with_shlex("ahds -l file.am")
self.assertTrue(args.literal)
def test_load_streams(self):
"""Test load_streams option"""
args = _parse_with_shlex("ahds -s file.am")
self.assertTrue(args.load_streams)
class TestMain(Py23FixTestCase):
"""Tests for the main ahds entry point main()"""
@classmethod
def setUpClass(cls):
cls.af_fn = os.path.join(TEST_DATA_PATH, 'test12.am')
def test_set_file_and_paths(self):
"""Test that we correctly extract file and one or more path"""
# no paths
args = _parse_with_shlex("ahds {}".format(self.af_fn))
f, p = set_file_and_paths(args)
self.assertEqual(f, self.af_fn)
self.assertEqual(p, None)
# one path
args = _parse_with_shlex("ahds {} path1".format(self.af_fn))
f, p = set_file_and_paths(args)
self.assertEqual(f, self.af_fn)
self.assertEqual(p, ['path1'])
# two paths
args = _parse_with_shlex("ahds {} path1 path2".format(self.af_fn))
f, p = set_file_and_paths(args)
self.assertEqual(f, self.af_fn)
self.assertEqual(p, ['path1', 'path2'])
# three paths
args = _parse_with_shlex("ahds {} path1 path2 path3".format(self.af_fn))
f, p = set_file_and_paths(args)
self.assertEqual(f, self.af_fn)
self.assertEqual(p, ['path1', 'path2', 'path3'])
def test_get_amira_file(self):
"""Test that we can get the Amira (R) file"""
args = _parse_with_shlex("ahds -d {}".format(self.af_fn))
f, p = set_file_and_paths(args)
af = get_amira_file(f, args)
self.assertIsInstance(af, ahds.AmiraFile)
def test_get_debug(self):
"""Test that we can get debug info"""
args = _parse_with_shlex("ahds -d {}".format(self.af_fn))
f, p = set_file_and_paths(args)
af = get_amira_file(f, args)
string = get_debug(af, args)
self.assertIsInstance(string, _str)
des = re.compile(r".*\'designation\'.*", re.S)
com = re.compile(r".*\'comment\'.*", re.S)
par = re.compile(r".*\'parameters\'.*", re.S)
dat = re.compile(r".*\'data_definitions\'.*", re.S)
des_m = des.match(string)
self.assertIsNotNone(des_m)
com_m = com.match(string)
self.assertIsNotNone(com_m)
par_m = par.match(string)
self.assertIsNotNone(par_m)
dat_m = dat.match(string)
self.assertIsNotNone(dat_m)
def test_get_literal(self):
"""Test that we can get literal header info"""
args = _parse_with_shlex("ahds -l {}".format(self.af_fn))
f, p = set_file_and_paths(args)
af = get_amira_file(f, args)
string = get_literal(af, args)
self.assertIsInstance(string, _str)
am = re.compile(r".*AmiraMesh 3D BINARY.*", re.S)
cd = re.compile(r".*CreationDate.*", re.S)
de = re.compile(r".*define Lattice.*", re.S)
par = re.compile(r".*Parameters.*", re.S)
mat = re.compile(r".*Materials.*", re.S)
am_m = am.match(string)
self.assertIsNotNone(am_m)
cd_m = cd.match(string)
self.assertIsNotNone(cd_m)
de_m = de.match(string)
self.assertIsNotNone(de_m)
par_m = par.match(string)
self.assertIsNotNone(par_m)
mat_m = mat.match(string)
self.assertIsNotNone(mat_m)
def test_get_paths_full(self):
"""Test that we can view the paths full"""
args = _parse_with_shlex("ahds {}".format(self.af_fn))
f, p = set_file_and_paths(args)
af = get_amira_file(f, args)
string = get_paths(p, af)
print(string, file=sys.stderr)
self.assertIsInstance(string, _str)
am = re.compile(r".*AmiraFile.*", re.S)
m = re.compile(r".*meta.*", re.S)
h = re.compile(r".*header.*", re.S)
ds = re.compile(r".*data_streams.*", re.S)
am_m = am.match(string)
self.assertIsNotNone(am_m)
m_m = m.match(string)
self.assertIsNotNone(m_m)
h_m = h.match(string)
self.assertIsNotNone(h_m)
ds_m = ds.match(string)
self.assertIsNotNone(ds_m)
def test_get_paths_meta(self):
"""Test that we can fiew partial paths"""
args = _parse_with_shlex("ahds {} meta.streams_loaded".format(self.af_fn))
f, p = set_file_and_paths(args)
af = get_amira_file(f, args)
string = get_paths(p, af)
print(string, file=sys.stderr)
self.assertIsInstance(string, _str)
am = re.compile(r".*AmiraFile.*", re.S)
m = re.compile(r".*streams_loaded.*", re.S)
am_m = am.match(string)
self.assertIsNone(am_m)
m_m = m.match(string)
self.assertIsNotNone(m_m)
def test_get_paths_header(self):
"""Test that we can fiew partial paths"""
args = _parse_with_shlex("ahds {} header.Parameters.Materials".format(self.af_fn))
f, p = set_file_and_paths(args)
af = get_amira_file(f, args)
string = get_paths(p, af)
self.assertIsInstance(string, _str)
am = re.compile(r".*AmiraFile.*", re.S)
m = re.compile(r".*Inside.*", re.S)
am_m = am.match(string)
self.assertIsNone(am_m)
m_m = m.match(string)
self.assertIsNotNone(m_m)
def test_get_paths_data_streams(self):
"""Test that we can fiew partial paths"""
args = _parse_with_shlex("ahds {} data_streams".format(self.af_fn))
f, p = set_file_and_paths(args)
af = get_amira_file(f, args)
string = get_paths(p, af)
self.assertIsInstance(string, _str)
am = re.compile(r".*AmiraFile.*", re.S)
m = re.compile(r".*data_streams.*", re.S)
am_m = am.match(string)
self.assertIsNone(am_m)
m_m = m.match(string)
self.assertIsNotNone(m_m)
# def test_data(self):
# """Test that the data is correctly oriented"""
# af = ahds.AmiraFile(os.path.join(TEST_DATA_PATH, 'EM04226_2_U19_Cropped_YZ_binned.labels.am'))
# af = ahds.AmiraFile(os.path.join(TEST_DATA_PATH, 'testscalar.am'))
# _print(af)
# _print(af.data_streams.Labels.data.shape)
# _print(af.header.Lattice.length)
# import h5py
# with h5py.File('EM04226_2_U19_Cropped_YZ_binned.labels.h5', 'w') as h:
# h['/data'] = af.data_streams.Labels.data
# import mrcfile
# _print(af.data_streams.Labels.data.dtype)
# _print('unique values: ', numpy.unique(af.data_streams.Labels.data.astype(numpy.float32)))
# with mrcfile.new('EM04226_2_U19_Cropped_YZ_binned.labels.mrc', overwrite=True) as m:
# m.set_data(af.data_streams.Labels.data.astype(numpy.float32))
# m.voxel_size = (9.0, 9.0, 15.0)
# m.update_header_from_data()
| avg_line_length: 37.527273 | max_line_length: 104 | alphanum_fraction: 0.616037 |
4d1b39d2802f9be6877c954692d8a32b5ba76060 | 7,463 | py | Python | src/spodernet/spodernet/preprocessing/vocab.py | JD-AI-Research-Silicon-Valley/SACN | dba00fec88dd20b68a08f85c129ea878d56d0375 | ["MIT"] | stars: 96 | issues: 22 (zouguojian/KGEF) | forks: 31 (zouguojian/KGEF)
from collections import Counter
import numpy as np
import os
import time
import datetime
import pickle
import urllib
from src.bashmagic import bashmagic
import time
import json
from src.spodernet.spodernet.utils.util import get_data_path, save_data, xavier_uniform_weight
from os.path import join
from src.spodernet.spodernet.utils.util import Logger
log = Logger('vocab.py.txt')
'''This models the vocabulary and token embeddings'''
class Vocab(object):
    '''Class that manages word/char embeddings'''
def __init__(self, path, vocab = Counter(), labels = {}):
'''Constructor.
Args:
vocab: Counter object with vocabulary.
'''
self.index = None
token2idx = {}
idx2token = {}
self.label2idx = {}
self.idx2label = {}
self.glove_cache = {}
for i, item in enumerate(vocab.items()):
token2idx[item[0]] = i+1
idx2token[i+1] = item[0]
for idx in labels:
self.label2idx[labels[idx]] = idx
self.idx2label[idx] = labels[idx]
# out of vocabulary token
token2idx['OOV'] = int(0)
idx2token[int(0)] = 'OOV'
# empty = 0
token2idx[''] = int(1)
idx2token[int(1)] = ''
self.token2idx = token2idx
self.idx2token = idx2token
self.path = path
if len(idx2token.keys()) > 0:
self.next_idx = int(np.max(list(idx2token.keys())) + 1)
else:
self.next_idx = int(2)
if len(self.idx2label.keys()) > 0:
            self.next_label_idx = int(np.max(list(self.idx2label.keys())) + 1)
else:
self.next_label_idx = int(0)
@property
def num_token(self):
return len(self.token2idx)
@property
def num_labels(self):
return len(self.label2idx)
def add_token(self, token):
if token not in self.token2idx:
self.token2idx[token] = self.next_idx
self.idx2token[self.next_idx] = token
self.next_idx += 1
def add_label(self, label):
if label not in self.label2idx:
self.label2idx[label] = self.next_label_idx
self.idx2label[self.next_label_idx] = label
self.next_label_idx += 1
def get_idx(self, word):
'''Gets the idx if it exists, otherwise returns -1.'''
if word in self.token2idx:
return self.token2idx[word]
else:
return self.token2idx['OOV']
def get_idx_label(self, label):
'''Gets the idx of the label'''
return self.label2idx[label]
def get_word(self, idx):
'''Gets the word if it exists, otherwise returns OOV.'''
if idx in self.idx2token:
return self.idx2token[idx]
else:
return self.idx2token[0]
def save_to_disk(self, name=''):
log.info('Saving vocab to: {0}'.format(self.path))
pickle.dump([self.token2idx, self.idx2token, self.label2idx,
self.idx2label], open(self.path + name, 'wb'))
def load_from_disk(self, name=''):
if not os.path.exists(self.path + name):
return False
timestamp = time.ctime(os.path.getmtime(self.path + name))
timestamp = datetime.datetime.strptime(timestamp, '%a %b %d %H:%M:%S %Y')
age_in_hours = (datetime.datetime.now() - timestamp).seconds/60./60.
log.info('Loading vocab from: {0}'.format(self.path + name))
self.token2idx, self.idx2token, self.label2idx, self.idx2label = pickle.load(open(self.path, 'rb'))
if age_in_hours > 12:
log.info('Vocabulary outdated: {0}'.format(self.path + name))
return False
else:
return True
def download_glove(self):
if not os.path.exists(join(get_data_path(), 'glove')):
log.info('Glove data is missing, dowloading data now...')
os.mkdir(join(get_data_path(), 'glove'))
bashmagic.wget("http://nlp.stanford.edu/data/glove.6B.zip", join(get_data_path(),'glove'))
bashmagic.unzip(join(get_data_path(), 'glove', 'glove.6B.zip'), join(get_data_path(), 'glove'))
def prepare_glove(self, dimension):
if self.index is not None: return
if not os.path.exists(join(get_data_path(), 'glove', 'index_50.p')):
dims = [50, 100, 200, 300]
base_filename = 'glove.6B.{0}d.txt'
paths = [join(get_data_path(), 'glove', base_filename.format(dim)) for dim in dims]
for path, dim in zip(paths, dims):
index = {}
index = {'PATH' : path}
with open(path, 'rb') as f:
log.info('Building index for {0}', path)
while True:
prev_pos = f.tell()
line = f.readline().decode('utf-8')
if line == '': break
next_pos = f.tell()
data = line.strip().split(' ')
token = data[0]
index[token] = (prev_pos, next_pos)
log.info('Saving glove index...')
json.dump(index, open(join(get_data_path(), 'glove', 'index_{0}.p'.format(dim)), 'w'))
log.info('Loading glove index...')
self.index = json.load(open(join(get_data_path(), 'glove', 'index_{0}.p'.format(dimension)), 'r'))
def load_matrix(self, dim):
log.info('Initializing glove matrix...')
X = xavier_uniform_weight(len(self.token2idx), dim)
log.info('Loading vectors into glove matrix with dimension: {0}', X.shape)
pretrained_count = 0
n = len(self.token2idx)-2
for i, (token, idx) in enumerate(self.token2idx.items()):
if i % 10000 == 0: print(i)
vec = self.get_glove_list(token, dim)
if vec is not None:
X[idx] = vec
pretrained_count += 1
log.info('Filled matrix with {0} pretrained embeddings and {1} xavier uniform initialized embeddings.', pretrained_count, n-pretrained_count)
return X
def get_glove_vector(self, token, dimension=300):
if token in self.glove_cache: return self.glove_cache[token]
vec = self.get_glove_list(token, dimension)
if vec is not None:
arr = np.array(vec, dtype=np.float32)
self.glove_cache[token] = arr
return arr
else: return None
def get_glove_list(self, token, dimension=300):
assert dimension in [50, 100, 200, 300], 'Dimension not supported! Only dimension 50, 100, 200, and 300 are supported!'
self.download_glove()
self.prepare_glove(dimension)
vec = None
if token in self.index:
p = self.index['PATH']
with open(p, 'rb') as f:
start, end = self.index[token]
f.seek(start)
line = f.read(end-start).decode('utf-8')
data = line.strip().split(' ')
vec = data[1:]
return vec
def exists_in_glove(self, token, dimension=300):
self.download_glove()
self.prepare_glove(dimension)
return token in self.index
def get_glove_matrix(self, dimension):
assert dimension in [50, 100, 200, 300], 'Dimension not supported! Only dimension 50, 100, 200, and 300 are supported!'
self.download_glove()
return self.load_matrix(dimension)
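# --- Hedged usage sketch (added, not part of the original module) -----------
# A minimal round trip through the Vocab class defined above: with an empty
# initial Counter, tokens added via add_token get consecutive indices after
# the reserved OOV (idx 0) and empty-string (idx 1) entries, and unknown
# tokens fall back to the OOV index.  The path argument is only used by
# save_to_disk/load_from_disk, so any writable location works.  The helper
# name and the '/tmp' path are illustrative only.
def _vocab_usage_sketch(tmp_path='/tmp/example_vocab.p'):
    vocab = Vocab(tmp_path)
    vocab.add_token('hello')
    vocab.add_token('world')
    assert vocab.get_idx('hello') == 2
    assert vocab.get_word(2) == 'hello'
    assert vocab.get_idx('never seen') == 0      # OOV fallback
    vocab.add_label('positive')
    assert vocab.get_idx_label('positive') == 0
    return vocab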
| avg_line_length: 36.583333 | max_line_length: 149 | alphanum_fraction: 0.575774 |
b1202101132aec237923723e17cb60daf096707b | 16,235 | py | Python | plots/psd_analysis.py | SAKEverse/sake-plot | a08973222109981b36d204a754d0bf34d95be192 | ["Apache-2.0"] | stars: null | issues: 1 | forks: null
########## ------------------------------- IMPORTS ------------------------ ##########
import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde
from beartype import beartype
from typing import TypeVar
PandasDf = TypeVar('pandas.core.frame.DataFrame')
from processing.stft import get_freq_index
########## ---------------------------------------------------------------- ##########
@beartype
def get_power_area(pmat:np.ndarray, freq_vec:np.ndarray, freqs:np.ndarray) -> np.ndarray:
"""
Get power area across frequencies.
Parameters
----------
pmat : np.ndarray, 2D array containing power values rows = frequency bins and cols = time bins
freq_vec : np.ndarray, vector with real frequency values
freqs : np.ndarray, 2D array with frequencies, rows = different frequency ranges, colums = [start, stop]
Returns
-------
powers : 1D np.array, len = frequency ranges
"""
# get frequency index
freqs = freqs.reshape([-1, 2]) # reshape to 2D
freq_idx = get_freq_index(freq_vec, freqs)
# init empty array to store powers
powers = np.zeros(freq_idx.shape[0])
for i in range(freq_idx.shape[0]):
powers[i] = np.mean(pmat[freq_idx[i,0]:freq_idx[i,1],:])
return powers
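# Hedged illustration (added, not part of the original module): the
# band-averaging idea behind get_power_area, shown standalone with plain
# numpy.  A boolean mask stands in for the project's get_freq_index helper
# (defined in processing.stft), whose exact indexing is not reproduced here;
# the helper name and numbers below are illustrative only.
def _band_power_sketch():
    freq_vec = np.linspace(0, 100, 201)            # 0-100 Hz in 0.5 Hz bins
    pmat = np.ones((freq_vec.size, 10))            # fake power matrix (freq x time)
    band = (freq_vec >= 4) & (freq_vec <= 12)      # pick a 4-12 Hz band
    return np.mean(pmat[band, :])                  # -> 1.0 for this fake data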
@beartype
def get_power_ratio(pmat:np.ndarray, freq_vec:np.ndarray, freqs:np.ndarray) -> np.ndarray:
"""
Get power ratio across frequencies.
Parameters
----------
pmat : np.ndarray, 2D array containing power values rows = frequency bins and cols = time bins
freq_vec : np.ndarray, vector with real frequency values
freqs : np.ndarray, 3D array with frequencies, 1d = frequency ratios, 2d [lower to upper freq range], 3d = [start, stop]
Returns
-------
powers : 1D np.array, len = frequency ranges
"""
# get frequency index
freqs = freqs.reshape([-1,2,2]) # reshape to 3D
freq_idx = get_freq_index(freq_vec, freqs)
# init empty array to store powers
powers = np.zeros(freq_idx.shape[0])
for i in range(freq_idx.shape[0]):
powers[i] = np.divide(np.mean(pmat[freq_idx[i,0,0]:freq_idx[i,0,1],:]),
np.mean(pmat[freq_idx[i,1,0]:freq_idx[i,1,1],:]))
return powers
def melted_power_area(index_df:PandasDf, power_df:PandasDf, freqs:list, selected_categories:list):
"""
Get power area and melt dataframe for seaborn plotting.
Parameters
----------
index_df : PandasDf, experiment index
power_df : PandasDf, contains pmat and frequency vectors for every row of index_df
freqs : list, 2D list with frequency ranges for extraction of power area
    selected_categories : list, columns that will be included in the melted dataframe
Returns
-------
df : PandasDf, melted df with power area and categories
"""
# convert to numpy array
freqs = np.array(freqs)
# create frequency column names
freq_columns = []
for i in range(freqs.shape[0]):
freq_columns.append(str(freqs[i,0]) + ' - ' + str(freqs[i,1]) + ' Hz')
# create array for storage
power_array = np.empty((len(index_df), freqs.shape[0]))
for i in range(len(index_df)): # iterate over dataframe
# get power across frequencies
power_array[i,:] = get_power_area(power_df['pmat'][i], power_df['freq'][i], freqs)
# concatenate to array
index_df = pd.concat([index_df, pd.DataFrame(data = power_array, columns = freq_columns)], axis=1)
# set file id as index
index_df['id'] = index_df['animal_id'].astype(str) + index_df['file_id'].astype(str)
index_df.set_index('id', inplace = True)
# melt dataframe for seaborn plotting
df = pd.melt(index_df, id_vars = selected_categories, value_vars = freq_columns, var_name = 'freq', value_name = 'power_area',
ignore_index = False)
return df
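# Hedged illustration (added, not part of the original module): the reshaping
# step used above, shown on a toy frame.  pd.melt turns one column per
# frequency band (wide format) into a single 'freq'/'power_area' pair of
# columns (long format), which is what seaborn's categorical plots expect.
# The helper name and toy values are illustrative only.
def _melt_sketch():
    toy = pd.DataFrame({'treatment': ['a', 'b'],
                        '1 - 4 Hz': [0.5, 0.7],
                        '30 - 70 Hz': [0.2, 0.1]})
    return pd.melt(toy, id_vars=['treatment'],
                   value_vars=['1 - 4 Hz', '30 - 70 Hz'],
                   var_name='freq', value_name='power_area')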
def melted_power_ratio(index_df:PandasDf, power_df:PandasDf, freqs:list, selected_categories:list):
"""
Get power ratio and melt dataframe for seaborn plotting.
Parameters
----------
index_df : PandasDf, experiment index
power_df : PandasDf, contains pmat and frequency vectors for every row of index_df
freqs : list, 2D list with frequency ranges for extraction of power area
    selected_categories : list, columns that will be included in the melted dataframe
Returns
-------
df : PandasDf, melted df with power area and categories
"""
# convert to numpy array
freqs = np.array(freqs)
# create frequency column names
freq_columns = []
for i in range(freqs.shape[0]):
freq_columns.append(str(freqs[i,0]) + ' - ' + str(freqs[i,1]) + ' Hz')
# create array for storage
power_array = np.empty((len(index_df), freqs.shape[0]))
for i in range(len(index_df)): # iterate over dataframe
# get power across frequencies
power_array[i,:] = get_power_ratio(power_df['pmat'][i], power_df['freq'][i], freqs)
# concatenate to array
index_df = pd.concat([index_df, pd.DataFrame(data = power_array, columns = freq_columns)], axis=1)
# set file id as index
index_df['id'] = index_df['animal_id'].astype(str) + index_df['file_id'].astype(str)
index_df.set_index('id', inplace = True)
# melt dataframe for seaborn plotting
df = pd.melt(index_df, id_vars = selected_categories, value_vars = freq_columns, var_name = 'freq', value_name = 'power_ratio',
ignore_index=False)
return df
def melted_psds(index_df:PandasDf, power_df:PandasDf, freq_range:list, selected_categories:list): ## don't drop file index
"""
Get PSD and melt dataframe for seaborn plotting.
Parameters
----------
index_df : PandasDf, experiment index
power_df : PandasDf, contains pmat and frequency vectors for every row of index_df
freqs : list, 2D list with frequency ranges for extraction of power area
    selected_categories : list, columns that will be included in the melted dataframe
Returns
-------
df : PandasDf, melted df with psd and categories
"""
# create arrays for storage
power_array = np.array([])
freq_array = np.array([])
repeat_array = np.zeros(len(index_df))
# get selected columns
index_df['id'] = index_df['animal_id'].astype(str) + index_df['file_id'].astype(str)
df = index_df[['id'] + selected_categories]
for i in range(len(index_df)): # iterate over dataframe
# unpack frequency and power
freq = power_df['freq'][i]
power = power_df['pmat'][i]
# get desired frequency index
f_idx = get_freq_index(freq, freq_range)
freq = freq[f_idx[0]:f_idx[1]+1]
power = np.mean(power[f_idx[0]:f_idx[1]+1,:], axis =1)
# append to array
power_array = np.concatenate((power_array, power))
freq_array = np.concatenate((freq_array, freq ))
# get length
repeat_array[i] = freq.shape[0]
# repeat array
df = df.reindex(df.index.repeat(repeat_array))
# set file id as index
df.set_index('id', inplace = True)
# append to dataframe
df['freq'] = freq_array
df['power'] = power_array
return df
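# Hedged illustration (added, not part of the original module): the
# row-expansion idiom used above.  Each metadata row is repeated once per
# frequency bin so that the flattened power/freq vectors can be attached as
# ordinary columns.  The helper name and toy values are illustrative only.
def _reindex_repeat_sketch():
    meta = pd.DataFrame({'id': ['a', 'b'], 'region': ['pfc', 'hpc']})
    expanded = meta.reindex(meta.index.repeat([2, 3]))   # 2 bins for 'a', 3 for 'b'
    return expanded                                      # 5 rows, original order kept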
def melted_power_dist(index_df:PandasDf, power_df:PandasDf, freq_range:list, selected_categories:list): ## don't drop file index
"""
Get Power distribution and melt dataframe for seaborn plotting.
Parameters
----------
index_df : PandasDf, experiment index
power_df : PandasDf, contains pmat and frequency vectors for every row of index_df
freqs : list, 2D list with frequency ranges for extraction of power area
    selected_categories : list, columns that will be included in the melted dataframe
Returns
-------
df : PandasDf, melted df with pdf and categories
"""
# create arrays for storage
power_array = np.array([])
density_array = np.array([])
threshold_array = np.array([])
repeat_array = np.zeros(len(index_df))
# get selected columns
index_df['id'] = index_df['animal_id'].astype(str) + index_df['file_id'].astype(str)
df = index_df[['id'] + selected_categories]
# get all power areas
for i in range(len(index_df)): # iterate over dataframe
# unpack frequency and power
freq = power_df['freq'][i]
power = power_df['pmat'][i]
# get desired frequency index
f_idx = get_freq_index(freq, freq_range)
freq = freq[f_idx[0]:f_idx[1]+1]
power = np.mean(power[f_idx[0]:f_idx[1]+1,:], axis = 0)
power_df.at[i, 'pmat'] = power
# append to array
power_array = np.concatenate((power_array, power))
# get mean and sdev for normalization
avg = np.mean(power_array)
sdev = np.std(power_array)
# define edges for z normalized data and preallocate power_array
power_array = np.array([])
edges = np.linspace(-5, 5, 100)
for i in range(len(index_df)):
# normalize
power = (power_df['pmat'][i] - avg )/ sdev
# select power above threshold
threshold = np.mean(power) + np.std(power)
# get kde
pdf = gaussian_kde(power, bw_method = 1).evaluate(edges)
# append to array
power_array = np.concatenate((power_array, edges))
density_array = np.concatenate((density_array, pdf))
threshold_array = np.concatenate((threshold_array, np.repeat(threshold, edges.shape[0])))
# get length
repeat_array[i] = edges.shape[0]
# repeat array
df = df.reindex(df.index.repeat(repeat_array))
# set file id as index
df.set_index('id', inplace = True)
# append to dataframe
df['threshold'] = threshold_array
df['power'] = power_array
df['density'] = density_array
return df
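# Hedged illustration (added, not part of the original module): the density
# estimate used above, standalone.  Power values are z-scored against the
# group statistics, a per-recording threshold of mean + 1 SD is kept
# alongside, and gaussian_kde is evaluated on a fixed grid so every recording
# contributes the same 100 samples.  The helper name and fake data are
# illustrative only.
def _kde_sketch():
    power = np.random.default_rng(0).normal(size=500)    # fake power trace
    norm = (power - power.mean()) / power.std()
    threshold = norm.mean() + norm.std()
    edges = np.linspace(-5, 5, 100)
    pdf = gaussian_kde(norm, bw_method=1).evaluate(edges)
    return edges, pdf, threshold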
def norm_power(index_df, power_df, selection):
"""
Normalize power by PSDs of selected condition, drop non matching conditions
Parameters
----------
index_df : PandasDf, experiment index
power_df : PandasDf, contains pmat and frequency vectors for every row of index_df
selection : List/Tuple, (column name, baseline group)
Returns
-------
index_df : PandasDf, with dropped indices where conditions are missing
power_df : PandasDf, with dropped indices where conditions are missing
"""
unique_id = 'animal_id'
category, group = selection
# get number unique entries
unique_groups = index_df[category].unique()
unique_regions = index_df['brain_region'].unique()
unique_ids = index_df[unique_id].unique()
# iterate over brain regions
for region in unique_regions:
# iterate over unique ids
for uid in unique_ids:
# get matching idx
matching_entries = index_df[(index_df[unique_id] == uid) & (index_df['brain_region'] == region)]
# drop groups that are not complete
if len(matching_entries) < len(unique_groups):
power_df = power_df.drop(matching_entries.index, axis=0)
index_df = index_df.drop(matching_entries.index, axis=0)
else:
# get baseline psd
base_idx = matching_entries[matching_entries[category] == group].index[0]
base_psd = np.mean(power_df.pmat[base_idx], axis=1)
# divide matching groups by baseline psd
for i in matching_entries.index:
power_df.at[i, 'pmat'] = power_df['pmat'][i] / base_psd[:,None]
return index_df.reset_index().drop(['index'], axis=1), power_df.reset_index().drop(['index'], axis=1)
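# Hedged illustration (added, not part of the original module): the
# normalization applied above is a broadcast divide of each time-frequency
# matrix by the baseline PSD (one value per frequency bin).  The helper name
# and numbers are illustrative only.
def _norm_sketch():
    pmat = np.arange(12, dtype=float).reshape(3, 4)   # 3 freq bins x 4 time bins
    base_psd = np.array([1.0, 2.0, 4.0])              # baseline mean power per bin
    return pmat / base_psd[:, None]                   # same shape as pmat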
def norm_power_unpaired(index_df, power_df, selection):
"""
Normalize power by PSDs of selected condition, drop non matching conditions
Parameters
----------
index_df : PandasDf, experiment index
power_df : PandasDf, contains pmat and frequency vectors for every row of index_df
selection : List/Tuple, (column name, baseline group)
Returns
-------
index_df : PandasDf, with dropped indices where conditions are missing
power_df : PandasDf, with dropped indices where conditions are missing
"""
unique_id = 'animal_id'
category, group = selection
# get number unique entries
unique_groups = index_df[category].unique()
unique_regions = index_df['brain_region'].unique()
unique_ids = index_df[unique_id].unique()
# iterate over brain regions
for region in unique_regions:
# iterate over unique ids
for uid in unique_ids:
# get matching idx
matching_entries = index_df[(index_df[unique_id] == uid) & (index_df['brain_region'] == region)]
# drop groups that are not complete
if (matching_entries[category] == group).sum() == 0:
power_df = power_df.drop(matching_entries.index, axis=0)
index_df = index_df.drop(matching_entries.index, axis=0)
else:
# get baseline psd
base_idx = matching_entries[matching_entries[category] == group].index[0]
base_psd = np.mean(power_df.pmat[base_idx], axis=1)
# divide matching groups by baseline psd
for i in matching_entries.index:
power_df.at[i, 'pmat'] = power_df['pmat'][i] / base_psd[:,None]
return index_df.reset_index().drop(['index'], axis=1), power_df.reset_index().drop(['index'], axis=1)
def norm_mean_power(power_df):
"""
Normalize based on mean power.
Parameters
----------
power_df : PandasDf, contains pmat and frequency vectors for every row of index_df
Returns
-------
power_df : PandasDf, contains pmat and frequency vectors for every row of index_df
"""
power_df['pmat'] = power_df['pmat'].apply(lambda pmat: pmat/np.mean(pmat))
return power_df
if __name__ == '__main__':
import os, yaml
from load_index import load_index
# from facet_plot_gui import GridGraph
### ---------------------- USER INPUT -------------------------------- ###
## define path and conditions for filtering
parent_folder = r'\\SUPERCOMPUTER2\Shared\acute_allo'
## define frequencies of interest
with open('settings.yaml', 'r') as file:
settings = yaml.load(file, Loader=yaml.FullLoader)
### ---------------------------------------------------------------- ####
## load data frame
index_df = load_index(os.path.join(parent_folder, 'index.csv'))
# get power
# _, power_df = get_pmat(index_df, settings)
# power_df.to_pickle(os.path.join(parent_folder, 'power_mat.pickle'))
# power_df = pd.read_pickle(os.path.join(parent_folder, 'power_mat_verified.pickle'))
# normalize to baseline
# index_df, power_df = norm_power(index_df, power_df, ['treatment', 'baseline1'])
# df = melted_power_dist(index_df, power_df, [30,70], ['sex', 'treatment', 'brain_region'])
# df = melted_power_ratio(index_df, power_df, settings['freq_ratios'], ['sex', 'treatment', 'brain_region']) #
# import seaborn as sns
# get melted power area
# data = melted_power_area(index_df, power_df, settings['freq_ranges'], ['sex', 'treatment', 'brain_region'])
# GridGraph(parent_folder, 'test.csv', data).draw_graph('box')
# sns.catplot(data = df, x = 'freq', y = 'power_area', hue = 'treatment', col = 'sex', row = 'brain_region', kind = 'box')
# # get melted psd
# data = melted_psds(index_df, power_df, [1, 120], ['sex', 'treatment', 'brain_region'])
# GridGraph(parent_folder, 'test.csv', data).draw_psd()
# df.to_csv('melted_psd.csv',index=True)
# path = r'C:\Users\panton01\Desktop\pydsp_analysis'
# filename = 'power_area_df.csv'
# df.to_csv(os.path.join(path, filename), index = False)
# graph = GridGraph(path, filename)
# graph.draw_graph('violin')
| avg_line_length: 33.964435 | max_line_length: 131 | alphanum_fraction: 0.623899 |
edc2dd8eb3b8ed3ce10e7a168444951ca62e1b14 | 13,431 | py | Python | tools/fidl/difl/comparator.py | wwjiang007/fuchsia-1 | 0db66b52b5bcd3e27c8b8c2163925309e8522f94 | ["BSD-2-Clause"] | stars: 210 | issues: 56 | forks: 73
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from difl.ir import *
import typing
__all__ = ['Comparator']
class Comparator:
def __init__(self):
self.identifier_shapes_match: typing.Dict[str, bool] = {}
self.identifier_constraints_match: typing.Dict[str, bool] = {}
        # notice cycles when comparing shapes & constraints
self.shape_match_stack: typing.List[str] = []
self.constraint_match_stack: typing.List[str] = []
def shapes_match(self, before: Type, after: Type) -> bool:
'''
Compares two types for shape
'''
if isinstance(before, IdentifierType) and \
isinstance(after, IdentifierType) and \
before.identifier == after.identifier and \
           not before.is_nullable and not after.is_nullable:
if before.identifier not in self.identifier_shapes_match:
assert before.identifier not in self.shape_match_stack
self.shape_match_stack.append(before.identifier)
self.identifier_shapes_match[
before.identifier] = self._shapes_match(before, after)
assert before.identifier == self.shape_match_stack.pop()
return self.identifier_shapes_match[before.identifier]
return self._shapes_match(before, after)
def _shapes_match(self, before: Type, after: Type) -> bool:
# identical types are identical
if before == after and not isinstance(before, IdentifierType):
return True
# different sized types are incompatible
if before.inline_size != after.inline_size:
return False
########## Handles, Protocols and Requests
# handles are compatible with handles
if isinstance(before, (ProtocolIdentifierType, RequestType, HandleType)) and \
isinstance(after, (ProtocolIdentifierType, RequestType, HandleType)):
return True
########## Primitives
# compare primitives
if isinstance(before, PrimitiveType) and \
isinstance(after, PrimitiveType):
return before.inline_size == after.inline_size and before.is_float == after.is_float
########## Enums and Bits
# compare enums, bits and integer primitives
if isinstance(before, (PrimitiveType, EnumIdentifierType, BitsIdentifierType)) and \
isinstance(after, (PrimitiveType, EnumIdentifierType, BitsIdentifierType)):
# get the primitive or underlying type
b_prim = before if isinstance(before,
PrimitiveType) else before.primitive
a_prim = after if isinstance(after,
PrimitiveType) else after.primitive
assert b_prim.inline_size == a_prim.inline_size
return b_prim.is_float == a_prim.is_float
########## Arrays
if isinstance(before, ArrayType) != isinstance(after, ArrayType):
# arrays and not-arrays are incompatible
return False
if isinstance(before, ArrayType) and isinstance(after, ArrayType):
if before.count != after.count:
# changing the size is incompatible
return False
# compatibility is based on the member types
return self.shapes_match(before.element_type, after.element_type)
########## Vectors and Strings
if isinstance(before, (VectorType, StringType)) and \
isinstance(after, (VectorType, StringType)):
return self.shapes_match(before.element_type, after.element_type)
########## Identifiers
if isinstance(before, IdentifierType) and \
isinstance(after, IdentifierType):
if type(before) != type(after):
# identifier types changing is a different shape
return False
if before.identifier != after.identifier:
# TODO: deal with renames?
return False
if isinstance(before, (XUnionIdentifierType, TableIdentifierType)):
# never a shape change
return True
if before.is_nullable or after.is_nullable:
if before.is_nullable != after.is_nullable:
if isinstance(before, XUnionIdentifierType):
# Nullability is soft change for xunions
return True
else:
# No other types should have nullability
assert isinstance(
before,
(StructIdentifierType, UnionIdentifierType))
# Nullability changes layout for structs and unions
return False
else:
# both nullable, no layout change
return True
# both not-nullable
if isinstance(before, StructIdentifierType) and \
isinstance(after, StructIdentifierType):
# TODO: support shape-compatible struct member changes here? like joins & splits?
b_members = before.declaration.members
a_members = after.declaration.members
if len(b_members) != len(a_members):
return False
if len(b_members) == 0:
# all empty structs are the same
return True
return all(
self.shapes_match(b.type, a.type)
for b, a in zip(b_members, a_members))
if isinstance(before, UnionIdentifierType) and \
isinstance(after, UnionIdentifierType):
b_union_members = before.declaration.members
a_union_members = after.declaration.members
if len(b_union_members) != len(a_union_members):
return False
return all(
self.shapes_match(b.type, a.type)
for b, a in zip(b_union_members, a_union_members))
raise NotImplementedError(
"Don't know how to compare shape for %r (%r) and %r (%r)" %
(type(before), before, type(after), after))
def constraints_match(self, before: Type, after: Type) -> bool:
'''
Compares two types for constraints
'''
if isinstance(before, IdentifierType) and \
isinstance(after, IdentifierType) and \
before.identifier == after.identifier:
if before.identifier not in self.identifier_constraints_match:
if before.identifier in self.constraint_match_stack:
# hit a cycle
return True
self.constraint_match_stack.append(before.identifier)
self.identifier_constraints_match[before.identifier] = \
self._constraints_match(before, after)
assert before.identifier == self.constraint_match_stack.pop()
return self.identifier_constraints_match[before.identifier]
return self._constraints_match(before, after)
def _constraints_match(self, before: Type, after: Type) -> bool:
if not self.shapes_match(before, after):
# shape is the ultimate constraint
return False
if type(before) != type(after):
# changing the type of the type breaks constraints
return False
########## Primitives
if isinstance(before, PrimitiveType) and \
isinstance(after, PrimitiveType):
return before.subtype == after.subtype
########## Strings
if isinstance(before, StringType) and isinstance(after, StringType):
return before.limit == after.limit and \
before.is_nullable == after.is_nullable
########## Vectors
if isinstance(before, VectorType) and isinstance(after, VectorType):
return before.limit == after.limit and \
before.is_nullable == after.is_nullable and \
self.constraints_match(before.element_type, after.element_type)
########## Arrays
if isinstance(before, ArrayType) and isinstance(after, ArrayType):
assert before.count == after.count
return self.constraints_match(before.element_type,
after.element_type)
########## Handles
if isinstance(before, HandleType) and isinstance(after, HandleType):
return before.handle_type == after.handle_type and \
before.is_nullable == after.is_nullable
if isinstance(before, NullableType) and \
isinstance(after, NullableType):
# nullability changes are constraints changes
if before.is_nullable != after.is_nullable:
return False
if isinstance(before, RequestType) and isinstance(after, RequestType):
return before.protocol == after.protocol
if isinstance(before, ProtocolIdentifierType) and \
isinstance(after, ProtocolIdentifierType):
return before.identifier == after.identifier
if isinstance(before, StructIdentifierType) and \
isinstance(after, StructIdentifierType):
b_struct_members = before.declaration.members
a_struct_members = after.declaration.members
assert len(b_struct_members) == len(a_struct_members)
if len(b_struct_members) == 0:
# all empty structs are the same
return True
return all(
self.constraints_match(b.type, a.type)
for b, a in zip(b_struct_members, a_struct_members))
if isinstance(before, TableIdentifierType) and \
isinstance(after, TableIdentifierType):
b_table_members: typing.Dict[int, TableMember] = {
m.ordinal: m
for m in before.declaration.members
}
a_table_members: typing.Dict[int, TableMember] = {
m.ordinal: m
for m in after.declaration.members
}
for ordinal, b_member in b_table_members.items():
a_member = a_table_members.get(ordinal)
if a_member is None:
# leaving out an ordinal breaks constraints
return False
if b_member.reserved or a_member.reserved:
# changing to/from reserved is fine
continue
if not self.constraints_match(b_member.type, a_member.type):
return False
# it's fine if more members were added to after
return True
if isinstance(before, UnionIdentifierType) and \
isinstance(after, UnionIdentifierType):
b_union_members = before.declaration.members
a_union_members = after.declaration.members
if len(b_union_members) != len(a_union_members):
return False
# empty unions are illegal
assert len(b_union_members) != 0
return all(
self.constraints_match(b.type, a.type)
for b, a in zip(b_union_members, a_union_members))
if isinstance(before, XUnionIdentifierType) and \
isinstance(after, XUnionIdentifierType):
# Note: this is applying a strict-mode interpretation
b_xunion_members = before.declaration.members
a_xunion_members = after.declaration.members
if len(b_xunion_members) != len(a_xunion_members):
return False
# empty xunions are illegal
assert len(b_xunion_members) > 0
# members by ordinal
b_members = {m.ordinal: m for m in b_xunion_members}
a_members = {m.ordinal: m for m in a_xunion_members}
# they both have the same set of ordinals
if frozenset(b_members.keys()) != frozenset(a_members.keys()):
return False
return all(
self.constraints_match(b_members[o].type, a_members[o].type)
for o in b_members.keys())
if isinstance(before, EnumIdentifierType) and \
isinstance(after, EnumIdentifierType):
# this is the strict-mode interpretation of enums
assert len(before.declaration.members) == \
len(after.declaration.members)
before_member_values = set(
m.value for m in before.declaration.members)
after_member_values = set(
m.value for m in after.declaration.members)
return before_member_values == after_member_values
if isinstance(before, BitsIdentifierType) and \
isinstance(after, BitsIdentifierType):
# this is the strict-mode interpretation of bits
return before.declaration.mask == after.declaration.mask
raise NotImplementedError(
"Don't know how to compare constraints for %r (%r) and %r (%r)" %
(type(before), before, type(after), after))
| 44.77 | 97 | 0.589755 |
9223b4eb6e7bcc67d48886b46d7f3df0cd8c7ad6 | 44,952 | py | Python | openmdao/jacobians/tests/test_jacobian.py | friedenhe/OpenMDAO | db1d7e22a8bf9f66afa82ec3544b7244d5545f6d | [
"Apache-2.0"
] | null | null | null | openmdao/jacobians/tests/test_jacobian.py | friedenhe/OpenMDAO | db1d7e22a8bf9f66afa82ec3544b7244d5545f6d | [
"Apache-2.0"
] | 2 | 2021-06-10T15:29:27.000Z | 2021-08-18T20:07:56.000Z | openmdao/jacobians/tests/test_jacobian.py | friedenhe/OpenMDAO | db1d7e22a8bf9f66afa82ec3544b7244d5545f6d | [
"Apache-2.0"
] | null | null | null | """ Test the Jacobian objects."""
import itertools
import sys
import unittest
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from openmdao.api import IndepVarComp, Group, Problem, \
ExplicitComponent, ImplicitComponent, ExecComp, \
NewtonSolver, ScipyKrylov, \
LinearBlockGS, DirectSolver
from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials
from openmdao.utils.array_utils import rand_sparsity
from openmdao.test_suite.components.paraboloid import Paraboloid
from openmdao.api import ScipyOptimizeDriver
try:
from parameterized import parameterized
except ImportError:
from openmdao.utils.assert_utils import SkipParameterized as parameterized
class MyExplicitComp(ExplicitComponent):
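    """Component whose sub-jacobians are built with the supplied jac_type
    constructor (dense ndarray, scipy sparse matrix, or [data, rows, cols]
    list), so the same tests can exercise every sub-jacobian format."""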
def __init__(self, jac_type):
super().__init__()
self._jac_type = jac_type
def setup(self):
self.add_input('x', val=np.zeros(2))
self.add_input('y', val=np.zeros(2))
self.add_output('f', val=np.zeros(2))
val = self._jac_type(np.array([[1., 1.], [1., 1.]]))
if isinstance(val, list):
self.declare_partials('f', ['x','y'], rows=val[1], cols=val[2], val=val[0])
else:
self.declare_partials('f', ['x','y'], val=val)
def compute(self, inputs, outputs):
x = inputs['x']
y = inputs['y']
outputs['f'][0] = (x[0]-3.0)**2 + x[0]*x[1] + (x[1]+4.0)**2 - 3.0 + \
y[0]*17. - y[0]*y[1] + 2.*y[1]
outputs['f'][1] = outputs['f'][0]*3.0
def compute_partials(self, inputs, partials):
x = inputs['x']
y = inputs['y']
jac1 = self._jac_type(np.array([
[2.0*x[0] - 6.0 + x[1], 2.0*x[1] + 8.0 + x[0]],
[(2.0*x[0] - 6.0 + x[1])*3., (2.0*x[1] + 8.0 + x[0])*3.]
]))
if isinstance(jac1, list):
jac1 = jac1[0]
partials['f', 'x'] = jac1
jac2 = self._jac_type(np.array([
[17.-y[1], 2.-y[0]],
[(17.-y[1])*3., (2.-y[0])*3.]
]))
if isinstance(jac2, list):
jac2 = jac2[0]
partials['f', 'y'] = jac2
class MyExplicitComp2(ExplicitComponent):
def __init__(self, jac_type):
super().__init__()
self._jac_type = jac_type
def setup(self):
self.add_input('w', val=np.zeros(3))
self.add_input('z', val=0.0)
self.add_output('f', val=0.0)
val = self._jac_type(np.array([[7.]]))
if isinstance(val, list):
self.declare_partials('f', 'z', rows=val[1], cols=val[2], val=val[0])
else:
self.declare_partials('f', 'z', val=val)
val = self._jac_type(np.array([[1., 1., 1.]]))
if isinstance(val, list):
self.declare_partials('f', 'w', rows=val[1], cols=val[2], val=val[0])
else:
self.declare_partials('f', 'w', val=val)
def compute(self, inputs, outputs):
w = inputs['w']
z = inputs['z']
outputs['f'] = (w[0]-5.0)**2 + (w[1]+1.0)**2 + w[2]*6. + z*7.
def compute_partials(self, inputs, partials):
w = inputs['w']
z = inputs['z']
jac = self._jac_type(np.array([[
2.0*w[0] - 10.0,
2.0*w[1] + 2.0,
6.
]]))
if isinstance(jac, list):
jac = jac[0]
partials['f', 'w'] = jac
class ExplicitSetItemComp(ExplicitComponent):
def __init__(self, dtype, value, shape, constructor):
self._dtype = dtype
self._shape = shape
self._value = value
self._constructor = constructor
super().__init__()
def setup(self):
if self._shape == 'scalar':
in_val = 1
out_val = 1
elif self._shape == '1D_array':
in_val = np.array([1])
out_val = np.array([1, 2, 3, 4, 5])
elif self._shape == '2D_array':
in_val = np.array([1, 2, 3])
out_val = np.array([1, 2, 3])
if self._dtype == 'int':
scale = 1
elif self._dtype == 'float':
scale = 1.
elif self._dtype == 'complex':
scale = 1j
self.add_input('in', val=in_val*scale)
self.add_output('out', val=out_val*scale)
self.declare_partials(of='*', wrt='*')
def compute_partials(self, inputs, partials):
partials['out', 'in'] = self._constructor(self._value)
class SimpleCompWithPrintPartials(ExplicitComponent):
def setup(self):
self.add_input('x', val=0.0)
self.add_input('y', val=0.0)
self.add_output('f_xy', val=0.0, upper=1.0)
self.declare_partials(of='*', wrt='*')
self.count = 0
self.partials_name_pairs = []
self.partials_values = []
def compute(self, inputs, outputs):
x = inputs['x']
y = inputs['y']
outputs['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0
def compute_partials(self, inputs, partials):
x = inputs['x']
y = inputs['y']
partials['f_xy', 'x'] = 2.0*x - 6.0 + y
partials['f_xy', 'y'] = 2.0*y + 8.0 + x
        if self.count < 1: # Only want to save these once for the test
for k in partials:
self.partials_name_pairs.append(k)
for k, v in partials.items():
self.partials_values.append((k,v))
self.count += 1
def arr2list(arr):
"""Convert a numpy array to a 'sparse' list."""
data = []
rows = []
cols = []
for row in range(arr.shape[0]):
for col in range(arr.shape[1]):
rows.append(row)
cols.append(col)
data.append(arr[row, col])
return [np.array(data), np.array(rows), np.array(cols)]
def arr2revlist(arr):
"""Convert a numpy array to a 'sparse' list in reverse order."""
lst = arr2list(arr)
return [lst[0][::-1], lst[1][::-1], lst[2][::-1]]
def inverted_coo(arr):
"""Convert an ordered coo matrix into one with columns in reverse order
so we can test unsorted coo matrices.
"""
shape = arr.shape
arr = coo_matrix(arr)
return coo_matrix((arr.data[::-1], (arr.row[::-1], arr.col[::-1])), shape=shape)
def inverted_csr(arr):
"""Convert an ordered coo matrix into a csr with columns in reverse order
so we can test unsorted csr matrices.
"""
return inverted_coo(arr).tocsr()
def _test_func_name(func, num, param):
args = []
for p in param.args:
try:
arg = p.__name__
except:
arg = str(p)
args.append(arg)
return 'test_jacobian_src_indices_' + '_'.join(args)
class TestJacobian(unittest.TestCase):
@parameterized.expand(itertools.product(
['dense', 'csc'],
[np.array, coo_matrix, csr_matrix, inverted_coo, inverted_csr, arr2list, arr2revlist],
[False, True], # not nested, nested
[0, 1], # extra calls to linearize
), name_func=_test_func_name
)
def test_src_indices(self, assembled_jac, comp_jac_class, nested, lincalls):
self._setup_model(assembled_jac, comp_jac_class, nested, lincalls)
# if we multiply our jacobian (at x,y = ones) by our work vec of 1's,
# we get fwd_check
fwd_check = np.array([-1.0, -1.0, -1.0, -1.0, -1.0, 24., 74., 8.])
# if we multiply our jacobian's transpose by our work vec of 1's,
# we get rev_check
rev_check = np.array([35., 5., -9., 63., 3., -1., 6., -1.])
self._check_fwd(self.prob, fwd_check)
# to catch issues with constant subjacobians, repeatedly call linearize
for i in range(lincalls):
self.prob.model.run_linearize()
self._check_fwd(self.prob, fwd_check)
self._check_rev(self.prob, rev_check)
# Make sure that checking jacobian works for sparse subjacs.
if comp_jac_class in [coo_matrix, csr_matrix, inverted_coo, inverted_csr]:
partials = self.prob.check_partials(out_stream=None)
assert_check_partials(partials, atol=1e-5, rtol=1e-5)
def _setup_model(self, assembled_jac, comp_jac_class, nested, lincalls):
self.prob = prob = Problem()
if nested:
top = prob.model.add_subsystem('G1', Group())
else:
top = prob.model
indep = top.add_subsystem('indep', IndepVarComp())
indep.add_output('a', val=np.ones(3))
indep.add_output('b', val=np.ones(2))
top.add_subsystem('C1', MyExplicitComp(comp_jac_class))
top.add_subsystem('C2', MyExplicitComp2(comp_jac_class))
top.connect('indep.a', 'C1.x', src_indices=[2,0])
top.connect('indep.b', 'C1.y')
top.connect('indep.a', 'C2.w', src_indices=[0,2,1])
top.connect('C1.f', 'C2.z', src_indices=[1])
top.nonlinear_solver = NewtonSolver(solve_subsystems=False)
top.nonlinear_solver.linear_solver = ScipyKrylov(maxiter=100)
top.linear_solver = ScipyKrylov(
maxiter=200, atol=1e-10, rtol=1e-10, assemble_jac=True)
top.options['assembled_jac_type'] = assembled_jac
prob.set_solver_print(level=0)
prob.setup()
prob.run_model()
def _check_fwd(self, prob, check_vec):
d_inputs, d_outputs, d_residuals = prob.model.get_linear_vectors()
work = np.ones(d_outputs._data.size)
# fwd apply_linear test
d_outputs.set_val(1.0)
prob.model.run_apply_linear('fwd')
d_residuals.set_val(d_residuals.asarray() - check_vec)
self.assertAlmostEqual(d_residuals.get_norm(), 0)
# fwd solve_linear test
d_outputs.set_val(0.0)
d_residuals.set_val(check_vec)
prob.model.run_solve_linear('fwd')
d_outputs -= work
self.assertAlmostEqual(d_outputs.get_norm(), 0, delta=1e-6)
def _check_rev(self, prob, check_vec):
d_inputs, d_outputs, d_residuals = prob.model.get_linear_vectors()
work = np.ones(d_outputs._data.size)
# rev apply_linear test
d_residuals.set_val(1.0)
prob.model.run_apply_linear('rev')
d_outputs.set_val(d_outputs.asarray() - check_vec)
self.assertAlmostEqual(d_outputs.get_norm(), 0)
# rev solve_linear test
d_residuals.set_val(0.0)
d_outputs.set_val(check_vec)
prob.model.run_solve_linear('rev')
d_residuals -= work
self.assertAlmostEqual(d_residuals.get_norm(), 0, delta=1e-6)
dtypes = [
('int', 1),
('float', 2.1),
# ('complex', 3.2 + 1.1j), # TODO: enable when Vectors support complex entries.
]
shapes = [
('scalar', lambda x: x, (1, 1)),
('1D_array', lambda x: np.array([x + i for i in range(5)]), (5, 1)),
('2D_array', lambda x: np.array([[x + i + 2 * j for i in range(3)] for j in range(3)]),
(3, 3))
]
@parameterized.expand(itertools.product(dtypes, shapes), name_func=
lambda f, n, p: '_'.join(['test_jacobian_set_item', p.args[0][0], p.args[1][0]]))
def test_jacobian_set_item(self, dtypes, shapes):
shape, constructor, expected_shape = shapes
dtype, value = dtypes
prob = Problem()
comp = ExplicitSetItemComp(dtype, value, shape, constructor)
comp = prob.model.add_subsystem('C1', comp)
prob.setup()
prob.set_solver_print(level=0)
prob.run_model()
prob.model.run_apply_nonlinear()
prob.model.run_linearize()
expected = constructor(value)
J = comp._jacobian
jac_out = J['out', 'in']
self.assertEqual(len(jac_out.shape), 2)
expected_dtype = np.promote_types(dtype, float)
self.assertEqual(jac_out.dtype, expected_dtype)
assert_near_equal(jac_out, np.atleast_2d(expected).reshape(expected_shape), 1e-15)
def test_group_assembled_jac_with_ext_mat(self):
class TwoSellarDis1(ExplicitComponent):
"""
Component containing Discipline 1 -- no derivatives version.
"""
def setup(self):
self.add_input('z', val=np.zeros(2))
self.add_input('x', val=np.zeros(2))
self.add_input('y2', val=np.ones(2))
self.add_output('y1', val=np.ones(2))
self.declare_partials(of='*', wrt='*')
def compute(self, inputs, outputs):
z1 = inputs['z'][0]
z2 = inputs['z'][1]
x1 = inputs['x']
y2 = inputs['y2']
outputs['y1'][0] = z1**2 + z2 + x1[0] - 0.2*y2[0]
outputs['y1'][1] = z1**2 + z2 + x1[0] - 0.2*y2[0]
def compute_partials(self, inputs, partials):
"""
Jacobian for Sellar discipline 1.
"""
partials['y1', 'y2'] =np.array([[-0.2, 0.], [0., -0.2]])
partials['y1', 'z'] = np.array([[2.0 * inputs['z'][0], 1.0], [2.0 * inputs['z'][0], 1.0]])
partials['y1', 'x'] = np.eye(2)
class TwoSellarDis2(ExplicitComponent):
def setup(self):
self.add_input('z', val=np.zeros(2))
self.add_input('y1', val=np.ones(2))
self.add_output('y2', val=np.ones(2))
self.declare_partials('*', '*', method='fd')
def compute(self, inputs, outputs):
z1 = inputs['z'][0]
z2 = inputs['z'][1]
y1 = inputs['y1']
# Note: this may cause some issues. However, y1 is constrained to be
                # above 3.16, so let's just let it converge, and the optimizer will
# throw it out
if y1[0].real < 0.0:
y1[0] *= -1
if y1[1].real < 0.0:
y1[1] *= -1
outputs['y2'][0] = y1[0]**.5 + z1 + z2
outputs['y2'][1] = y1[1]**.5 + z1 + z2
def compute_partials(self, inputs, J):
y1 = inputs['y1']
if y1[0].real < 0.0:
y1[0] *= -1
if y1[1].real < 0.0:
y1[1] *= -1
J['y2', 'y1'] = np.array([[.5*y1[0]**-.5, 0.], [0., .5*y1[1]**-.5]])
J['y2', 'z'] = np.array([[1.0, 1.0], [1.0, 1.0]])
prob = Problem()
model = prob.model
model.add_subsystem('px', IndepVarComp('x', np.array([1.0, 1.0])), promotes=['x'])
model.add_subsystem('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
sup = model.add_subsystem('sup', Group(), promotes=['*'])
sub1 = sup.add_subsystem('sub1', Group(), promotes=['*'])
sub2 = sup.add_subsystem('sub2', Group(), promotes=['*'])
d1 = sub1.add_subsystem('d1', TwoSellarDis1(), promotes=['x', 'z', 'y1', 'y2'])
sub2.add_subsystem('d2', TwoSellarDis2(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1[0] - y1[1]', y1=np.array([0.0, 0.0])),
promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', ExecComp('con2 = y2[0] + y2[1] - 24.0', y2=np.array([0.0, 0.0])),
promotes=['con2', 'y2'])
model.linear_solver = LinearBlockGS()
sup.linear_solver = LinearBlockGS()
sub1.linear_solver = DirectSolver(assemble_jac=True)
sub2.linear_solver = DirectSolver(assemble_jac=True)
prob.set_solver_print(level=0)
prob.setup(check=False, mode='rev')
prob.run_model()
of = ['con1', 'con2']
wrt = ['x', 'z']
# Make sure we don't get a size mismatch.
derivs = prob.compute_totals(of=of, wrt=wrt)
def test_assembled_jac_bad_key(self):
# this test fails if AssembledJacobian._update sets in_start with 'output' instead of 'input'
prob = Problem()
prob.model = Group(assembled_jac_type='dense')
prob.model.add_subsystem('indep', IndepVarComp('x', 1.0))
prob.model.add_subsystem('C1', ExecComp('c=a*2.0+b', a=0., b=0., c=0.))
c2 = prob.model.add_subsystem('C2', ExecComp('d=a*2.0+b+c', a=0., b=0., c=0., d=0.))
c3 = prob.model.add_subsystem('C3', ExecComp('ee=a*2.0', a=0., ee=0.))
prob.model.nonlinear_solver = NewtonSolver(solve_subsystems=False)
prob.model.linear_solver = DirectSolver(assemble_jac=True)
prob.model.connect('indep.x', 'C1.a')
prob.model.connect('indep.x', 'C2.a')
prob.model.connect('C1.c', 'C2.b')
prob.model.connect('C2.d', 'C3.a')
prob.set_solver_print(level=0)
prob.setup()
prob.run_model()
assert_near_equal(prob['C3.ee'], 8.0, 0000.1)
def test_assembled_jacobian_submat_indexing_dense(self):
prob = Problem(model=Group(assembled_jac_type='dense'))
indeps = prob.model.add_subsystem('indeps', IndepVarComp())
indeps.add_output('x', 1.0)
indeps.add_output('y', 5.0)
indeps.add_output('z', 9.0)
G1 = prob.model.add_subsystem('G1', Group())
G1.add_subsystem('C1', ExecComp('y=2.0*x*x'))
G1.add_subsystem('C2', ExecComp('y=3.0*x*x'))
prob.model.nonlinear_solver = NewtonSolver(solve_subsystems=False)
G1.linear_solver = DirectSolver(assemble_jac=True)
# before the fix, we got bad offsets into the _ext_mtx matrix.
# to get entries in _ext_mtx, there must be at least one connection
# to an input in the system that owns the AssembledJacobian, from
# a source that is outside of that system. In this case, the 'indeps'
# system is outside of the 'G1' group which owns the AssembledJacobian.
prob.model.connect('indeps.y', 'G1.C1.x')
prob.model.connect('indeps.z', 'G1.C2.x')
prob.setup()
prob.run_model()
assert_near_equal(prob['G1.C1.y'], 50.0)
assert_near_equal(prob['G1.C2.y'], 243.0)
def test_assembled_jacobian_submat_indexing_csc(self):
prob = Problem(model=Group(assembled_jac_type='dense'))
indeps = prob.model.add_subsystem('indeps', IndepVarComp())
indeps.add_output('x', 1.0)
indeps.add_output('y', 5.0)
indeps.add_output('z', 9.0)
G1 = prob.model.add_subsystem('G1', Group())
G1.add_subsystem('C1', ExecComp('y=2.0*x*x'))
G1.add_subsystem('C2', ExecComp('y=3.0*x*x'))
# prob.model.nonlinear_solver = NewtonSolver(solve_subsystems=False)
prob.model.linear_solver = DirectSolver(assemble_jac=True)
G1.linear_solver = DirectSolver(assemble_jac=True)
G1.nonlinear_solver = NewtonSolver(solve_subsystems=False)
# before the fix, we got bad offsets into the _ext_mtx matrix.
# to get entries in _ext_mtx, there must be at least one connection
# to an input in the system that owns the AssembledJacobian, from
# a source that is outside of that system. In this case, the 'indeps'
# system is outside of the 'G1' group which owns the AssembledJacobian.
prob.model.connect('indeps.y', 'G1.C1.x')
prob.model.connect('indeps.z', 'G1.C2.x')
prob.setup()
prob.run_model()
assert_near_equal(prob['G1.C1.y'], 50.0)
assert_near_equal(prob['G1.C2.y'], 243.0)
def test_declare_partial_reference(self):
# Test for a bug where declare_partials is given an array reference
# that compute also uses and could get corrupted
class Comp(ExplicitComponent):
def setup(self):
self.add_input('x', val=1.0, shape=2)
self.add_output('y', val=1.0, shape=2)
self.val = 2 * np.ones(2)
self.rows = np.arange(2)
self.cols = np.arange(2)
self.declare_partials(
'y', 'x', val=self.val, rows=self.rows, cols=self.cols)
def compute(self, inputs, outputs):
outputs['y'][:] = 0.
np.add.at(
outputs['y'], self.rows,
self.val * inputs['x'][self.cols])
prob = Problem(model=Comp())
prob.setup()
prob.run_model()
assert_near_equal(prob['y'], 2 * np.ones(2))
def test_declare_partials_row_col_size_mismatch(self):
# Make sure we have clear error messages.
class Comp1(ExplicitComponent):
def setup(self):
self.add_input('x', val=np.array((2, 2)))
self.add_output('y', val=np.array((2, 2)))
self.declare_partials('y', 'x', rows=np.array([0, 1]), cols=np.array([0]))
def compute(self, inputs, outputs):
pass
class Comp2(ExplicitComponent):
def setup(self):
self.add_input('x', val=np.array((2, 2)))
self.add_output('y', val=np.array((2, 2)))
self.declare_partials('y', 'x', rows=np.array([0]), cols=np.array([0, 1]))
def compute(self, inputs, outputs):
pass
prob = Problem()
model = prob.model
model.add_subsystem('comp', Comp1())
msg = "'comp' <class Comp1>: d\(y\)/d\(x\): declare_partials has been called with rows and cols, which" + \
" should be arrays of equal length, but rows is length 2 while " + \
"cols is length 1."
with self.assertRaisesRegex(RuntimeError, msg):
prob.setup()
prob = Problem()
model = prob.model
model.add_subsystem('comp', Comp2())
msg = "'comp' <class Comp2>: d\(y\)/d\(x\): declare_partials has been called with rows and cols, which" + \
" should be arrays of equal length, but rows is length 1 while " + \
"cols is length 2."
with self.assertRaisesRegex(RuntimeError, msg):
prob.setup()
def test_assembled_jacobian_unsupported_cases(self):
class ParaboloidApply(ImplicitComponent):
def setup(self):
self.add_input('x', val=0.0)
self.add_input('y', val=0.0)
self.add_output('f_xy', val=0.0)
def linearize(self, inputs, outputs, jacobian):
return
def apply_linear(self, inputs, outputs, d_inputs, d_outputs, d_residuals,
mode):
d_residuals['x'] += (np.exp(outputs['x']) - 2*inputs['a']**2 * outputs['x'])*d_outputs['x']
d_residuals['x'] += (-2 * inputs['a'] * outputs['x']**2)*d_inputs['a']
# One level deep
prob = Problem()
model = prob.model = Group(assembled_jac_type='dense')
model.linear_solver = DirectSolver(assemble_jac=True)
model.add_subsystem('p1', IndepVarComp('x', val=1.0))
model.add_subsystem('p2', IndepVarComp('y', val=1.0))
model.add_subsystem('comp', ParaboloidApply())
model.connect('p1.x', 'comp.x')
model.connect('p2.y', 'comp.y')
prob.setup()
msg = "AssembledJacobian not supported for matrix-free subcomponent."
with self.assertRaisesRegex(Exception, msg):
prob.run_model()
# Nested
prob = Problem()
model = prob.model = Group(assembled_jac_type='dense')
model.linear_solver = DirectSolver(assemble_jac=True)
sub = model.add_subsystem('sub', Group())
model.add_subsystem('p1', IndepVarComp('x', val=1.0))
model.add_subsystem('p2', IndepVarComp('y', val=1.0))
sub.add_subsystem('comp', ParaboloidApply())
model.connect('p1.x', 'sub.comp.x')
model.connect('p2.y', 'sub.comp.y')
prob.setup()
msg = "AssembledJacobian not supported for matrix-free subcomponent."
with self.assertRaisesRegex(Exception, msg):
prob.run_model()
# Try a component that is derived from a matrix-free one
class FurtherDerived(ParaboloidApply):
def do_nothing(self):
pass
prob = Problem()
model = prob.model = Group(assembled_jac_type='dense')
model.linear_solver = DirectSolver(assemble_jac=True)
model.add_subsystem('p1', IndepVarComp('x', val=1.0))
model.add_subsystem('p2', IndepVarComp('y', val=1.0))
model.add_subsystem('comp', FurtherDerived())
model.connect('p1.x', 'comp.x')
model.connect('p2.y', 'comp.y')
prob.setup()
msg = "AssembledJacobian not supported for matrix-free subcomponent."
with self.assertRaisesRegex(Exception, msg):
prob.run_model()
# Make sure regular comps don't give an error.
prob = Problem()
model = prob.model = Group(assembled_jac_type='dense')
model.linear_solver = DirectSolver(assemble_jac=True)
model.add_subsystem('p1', IndepVarComp('x', val=1.0))
model.add_subsystem('p2', IndepVarComp('y', val=1.0))
model.add_subsystem('comp', Paraboloid())
model.connect('p1.x', 'comp.x')
model.connect('p2.y', 'comp.y')
prob.setup()
prob.final_setup()
class JacVecComp(Paraboloid):
def setup_partials(self):
pass
def linearize(self, inputs, outputs, jacobian):
return
def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
pass
# One level deep
prob = Problem()
model = prob.model = Group(assembled_jac_type='dense')
model.linear_solver = DirectSolver(assemble_jac=True)
model.add_subsystem('p1', IndepVarComp('x', val=1.0))
model.add_subsystem('p2', IndepVarComp('y', val=1.0))
model.add_subsystem('comp', JacVecComp())
model.connect('p1.x', 'comp.x')
model.connect('p2.y', 'comp.y')
prob.setup()
msg = "AssembledJacobian not supported for matrix-free subcomponent."
with self.assertRaisesRegex(Exception, msg):
prob.run_model()
def test_access_undeclared_subjac(self):
class Undeclared(ExplicitComponent):
def setup(self):
self.add_input('x', val=0.0)
self.add_output('y', val=0.0)
def compute(self, inputs, outputs):
pass
def compute_partials(self, inputs, partials):
partials['y', 'x'] = 1.0
prob = Problem()
model = prob.model
model.add_subsystem('p1', IndepVarComp('x', val=1.0))
model.add_subsystem('comp', Undeclared())
model.connect('p1.x', 'comp.x')
prob.setup()
prob.run_model()
        msg = r'Variable name pair \("{}", "{}"\) must first be declared.'
with self.assertRaisesRegex(KeyError, msg.format('y', 'x')):
J = prob.compute_totals(of=['comp.y'], wrt=['p1.x'])
def test_one_src_2_tgts_with_src_indices_densejac(self):
size = 4
prob = Problem(model=Group(assembled_jac_type='dense'))
indeps = prob.model.add_subsystem('indeps', IndepVarComp('x', np.ones(size)))
G1 = prob.model.add_subsystem('G1', Group())
G1.add_subsystem('C1', ExecComp('z=2.0*y+3.0*x', x=np.zeros(size//2), y=np.zeros(size//2),
z=np.zeros(size//2)))
prob.model.linear_solver = DirectSolver(assemble_jac=True)
prob.model.add_objective('G1.C1.z')
prob.model.add_design_var('indeps.x')
prob.model.connect('indeps.x', 'G1.C1.x', src_indices=[0,1])
prob.model.connect('indeps.x', 'G1.C1.y', src_indices=[2,3])
prob.setup()
prob.run_model()
J = prob.compute_totals(of=['G1.C1.z'], wrt=['indeps.x'])
assert_near_equal(J['G1.C1.z', 'indeps.x'], np.array([[ 3., 0., 2., 0.],
[-0., 3., 0., 2.]]), .0001)
def test_one_src_2_tgts_csc_error(self):
size = 10
prob = Problem()
indeps = prob.model.add_subsystem('indeps', IndepVarComp('x', np.ones(size)))
G1 = prob.model.add_subsystem('G1', Group())
G1.add_subsystem('C1', ExecComp('z=2.0*y+3.0*x', x=np.zeros(size), y=np.zeros(size),
z=np.zeros(size)))
prob.model.linear_solver = DirectSolver(assemble_jac=True)
prob.model.add_objective('G1.C1.z')
prob.model.add_design_var('indeps.x')
prob.model.connect('indeps.x', 'G1.C1.x')
prob.model.connect('indeps.x', 'G1.C1.y')
prob.setup(mode='fwd')
prob.run_model()
J = prob.compute_totals(of=['G1.C1.z'], wrt=['indeps.x'])
assert_near_equal(J['G1.C1.z', 'indeps.x'], np.eye(10)*5.0, .0001)
def test_dict_properties(self):
# Make sure you can use the partials variable passed to compute_partials as a dict
prob = Problem()
indeps = prob.model.add_subsystem('indeps', IndepVarComp(), promotes=['*'])
indeps.add_output('x', .5)
indeps.add_output('y', 10.0)
comp = SimpleCompWithPrintPartials()
prob.model.add_subsystem('paraboloid', comp, promotes_inputs=['x', 'y'])
prob.driver = ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.model.add_design_var('x', lower=-50, upper=50)
prob.model.add_design_var('y', lower=-50, upper=50)
prob.model.add_objective('paraboloid.f_xy')
prob.setup()
prob.run_driver()
expected = [
(('paraboloid.f_xy', 'paraboloid.f_xy'),[-1.]),
(('paraboloid.f_xy', 'paraboloid.x'),[[0.]]),
(('paraboloid.f_xy', 'paraboloid.y'),[[0.]]),
]
self.assertEqual(sorted(comp.partials_name_pairs), sorted(e[0] for e in sorted(expected)))
self.assertEqual(sorted(comp.partials_name_pairs),
sorted(e[0] for e in sorted(expected)))
for act, exp in zip(
[e[1] for e in sorted(comp.partials_values)],
[e[1] for e in sorted(expected)],
):
assert_near_equal(act,exp, 1e-5)
def test_compute_totals_relevancy(self):
# When a model has desvars and responses defined, components that don't lie in the relevancy
# graph between them do not take part in the linear solve. This led to some derivatives
        # being returned as zero in certain instances when it was called with wrt or of
        # variables that were not in the relevant set.
class DParaboloid(ExplicitComponent):
def setup(self):
ndvs = 3
self.add_input('w', val=1.)
self.add_input('x', shape=ndvs)
self.add_output('y', shape=1)
self.add_output('z', shape=ndvs)
self.declare_partials('y', 'x')
self.declare_partials('y', 'w')
self.declare_partials('z', 'x')
def compute(self, inputs, outputs):
x = inputs['x']
y_g = np.sum((x-5)**2)
outputs['y'] = np.sum(y_g) + (inputs['w']-10)**2
outputs['z'] = x**2
def compute_partials(self, inputs, J):
x = inputs['x']
J['y', 'x'] = 2*(x-5)
J['y', 'w'] = 2*(inputs['w']-10)
J['z', 'x'] = np.diag(2*x)
p = Problem()
d_ivc = p.model.add_subsystem('distrib_ivc',
IndepVarComp(),
promotes=['*'])
ndvs = 3
d_ivc.add_output('x', 2*np.ones(ndvs))
ivc = p.model.add_subsystem('ivc',
IndepVarComp(),
promotes=['*'])
ivc.add_output('w', 2.0)
p.model.add_subsystem('dp', DParaboloid(), promotes=['*'])
p.model.add_design_var('x', lower=-100, upper=100)
p.model.add_objective('y')
p.setup(mode='rev')
p.run_model()
J = p.compute_totals(of=['y', 'z'], wrt=['w', 'x'])
assert(J['y','w'][0,0] == -16)
def test_wildcard_partials_bug(self):
# Test for a bug where using wildcards when declaring partials resulted in extra
# derivatives of an output wrt other outputs.
class ODE(ExplicitComponent):
def setup(self):
self.add_input('a', 1.0)
self.add_output('x', 1.0)
self.add_output('y', 1.0)
self.declare_partials(of='*', wrt='*', method='cs')
def compute(self, inputs, outputs):
a = inputs['a']
outputs['x'] = 3.0 * a
outputs['y'] = 7.0 * a
p = Problem()
p.model.add_subsystem('ode', ODE())
p.model.linear_solver = DirectSolver()
p.model.add_design_var('ode.a')
p.model.add_constraint('ode.x', lower=0.0)
p.model.add_constraint('ode.y', lower=0.0)
p.setup()
p.run_model()
p.compute_totals()
keys = p.model.ode._jacobian._subjacs_info
self.assertTrue(('ode.x', 'ode.y') not in keys)
self.assertTrue(('ode.y', 'ode.x') not in keys)
def test_set_col(self):
class MyComp(ExplicitComponent):
def setup(self):
self.ofsizes = [3, 5, 2]
self.wrtsizes = [4, 1, 3]
for i, sz in enumerate(self.wrtsizes):
self.add_input(f"x{i}", val=np.ones(sz))
for i, sz in enumerate(self.ofsizes):
self.add_output(f"y{i}", val=np.ones(sz))
boolarr = rand_sparsity((sum(self.ofsizes), sum(self.wrtsizes)), .3, dtype=bool)
self.sparsity = np.asarray(boolarr.toarray(), dtype=float)
ofstart = ofend = 0
for i, ofsz in enumerate(self.ofsizes):
wrtstart = wrtend = 0
ofend += ofsz
for j, wrtsz in enumerate(self.wrtsizes):
wrtend += wrtsz
sub = self.sparsity[ofstart:ofend, wrtstart:wrtend]
rows, cols = np.nonzero(sub)
self.declare_partials([f"y{i}"], [f"x{j}"], rows=rows, cols=cols)
wrtstart = wrtend
ofstart = ofend
def compute(self, inputs, outputs):
outputs.set_val(self.sparsity.dot(inputs.asarray()) * 2.)
def compute_partials(self, inputs, partials):
# these partials are actually constant, but...
ofstart = ofend = 0
for i, ofsz in enumerate(self.ofsizes):
wrtstart = wrtend = 0
ofend += ofsz
for j, wrtsz in enumerate(self.wrtsizes):
wrtend += wrtsz
sub = self.sparsity[ofstart:ofend, wrtstart:wrtend]
subinfo = self._subjacs_info[(f'comp.y{i}', f'comp.x{j}')]
partials[f'y{i}', f'x{j}'] = sub[subinfo['rows'], subinfo['cols']] * 2.
wrtstart = wrtend
ofstart = ofend
p = Problem()
comp = p.model.add_subsystem('comp', MyComp())
p.setup()
for i, sz in enumerate(comp.wrtsizes):
p[f'comp.x{i}'] = np.random.random(sz)
p.run_model()
ofs = [f'comp.y{i}' for i in range(len(comp.ofsizes))]
wrts = [f'comp.x{i}' for i in range(len(comp.wrtsizes))]
p.check_partials(out_stream=None, show_only_incorrect=True)
p.model.comp._jacobian.set_col(p.model.comp, 5, comp.sparsity[:, 5] * 99)
# check dy0/dx2 (3x3)
subinfo = comp._subjacs_info['comp.y0', 'comp.x2']
arr = np.zeros(subinfo['shape'])
arr[subinfo['rows'], subinfo['cols']] = subinfo['val']
assert_near_equal(arr[:, 0], comp.sparsity[0:3, 5] * 99)
# check dy1/dx2 (5x3)
subinfo = comp._subjacs_info['comp.y1', 'comp.x2']
arr = np.zeros(subinfo['shape'])
arr[subinfo['rows'], subinfo['cols']] = subinfo['val']
assert_near_equal(arr[:, 0], comp.sparsity[3:8, 5] * 99)
# check dy2/dx2 (2x3)
subinfo = comp._subjacs_info['comp.y2', 'comp.x2']
arr = np.zeros(subinfo['shape'])
arr[subinfo['rows'], subinfo['cols']] = subinfo['val']
assert_near_equal(arr[:, 0], comp.sparsity[8:, 5] * 99)
def test_jacsize_error_message(self):
class MyComp(ExplicitComponent):
def setup(self):
self.add_input('x', np.ones(2))
self.add_output('y', np.ones(3))
self.declare_partials('y', 'x', rows=np.array([0, 1]), cols=np.array([1, 0]))
def compute_partials(self, inputs, partials):
partials['y', 'x'] = np.ones((3, ))
prob = Problem()
model = prob.model
model.add_subsystem('comp', MyComp())
prob.setup()
prob.run_model()
msg = "'comp' \<class MyComp\>: Error calling compute_partials\(\), DictionaryJacobian in 'comp' \<class MyComp\>: Sub-jacobian for key \('comp.y', 'comp.x'\) has the wrong shape \(\(3,\)\), expected \(\(2,\)\)."
with self.assertRaisesRegex(ValueError, msg):
prob.compute_totals(of=['comp.y'], wrt=['comp.x'])
class MySparseComp(ExplicitComponent):
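    """Computes the same outputs as MyDenseComp below, but declares its
    partials in sparse (rows/cols) list format."""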
def setup(self):
self.add_input('y', np.zeros(2))
self.add_input('x', np.zeros(2))
self.add_output('z', np.zeros(2))
# partials use sparse list format
self.declare_partials('z', 'x', rows=[0, 1], cols=[0, 1])
self.declare_partials('z', 'y', rows=[0, 1], cols=[1, 0])
def compute(self, inputs, outputs):
outputs['z'] = np.array([3.0*inputs['x'][0]**3 + 4.0*inputs['y'][1]**2,
5.0*inputs['x'][1]**2 * inputs['y'][0]])
def compute_partials(self, inputs, partials):
partials['z', 'x'] = np.array([9.0*inputs['x'][0]**2, 10.0*inputs['x'][1]*inputs['y'][0]])
partials['z', 'y'] = np.array([8.0*inputs['y'][1], 5.0*inputs['x'][1]**2])
class MyDenseComp(ExplicitComponent):
def setup(self):
self.add_input('y', np.zeros(2))
self.add_input('x', np.zeros(2))
self.add_output('z', np.zeros(2))
# partials are dense
self.declare_partials('z', 'x')
self.declare_partials('z', 'y')
def compute(self, inputs, outputs):
outputs['z'] = np.array([3.0*inputs['x'][0]**3 + 4.0*inputs['y'][1]**2,
5.0*inputs['x'][1]**2 * inputs['y'][0]])
def compute_partials(self, inputs, partials):
partials['z', 'x'] = np.array([[9.0*inputs['x'][0]**2, 0.0], [0.0, 10.0*inputs['x'][1]*inputs['y'][0]]])
partials['z', 'y'] = np.array([[0.0, 8.0*inputs['y'][1]], [5.0*inputs['x'][1]**2, 0.0]])
class OverlappingPartialsTestCase(unittest.TestCase):
def test_repeated_src_indices_csc(self):
size = 2
p = Problem()
indeps = p.model.add_subsystem('indeps', IndepVarComp('x', np.ones(size)))
p.model.add_subsystem('C1', ExecComp('z=3.0*x[0]**3 + 2.0*x[1]**2', x=np.zeros(size)))
p.model.options['assembled_jac_type'] = 'csc'
p.model.linear_solver = DirectSolver(assemble_jac=True)
p.model.connect('indeps.x', 'C1.x', src_indices=[1,1])
p.setup()
p.run_model()
J = p.compute_totals(of=['C1.z'], wrt=['indeps.x'], return_format='array')
np.testing.assert_almost_equal(p.model._assembled_jac._int_mtx._matrix.toarray(),
np.array([[-1., 0., 0.],
[ 0., -1., 0.],
[ 0., 13., -1.]]))
def test_repeated_src_indices_dense(self):
size = 2
p = Problem()
indeps = p.model.add_subsystem('indeps', IndepVarComp('x', np.ones(size)))
p.model.add_subsystem('C1', ExecComp('z=3.0*x[0]**3 + 2.0*x[1]**2', x=np.zeros(size)))
p.model.options['assembled_jac_type'] = 'dense'
p.model.linear_solver = DirectSolver(assemble_jac=True)
p.model.connect('indeps.x', 'C1.x', src_indices=[1,1])
p.setup()
p.run_model()
J = p.compute_totals(of=['C1.z'], wrt=['indeps.x'], return_format='array')
np.testing.assert_almost_equal(p.model._assembled_jac._int_mtx._matrix,
np.array([[-1., 0., 0.],
[ 0., -1., 0.],
[ 0., 13., -1.]]))
def test_multi_inputs_same_src_dense_comp(self):
p = Problem()
indeps = p.model.add_subsystem('indeps', IndepVarComp('x', np.ones(2)))
p.model.add_subsystem('C1', MyDenseComp())
p.model.options['assembled_jac_type'] = 'csc'
p.model.linear_solver = DirectSolver(assemble_jac=True)
p.model.connect('indeps.x', ('C1.x', 'C1.y'))
p.setup()
p.run_model()
J = p.compute_totals(of=['C1.z'], wrt=['indeps.x'], return_format='array')
np.testing.assert_almost_equal(p.model._assembled_jac._int_mtx._matrix.toarray(),
np.array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 9., 8., -1., 0.],
[ 5., 10., 0., -1.]]))
def test_multi_inputs_same_src_sparse_comp(self):
p = Problem()
indeps = p.model.add_subsystem('indeps', IndepVarComp('x', np.ones(2)))
p.model.add_subsystem('C1', MySparseComp())
p.model.options['assembled_jac_type'] = 'csc'
p.model.linear_solver = DirectSolver(assemble_jac=True)
p.model.connect('indeps.x', ('C1.x', 'C1.y'))
p.setup()
p.run_model()
J = p.compute_totals(of=['C1.z'], wrt=['indeps.x'], return_format='array')
np.testing.assert_almost_equal(p.model._assembled_jac._int_mtx._matrix.toarray(),
np.array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 9., 8., -1., 0.],
[ 5., 10., 0., -1.]]))
class MaskingTestCase(unittest.TestCase):
def test_csc_masking(self):
class CCBladeResidualComp(ImplicitComponent):
def initialize(self):
self.options.declare('num_nodes', types=int)
self.options.declare('num_radial', types=int)
def setup(self):
num_nodes = self.options['num_nodes']
num_radial = self.options['num_radial']
self.add_input('chord', shape=(1, num_radial))
self.add_input('theta', shape=(1, num_radial))
self.add_output('phi', lower=-0.5*np.pi, upper=0.0,
shape=(num_nodes, num_radial))
self.add_output('Tp', shape=(num_nodes, num_radial))
of_names = ('phi', 'Tp')
row_col = np.arange(num_radial)
for name in of_names:
self.declare_partials(name, 'chord', rows=row_col, cols=row_col)
self.declare_partials(name, 'theta', rows=row_col, cols=row_col, val=0.0)
self.declare_partials(name, 'phi', rows=row_col, cols=row_col)
self.declare_partials('Tp', 'Tp', rows=row_col, cols=row_col, val=1.)
def linearize(self, inputs, outputs, partials):
partials['phi', 'chord'] = np.array([1., 2, 3, 4])
partials['phi', 'phi'] = np.array([5., 6, 7, 8])
partials['Tp', 'chord'] = np.array([9., 10, 11, 12])
partials['Tp', 'phi'] = np.array([13., 14, 15, 16])
prob = Problem()
model = prob.model
comp = IndepVarComp()
comp.add_output('chord', val=np.ones((4, )))
model.add_subsystem('indep_var_comp', comp, promotes=['*'])
comp = CCBladeResidualComp(num_nodes=1, num_radial=4, assembled_jac_type='csc')
comp.linear_solver = DirectSolver(assemble_jac=True)
model.add_subsystem('ccblade_comp', comp, promotes_inputs=['chord'], promotes_outputs=['Tp'])
prob.setup(mode='fwd')
prob.run_model()
totals = prob.compute_totals(of=['Tp'], wrt=['chord'], return_format='array')
expected = np.array([
[-6.4,0.,0.,0.],
[ 0.,-5.33333333,0.,0.],
[ 0.,0.,-4.57142857,0.],
[ 0.,0.,0.,-4.]]
)
np.testing.assert_allclose(totals, expected)
if __name__ == '__main__':
unittest.main()
| 36.576078 | 220 | 0.546427 |
351cfa60f3c37d069c6baf48cadf28efd1bd6e2c | 3,518 | py | Python | demo/webcam.py | Zhangyongtao123/maskrcnn_benchmark | 059f04c26df2c1bd19dd7360ee2487ce3461da37 | [
"MIT"
] | null | null | null | demo/webcam.py | Zhangyongtao123/maskrcnn_benchmark | 059f04c26df2c1bd19dd7360ee2487ce3461da37 | [
"MIT"
] | null | null | null | demo/webcam.py | Zhangyongtao123/maskrcnn_benchmark | 059f04c26df2c1bd19dd7360ee2487ce3461da37 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import argparse
import cv2
import os
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
import time
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Webcam Demo")
parser.add_argument(
"--config-file",
default="../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.7,
help="Minimum score for the prediction to be shown",
)
parser.add_argument(
"--min-image-size",
type=int,
default=224,
help="Smallest size of the image to feed to the model. "
"Model was trained with 800, which gives best results",
)
parser.add_argument(
"--show-mask-heatmaps",
dest="show_mask_heatmaps",
help="Show a heatmap probability for the top masks-per-dim masks",
action="store_true",
)
parser.add_argument(
"--masks-per-dim",
type=int,
default=2,
help="Number of heatmaps per dimension to show",
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
# load config from file and command-line arguments
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
# prepare object that handles inference plus adds predictions on top of image
coco_demo = COCODemo(
cfg,
confidence_threshold=args.confidence_threshold,
show_mask_heatmaps=args.show_mask_heatmaps,
masks_per_dim=args.masks_per_dim,
min_image_size=args.min_image_size,
)
# added for video
video_path = './1.MP4'
cam = cv2.VideoCapture(video_path)
fps = 30
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
# video_writer = cv2.VideoWriter(filename='./result.avi', fourcc=fourcc, fps=fps, frameSize=(640,480))
video_writer = cv2.VideoWriter(filename='./result2.avi', fourcc=cv2.VideoWriter_fourcc(*'MJPG'), fps=fps,
frameSize=(853, 480))
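    # frameSize must match the size the frames are resized to before being written (853, 480)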
while True:
start_time = time.time()
ret_val, img = cam.read()
if img is None:
break
composite = coco_demo.run_on_opencv_image(img)
# cv2.imwrite('a.jpg', composite)
print("Time: {:.2f} s / img".format(time.time() - start_time))
# cv2.imshow("COCO detections", composite)
# if cv2.waitKey(1) == 27:
# break # esc to quit
# added by zhangyongtao
composite = cv2.resize(composite, (853, 480))
video_writer.write(composite)
    # # ./result1.avi: this relative path is resolved relative to the .py script, so the avi video file is saved in the folder containing the script
# for i in range(1, 1841):
# p = i
# # print(str(p)+'.png'+'233333')
# if os.path.exists('E:/data/Caltech_jpg/set07/set07_V000_' + str(p) + '.jpg'): # 判断图片是否存在
# img = cv2.imread(filename='E:/data/Caltech_jpg/set07/set07_V000_' + str(p) + '.jpg')
# # cv2.waitKey(10)
# img = cv2.resize(img, (640, 480))
# video_writer.write(img)
# print(str(p) + '.jpg' + ' done!')
# cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| 31.981818 | 109 | 0.604321 |
d98ea976b4088f9cb5e791ed99f66d6b156bd56e | 7,901 | py | Python | code/face-detection/tools/wider_test.py | FRH-Code-Data/Appendix | 106a7c65c178d2b446e3bd8fb192ac2f4b7e323f | [
"CC-BY-4.0"
] | 5 | 2020-02-28T09:28:55.000Z | 2021-06-03T02:15:42.000Z | code/face-detection/tools/wider_test.py | FRH-Code-Data/Appendix | 106a7c65c178d2b446e3bd8fb192ac2f4b7e323f | [
"CC-BY-4.0"
] | 6 | 2020-03-08T22:58:13.000Z | 2022-03-12T00:15:14.000Z | code/face-detection/tools/wider_test.py | FRH-Code-Data/Appendix | 106a7c65c178d2b446e3bd8fb192ac2f4b7e323f | [
"CC-BY-4.0"
] | 3 | 2020-02-28T09:29:03.000Z | 2020-03-09T05:08:07.000Z | #-*- coding:utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import torch
import argparse
import torch.nn as nn
import torch.utils.data as data
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import os.path as osp
import cv2
import time
import numpy as np
from PIL import Image
import scipy.io as sio
from data.config import cfg
from s3fd import build_s3fd
from torch.autograd import Variable
from utils.augmentations import to_chw_bgr
parser = argparse.ArgumentParser(description='s3fd evaluatuon wider')
parser.add_argument('--model', type=str,
default='weights/s3fd.pth', help='trained model')
parser.add_argument('--thresh', default=0.05, type=float,
help='Final confidence threshold')
args = parser.parse_args()
use_cuda = torch.cuda.is_available()
if use_cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
def detect_face(net, img, shrink):
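    """Resize the image by `shrink`, run one forward pass of the detector and
    return [xmin, ymin, xmax, ymax, score] rows above the confidence
    threshold, with box coordinates mapped back to the original image."""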
if shrink != 1:
img = cv2.resize(img, None, None, fx=shrink, fy=shrink,
interpolation=cv2.INTER_LINEAR)
x = to_chw_bgr(img)
x = x.astype('float32')
x -= cfg.img_mean
x = x[[2, 1, 0], :, :]
x = Variable(torch.from_numpy(x).unsqueeze(0))
if use_cuda:
x = x.cuda()
# print(x.size())
y = net(x)
detections = y.data
detections = detections.cpu().numpy()
det_conf = detections[0, 1, :, 0]
det_xmin = img.shape[1] * detections[0, 1, :, 1] / shrink
det_ymin = img.shape[0] * detections[0, 1, :, 2] / shrink
det_xmax = img.shape[1] * detections[0, 1, :, 3] / shrink
det_ymax = img.shape[0] * detections[0, 1, :, 4] / shrink
det = np.column_stack((det_xmin, det_ymin, det_xmax, det_ymax, det_conf))
keep_index = np.where(det[:, 4] >= args.thresh)[0]
det = det[keep_index, :]
return det
def flip_test(net, image, shrink):
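    """Detect on the horizontally flipped image and mirror the resulting
    boxes back into the original image's coordinate frame."""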
image_f = cv2.flip(image, 1)
det_f = detect_face(net, image_f, shrink)
det_t = np.zeros(det_f.shape)
det_t[:, 0] = image.shape[1] - det_f[:, 2]
det_t[:, 1] = det_f[:, 1]
det_t[:, 2] = image.shape[1] - det_f[:, 0]
det_t[:, 3] = det_f[:, 3]
det_t[:, 4] = det_f[:, 4]
return det_t
def multi_scale_test(net, image, max_im_shrink):
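    """Detect at a shrunken scale (keeping only the larger boxes) and at one
    or more enlarged scales (keeping only the smaller boxes); returns both
    detection sets."""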
    # detect on the shrunken image; this pass only keeps big faces
st = 0.5 if max_im_shrink >= 0.75 else 0.5 * max_im_shrink
det_s = detect_face(net, image, st)
index = np.where(np.maximum(
det_s[:, 2] - det_s[:, 0] + 1, det_s[:, 3] - det_s[:, 1] + 1) > 30)[0]
det_s = det_s[index, :]
    # enlarge once
bt = min(2, max_im_shrink) if max_im_shrink > 1 else (
st + max_im_shrink) / 2
det_b = detect_face(net, image, bt)
    # keep enlarging small images to pick up small faces
if max_im_shrink > 2:
bt *= 2
while bt < max_im_shrink:
det_b = np.row_stack((det_b, detect_face(net, image, bt)))
bt *= 2
det_b = np.row_stack((det_b, detect_face(net, image, max_im_shrink)))
    # enlarged passes only keep small faces
if bt > 1:
index = np.where(np.minimum(
det_b[:, 2] - det_b[:, 0] + 1, det_b[:, 3] - det_b[:, 1] + 1) < 100)[0]
det_b = det_b[index, :]
else:
index = np.where(np.maximum(
det_b[:, 2] - det_b[:, 0] + 1, det_b[:, 3] - det_b[:, 1] + 1) > 30)[0]
det_b = det_b[index, :]
return det_s, det_b
def bbox_vote(det):
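    """Merge detections whose IoU with the current top-scoring box is >= 0.3
    into a single box via score-weighted averaging, keep the group's maximum
    score, and return at most the 750 highest-scoring merged boxes."""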
order = det[:, 4].ravel().argsort()[::-1]
det = det[order, :]
while det.shape[0] > 0:
# IOU
area = (det[:, 2] - det[:, 0] + 1) * (det[:, 3] - det[:, 1] + 1)
xx1 = np.maximum(det[0, 0], det[:, 0])
yy1 = np.maximum(det[0, 1], det[:, 1])
xx2 = np.minimum(det[0, 2], det[:, 2])
yy2 = np.minimum(det[0, 3], det[:, 3])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
o = inter / (area[0] + area[:] - inter)
        # collect the detections to merge and remove them from the list
merge_index = np.where(o >= 0.3)[0]
det_accu = det[merge_index, :]
det = np.delete(det, merge_index, 0)
if merge_index.shape[0] <= 1:
continue
det_accu[:, 0:4] = det_accu[:, 0:4] * np.tile(det_accu[:, -1:], (1, 4))
max_score = np.max(det_accu[:, 4])
det_accu_sum = np.zeros((1, 5))
det_accu_sum[:, 0:4] = np.sum(
det_accu[:, 0:4], axis=0) / np.sum(det_accu[:, -1:])
det_accu_sum[:, 4] = max_score
try:
dets = np.row_stack((dets, det_accu_sum))
except:
dets = det_accu_sum
dets = dets[0:750, :]
return dets
def get_data():
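    """Load the WIDER FACE .mat index for the selected subset and return the
    event list, the per-event file list, the image directory and the
    directory that detection results are written to."""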
subset = 'val'
    if subset == 'val':
wider_face = sio.loadmat(
'./eval_tools/wider_face_val.mat')
else:
wider_face = sio.loadmat(
'./eval_tools/wider_face_test.mat')
event_list = wider_face['event_list']
file_list = wider_face['file_list']
del wider_face
imgs_path = os.path.join(
cfg.FACE.WIDER_DIR, 'WIDER_{}'.format(subset), 'images')
save_path = 'eval_tools/s3fd_{}'.format(subset)
return event_list, file_list, imgs_path, save_path
if __name__ == '__main__':
event_list, file_list, imgs_path, save_path = get_data()
cfg.USE_NMS = False
net = build_s3fd('test', cfg.NUM_CLASSES)
net.load_state_dict(torch.load(args.model))
net.eval()
if use_cuda:
net.cuda()
        cudnn.benchmark = True
#transform = S3FDBasicTransform(cfg.INPUT_SIZE, cfg.MEANS)
counter = 0
for index, event in enumerate(event_list):
filelist = file_list[index][0]
path = os.path.join(save_path, event[0][0].encode('utf-8'))
if not os.path.exists(path):
os.makedirs(path)
for num, file in enumerate(filelist):
im_name = file[0][0].encode('utf-8')
in_file = os.path.join(imgs_path, event[0][0], im_name[:] + '.jpg')
#img = cv2.imread(in_file)
img = Image.open(in_file)
if img.mode == 'L':
img = img.convert('RGB')
img = np.array(img)
# max_im_shrink = (0x7fffffff / 577.0 /
# (img.shape[0] * img.shape[1])) ** 0.5
max_im_shrink = np.sqrt(
1700 * 1200 / (img.shape[0] * img.shape[1]))
shrink = max_im_shrink if max_im_shrink < 1 else 1
counter += 1
t1 = time.time()
det0 = detect_face(net, img, shrink)
det1 = flip_test(net, img, shrink) # flip test
[det2, det3] = multi_scale_test(net, img, max_im_shrink)
det = np.row_stack((det0, det1, det2, det3))
dets = bbox_vote(det)
t2 = time.time()
print('Detect %04d th image costs %.4f' % (counter, t2 - t1))
fout = open(osp.join(save_path, event[0][
0].encode('utf-8'), im_name + '.txt'), 'w')
fout.write('{:s}\n'.format(event[0][0].encode(
'utf-8') + '/' + im_name + '.jpg'))
fout.write('{:d}\n'.format(dets.shape[0]))
for i in xrange(dets.shape[0]):
xmin = dets[i][0]
ymin = dets[i][1]
xmax = dets[i][2]
ymax = dets[i][3]
score = dets[i][4]
fout.write('{:.1f} {:.1f} {:.1f} {:.1f} {:.3f}\n'.
format(xmin, ymin, (xmax - xmin + 1), (ymax - ymin + 1), score))
| 32.64876 | 92 | 0.542463 |
2b5860397ec2c58d9f45fee3e3eebdc0358fad80 | 2,484 | py | Python | wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/known_argument_names.py | borisgrafx/client | c079f7816947a3092b500751eb920fda3866985f | [
"MIT"
] | 3,968 | 2017-08-23T21:27:19.000Z | 2022-03-31T22:00:19.000Z | wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/known_argument_names.py | borisgrafx/client | c079f7816947a3092b500751eb920fda3866985f | [
"MIT"
] | 2,725 | 2017-04-17T00:29:15.000Z | 2022-03-31T21:01:53.000Z | wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/known_argument_names.py | borisgrafx/client | c079f7816947a3092b500751eb920fda3866985f | [
"MIT"
] | 351 | 2018-04-08T19:39:34.000Z | 2022-03-30T19:38:08.000Z | from ...error import GraphQLError
from ...language import ast
from ...utils.quoted_or_list import quoted_or_list
from ...utils.suggestion_list import suggestion_list
from .base import ValidationRule
def _unknown_arg_message(arg_name, field_name, type, suggested_args):
message = 'Unknown argument "{}" on field "{}" of type "{}".'.format(arg_name, field_name, type)
if suggested_args:
message += ' Did you mean {}?'.format(quoted_or_list(suggested_args))
return message
def _unknown_directive_arg_message(arg_name, directive_name, suggested_args):
message = 'Unknown argument "{}" on directive "@{}".'.format(arg_name, directive_name)
if suggested_args:
message += ' Did you mean {}?'.format(quoted_or_list(suggested_args))
return message
class KnownArgumentNames(ValidationRule):
def enter_Argument(self, node, key, parent, path, ancestors):
argument_of = ancestors[-1]
if isinstance(argument_of, ast.Field):
field_def = self.context.get_field_def()
if not field_def:
return
field_arg_def = field_def.args.get(node.name.value)
if not field_arg_def:
parent_type = self.context.get_parent_type()
assert parent_type
self.context.report_error(GraphQLError(
_unknown_arg_message(
node.name.value,
argument_of.name.value,
parent_type.name,
suggestion_list(
node.name.value,
(arg_name for arg_name in field_def.args.keys())
)
),
[node]
))
elif isinstance(argument_of, ast.Directive):
directive = self.context.get_directive()
if not directive:
return
directive_arg_def = directive.args.get(node.name.value)
if not directive_arg_def:
self.context.report_error(GraphQLError(
_unknown_directive_arg_message(
node.name.value,
directive.name,
suggestion_list(
node.name.value,
(arg_name for arg_name in directive.args.keys())
)
),
[node]
))
| 34.985915 | 100 | 0.551932 |
4cae1ed4ffee609363ee0126af570870c7592f44 | 12,536 | py | Python | src/lamplib/src/genny/tasks/run_tests.py | samanca/genny | 0e45d89f998f28e6227bedcce4e8d7d1b1d13d79 | [
"Apache-2.0"
] | null | null | null | src/lamplib/src/genny/tasks/run_tests.py | samanca/genny | 0e45d89f998f28e6227bedcce4e8d7d1b1d13d79 | [
"Apache-2.0"
] | 1 | 2021-09-06T20:06:24.000Z | 2021-09-06T20:06:24.000Z | src/lamplib/src/genny/tasks/run_tests.py | samanca/genny | 0e45d89f998f28e6227bedcce4e8d7d1b1d13d79 | [
"Apache-2.0"
] | null | null | null | import structlog
import os
import shutil
from typing import Callable, TypeVar, Tuple, Optional
from genny import curator, cmd_runner, toolchain
SLOG = structlog.get_logger(__name__)
# We rely on catch2 to report test failures, but it doesn't always do so.
# See https://github.com/catchorg/Catch2/issues/1210
# As a workaround, we generate a dummy report with a failed test that is
# deleted if the test succeeds.
_sentinel_report = """
<?xml version="1.0" encoding="UTF-8"?>
<testsuites>
<testsuite name="test_failure_sentinel" errors="0" failures="1" tests="1" hostname="tbd" time="1.0" timestamp="2019-01-01T00:00:00Z">
<testcase classname="test_failure_sentinel" name="A test failed early and a report was not generated" time="1.0">
<failure message="test did not exit cleanly, see task log for detail" type="">
</failure>
</testcase>
<system-out/>
<system-err/>
</testsuite>
</testsuites>
""".strip()
T = TypeVar("T")
def _outcome_was_true(outcome: bool) -> bool:
return outcome
def _run_command_with_sentinel_report(
genny_repo_root: str,
workspace_root: str,
cmd_func: Callable[..., T],
checker_func: Callable[[T], bool] = None,
) -> Tuple[T, bool]:
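    """Write a failing sentinel XUnit report, run cmd_func inside poplar_grpc,
    and delete the sentinel only if checker_func accepts the output, so a
    command that dies before reporting still shows up as a test failure.
    Returns (command output, success)."""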
if checker_func is None:
checker_func = _outcome_was_true
sentinel_file = os.path.join(workspace_root, "build", "XUnitXML", "sentinel.junit.xml")
os.makedirs(name=os.path.dirname(sentinel_file), exist_ok=True)
success = False
try:
with open(sentinel_file, "w") as f:
f.write(_sentinel_report)
SLOG.debug("Created sentinel file", sentinel_file=sentinel_file)
with curator.poplar_grpc(
cleanup_metrics=True, workspace_root=workspace_root, genny_repo_root=genny_repo_root
):
cmd_output = cmd_func()
success = checker_func(cmd_output)
return cmd_output, success
finally:
if success and os.path.exists(sentinel_file):
os.remove(sentinel_file)
SLOG.debug(
"Command finished. Left sentinel_file in place unless success.",
success=success,
sentinel_file=sentinel_file,
)
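# Illustrative sketch only (not part of genny): a minimal example of how the
# sentinel-report helper above can wrap an arbitrary check. `fake_check` and the
# directory arguments are hypothetical; the point is that sentinel.junit.xml is left
# behind only when the wrapped callable fails or raises, so CI always sees at least
# one failing test case in that situation.
def _example_sentinel_usage(genny_repo_root: str, workspace_root: str) -> bool:
    def fake_check() -> bool:
        # Stand-in for invoking a real test binary; report success unconditionally.
        return True
    _, success = _run_command_with_sentinel_report(
        genny_repo_root=genny_repo_root,
        workspace_root=workspace_root,
        cmd_func=fake_check,
    )
    return success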
def cmake_test(genny_repo_root: str, workspace_root: str):
info = toolchain.toolchain_info(genny_repo_root=genny_repo_root, workspace_root=workspace_root)
workdir = os.path.join(genny_repo_root, "build")
xunit_dir = os.path.join(workspace_root, "build", "XUnitXML")
os.makedirs(xunit_dir, exist_ok=True)
ctest_cmd = [
"ctest",
"--verbose",
"--label-exclude",
"(standalone|sharded|single_node_replset|three_node_replset|benchmark)",
]
def cmd_func() -> bool:
output: cmd_runner.RunCommandOutput = cmd_runner.run_command(
cmd=ctest_cmd, cwd=workdir, env=info.toolchain_env, capture=False, check=True
)
return output.returncode == 0
_run_command_with_sentinel_report(
cmd_func=cmd_func, workspace_root=workspace_root, genny_repo_root=genny_repo_root
)
def benchmark_test(genny_repo_root: str, workspace_root: str):
info = toolchain.toolchain_info(genny_repo_root=genny_repo_root, workspace_root=workspace_root)
workdir = os.path.join(genny_repo_root, "build")
ctest_cmd = ["ctest", "--label-regex", "(benchmark)"]
def cmd_func():
output: cmd_runner.RunCommandOutput = cmd_runner.run_command(
cmd=ctest_cmd, cwd=workdir, env=info.toolchain_env, capture=False, check=True
)
return output.returncode == 0
_run_command_with_sentinel_report(
cmd_func=cmd_func, workspace_root=workspace_root, genny_repo_root=genny_repo_root
)
def _check_create_new_actor_test_report(workspace_root: str) -> Callable[[str], bool]:
def out(cmd_output: str) -> bool:
passed = False
report_file = os.path.join(
workspace_root, "build", "XUnitXML", "create_new_actor_test.junit.xml"
)
if not os.path.isfile(report_file):
SLOG.error("Failed to find report file", report_file=report_file)
return passed
expected_to_find = {"100 == 101", 'failures="1"'}
with open(report_file) as f:
report = f.read()
passed = all(ex in report for ex in expected_to_find)
if passed:
SLOG.debug("Test passed. Removing report file.", report_file=report_file)
os.remove(report_file) # Remove the report file for the expected failure.
else:
SLOG.error(
"test for create-new-actor script did not succeed. Failed to find expected "
"messages in report file",
expected_to_find=expected_to_find,
)
return passed
return out
# See the logic in _setup_resmoke.
# These are the "Binaries" evergreen artifact URLs for mongodb-mongo compile tasks.
# The binaries must be compatible with the version of the mongo repo checked out in use for resmoke,
# which is the sha "298d4d6bbb9980b74bded06241067fe6771bef68" mentioned below.
_canned_artifacts = {
"osx": "https://mciuploads.s3.amazonaws.com/mongodb-mongo-master/macos/298d4d6bbb9980b74bded06241067fe6771bef68/binaries/mongo-mongodb_mongo_master_macos_298d4d6bbb9980b74bded06241067fe6771bef68_20_10_22_00_55_19.tgz",
"amazon2": "https://mciuploads.s3.amazonaws.com/mongodb-mongo-master/amazon2/298d4d6bbb9980b74bded06241067fe6771bef68/binaries/mongo-mongodb_mongo_master_amazon2_298d4d6bbb9980b74bded06241067fe6771bef68_20_10_22_00_55_19.tgz",
}
def _setup_resmoke(
workspace_root: str,
genny_repo_root: str,
mongo_dir: Optional[str],
mongodb_archive_url: Optional[str],
):
if mongo_dir is not None:
mongo_repo_path = mongo_dir
else:
evergreen_mongo_repo = os.path.join(workspace_root, "src", "mongo")
if os.path.exists(evergreen_mongo_repo):
mongo_repo_path = evergreen_mongo_repo
else:
mongo_repo_path = os.path.join(genny_repo_root, "build", "resmoke-mongo")
xunit_xml_path = os.path.join(workspace_root, "build", "XUnitXML")
os.makedirs(xunit_xml_path, exist_ok=True)
SLOG.info("Created xunit result dir", path=xunit_xml_path)
resmoke_venv: str = os.path.join(mongo_repo_path, "resmoke_venv")
resmoke_python: str = os.path.join(resmoke_venv, "bin", "python3")
# Clone repo unless exists
if not os.path.exists(mongo_repo_path):
SLOG.info("Mongo repo doesn't exist. Checking it out.", mongo_repo_path=mongo_repo_path)
cmd_runner.run_command(
cmd=["git", "clone", "git@github.com:mongodb/mongo.git", mongo_repo_path],
cwd=workspace_root,
check=True,
capture=False,
)
cmd_runner.run_command(
# If changing this sha, you may need to use later binaries
# in the _canned_artifacts dict.
cmd=["git", "checkout", "298d4d6bbb9980b74bded06241067fe6771bef68"],
cwd=mongo_repo_path,
check=True,
capture=False,
)
else:
SLOG.info("Using existing mongo repo checkout", mongo_repo_path=mongo_repo_path)
cmd_runner.run_command(
cmd=["git", "rev-parse", "HEAD"], check=False, cwd=mongo_repo_path, capture=False,
)
# Look for mongod in
# build/opt/mongo/db/mongod
# build/install/bin/mongod
# bin/
opt = os.path.join(mongo_repo_path, "build", "opt", "mongo", "db", "mongod")
install = os.path.join(mongo_repo_path, "build", "install", "bin", "mongod")
from_tarball = os.path.join(mongo_repo_path, "bin", "mongod")
if os.path.exists(opt):
mongod = opt
elif os.path.exists(install):
mongod = install
elif os.path.exists(from_tarball):
mongod = from_tarball
else:
mongod = None
if mongod is not None and mongodb_archive_url is not None:
SLOG.info(
"Found existing mongod so will not download artifacts.",
existing_mongod=mongod,
wont_download_artifacts_from=mongodb_archive_url,
)
if mongod is None:
SLOG.info(
"Couldn't find pre-build monogod. Fetching and installing.",
looked_at=(opt, install, from_tarball),
fetching=mongodb_archive_url,
)
if mongodb_archive_url is None:
info = toolchain.toolchain_info(
genny_repo_root=genny_repo_root, workspace_root=workspace_root
)
if info.is_darwin:
artifact_key = "osx"
elif info.linux_distro == "amazon2":
artifact_key = "amazon2"
else:
raise Exception(
f"No pre-built artifacts for distro {info.linux_distro}. You can either:"
f"1. compile/install a local mongo checkout in ./src/mongo."
f"2. Modify the _canned_artifacts dict in the genny python to include an artifact from a waterfall build."
f"3. Pass in the --mongodb-archive-url parameter to force a canned artifact."
)
mongodb_archive_url = _canned_artifacts[artifact_key]
cmd_runner.run_command(
cmd=["curl", "-LSs", mongodb_archive_url, "-o", "mongodb.tgz"],
cwd=mongo_repo_path,
capture=False,
check=True,
)
cmd_runner.run_command(
cmd=["tar", "--strip-components=1", "-zxf", "mongodb.tgz"],
cwd=mongo_repo_path,
capture=False,
check=True,
)
mongod = from_tarball
bin_dir = os.path.dirname(mongod)
# Setup resmoke venv unless exists
resmoke_setup_sentinel = os.path.join(resmoke_venv, "setup-done")
if not os.path.exists(resmoke_setup_sentinel):
SLOG.info("Resmoke venv doesn't exist. Creating.", resmoke_venv=resmoke_venv)
shutil.rmtree(resmoke_venv, ignore_errors=True)
import venv
venv.create(env_dir=resmoke_venv, with_pip=True, symlinks=True)
reqs_file = os.path.join(mongo_repo_path, "etc", "pip", "evgtest-requirements.txt")
cmd = [resmoke_python, "-mpip", "install", "-r", reqs_file]
cmd_runner.run_command(
cmd=cmd, cwd=workspace_root, capture=False, check=True,
)
open(resmoke_setup_sentinel, "w")
return resmoke_python, mongo_repo_path, bin_dir
def _nop_true(cmd_output: str) -> bool:
return True
def resmoke_test(
genny_repo_root: str,
workspace_root: str,
suites: str,
is_cnats: bool,
mongo_dir: Optional[str],
env: dict,
mongodb_archive_url: Optional[str],
):
if (not suites) and (not is_cnats):
raise ValueError('Must specify either "--suites" or "--create-new-actor-test-suite"')
if is_cnats:
suites = os.path.join(genny_repo_root, "src", "resmokeconfig", "genny_create_new_actor.yml")
checker_func = _check_create_new_actor_test_report(workspace_root=workspace_root)
else:
checker_func = _nop_true
resmoke_python, mongo_repo_path, bin_dir = _setup_resmoke(
workspace_root=workspace_root,
genny_repo_root=genny_repo_root,
mongo_dir=mongo_dir,
mongodb_archive_url=mongodb_archive_url,
)
mongod = os.path.join(bin_dir, "mongod")
mongo = os.path.join(bin_dir, "mongo")
mongos = os.path.join(bin_dir, "mongos")
cmd = [
resmoke_python,
os.path.join(mongo_repo_path, "buildscripts", "resmoke.py"),
"run",
"--suite",
suites,
"--configDir",
os.path.join(mongo_repo_path, "buildscripts", "resmokeconfig"),
"--mongod",
mongod,
"--mongo",
mongo,
"--mongos",
mongos,
]
def run_resmoke() -> None:
# See if we can put this in the resmoke suite def or something?
env["CTEST_OUTPUT_ON_FAILURE"] = "1"
cmd_runner.run_command(
cmd=cmd,
cwd=workspace_root,
env=env,
capture=False,
# If we're create_new_actor_test we don't want
# to barf when resmoke fails. We expect it to fail.
check=False if is_cnats else True, # `not is_cnats` was hard to read.
)
_run_command_with_sentinel_report(
workspace_root=workspace_root,
genny_repo_root=genny_repo_root,
cmd_func=run_resmoke,
checker_func=checker_func,
)
| 35.613636 | 230 | 0.650606 |
e27c1e9a5280b4f0dcf59f81e3a3092375e4ccf2 | 36,946 | py | Python | airflow/executors/kubernetes_executor.py | growbots/incubator-airflow | a8a4d322ee960ef51a03a87db44fe352abb910e6 | ["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | 2 | 2020-09-30T01:06:15.000Z | 2021-08-07T09:16:21.000Z | airflow/executors/kubernetes_executor.py | growbots/incubator-airflow | a8a4d322ee960ef51a03a87db44fe352abb910e6 | ["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | 1 | 2020-07-01T18:16:43.000Z | 2020-07-01T18:16:43.000Z | airflow/executors/kubernetes_executor.py | growbots/incubator-airflow | a8a4d322ee960ef51a03a87db44fe352abb910e6 | ["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | 2 | 2019-07-04T02:46:30.000Z | 2019-07-15T00:56:09.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import hashlib
from queue import Empty
import re
import json
import multiprocessing
from dateutil import parser
from uuid import uuid4
import kubernetes
from kubernetes import watch, client
from kubernetes.client.rest import ApiException
from airflow.configuration import conf
from airflow.kubernetes.pod_launcher import PodLauncher
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.worker_configuration import WorkerConfiguration
from airflow.executors.base_executor import BaseExecutor
from airflow.executors import Executors
from airflow.models import KubeResourceVersion, KubeWorkerIdentifier, TaskInstance
from airflow.utils.state import State
from airflow.utils.db import provide_session, create_session
from airflow import configuration, settings
from airflow.exceptions import AirflowConfigException, AirflowException
from airflow.utils.log.logging_mixin import LoggingMixin
class KubernetesExecutorConfig:
def __init__(self, image=None, image_pull_policy=None, request_memory=None,
request_cpu=None, limit_memory=None, limit_cpu=None,
gcp_service_account_key=None, node_selectors=None, affinity=None,
annotations=None, volumes=None, volume_mounts=None, tolerations=None):
self.image = image
self.image_pull_policy = image_pull_policy
self.request_memory = request_memory
self.request_cpu = request_cpu
self.limit_memory = limit_memory
self.limit_cpu = limit_cpu
self.gcp_service_account_key = gcp_service_account_key
self.node_selectors = node_selectors
self.affinity = affinity
self.annotations = annotations
self.volumes = volumes
self.volume_mounts = volume_mounts
self.tolerations = tolerations
def __repr__(self):
return "{}(image={}, image_pull_policy={}, request_memory={}, request_cpu={}, " \
"limit_memory={}, limit_cpu={}, gcp_service_account_key={}, " \
"node_selectors={}, affinity={}, annotations={}, volumes={}, " \
"volume_mounts={}, tolerations={})" \
.format(KubernetesExecutorConfig.__name__, self.image, self.image_pull_policy,
self.request_memory, self.request_cpu, self.limit_memory,
self.limit_cpu, self.gcp_service_account_key, self.node_selectors,
self.affinity, self.annotations, self.volumes, self.volume_mounts,
self.tolerations)
@staticmethod
def from_dict(obj):
if obj is None:
return KubernetesExecutorConfig()
if not isinstance(obj, dict):
raise TypeError(
'Cannot convert a non-dictionary object into a KubernetesExecutorConfig')
namespaced = obj.get(Executors.KubernetesExecutor, {})
return KubernetesExecutorConfig(
image=namespaced.get('image', None),
image_pull_policy=namespaced.get('image_pull_policy', None),
request_memory=namespaced.get('request_memory', None),
request_cpu=namespaced.get('request_cpu', None),
limit_memory=namespaced.get('limit_memory', None),
limit_cpu=namespaced.get('limit_cpu', None),
gcp_service_account_key=namespaced.get('gcp_service_account_key', None),
node_selectors=namespaced.get('node_selectors', None),
affinity=namespaced.get('affinity', None),
annotations=namespaced.get('annotations', {}),
volumes=namespaced.get('volumes', []),
volume_mounts=namespaced.get('volume_mounts', []),
tolerations=namespaced.get('tolerations', None),
)
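    # Added example comment (not in the original source): an operator-level
    # executor_config such as
    #   {"KubernetesExecutor": {"image": "myrepo/worker:1.0", "request_memory": "512Mi"}}
    # is namespaced under the executor name, so from_dict() above only reads the
    # nested dict and leaves every unspecified field as None or its default.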
def as_dict(self):
return {
'image': self.image,
'image_pull_policy': self.image_pull_policy,
'request_memory': self.request_memory,
'request_cpu': self.request_cpu,
'limit_memory': self.limit_memory,
'limit_cpu': self.limit_cpu,
'gcp_service_account_key': self.gcp_service_account_key,
'node_selectors': self.node_selectors,
'affinity': self.affinity,
'annotations': self.annotations,
'volumes': self.volumes,
'volume_mounts': self.volume_mounts,
'tolerations': self.tolerations,
}
class KubeConfig:
core_section = 'core'
kubernetes_section = 'kubernetes'
def __init__(self):
configuration_dict = configuration.as_dict(display_sensitive=True)
self.core_configuration = configuration_dict['core']
self.kube_secrets = configuration_dict.get('kubernetes_secrets', {})
self.kube_env_vars = configuration_dict.get('kubernetes_environment_variables', {})
self.env_from_configmap_ref = configuration.get(self.kubernetes_section,
'env_from_configmap_ref')
self.env_from_secret_ref = configuration.get(self.kubernetes_section,
'env_from_secret_ref')
self.airflow_home = settings.AIRFLOW_HOME
self.dags_folder = configuration.get(self.core_section, 'dags_folder')
self.parallelism = configuration.getint(self.core_section, 'PARALLELISM')
self.worker_container_repository = configuration.get(
self.kubernetes_section, 'worker_container_repository')
self.worker_container_tag = configuration.get(
self.kubernetes_section, 'worker_container_tag')
self.kube_image = '{}:{}'.format(
self.worker_container_repository, self.worker_container_tag)
self.kube_image_pull_policy = configuration.get(
self.kubernetes_section, "worker_container_image_pull_policy"
)
self.kube_node_selectors = configuration_dict.get('kubernetes_node_selectors', {})
self.kube_annotations = configuration_dict.get('kubernetes_annotations', {})
self.kube_labels = configuration_dict.get('kubernetes_labels', {})
self.delete_worker_pods = conf.getboolean(
self.kubernetes_section, 'delete_worker_pods')
self.worker_pods_creation_batch_size = conf.getint(
self.kubernetes_section, 'worker_pods_creation_batch_size')
self.worker_service_account_name = conf.get(
self.kubernetes_section, 'worker_service_account_name')
self.image_pull_secrets = conf.get(self.kubernetes_section, 'image_pull_secrets')
# NOTE: user can build the dags into the docker image directly,
# this will set to True if so
self.dags_in_image = conf.getboolean(self.kubernetes_section, 'dags_in_image')
# Run as user for pod security context
self.worker_run_as_user = conf.get(self.kubernetes_section, 'run_as_user')
self.worker_fs_group = conf.get(self.kubernetes_section, 'fs_group')
# NOTE: `git_repo` and `git_branch` must be specified together as a pair
# The http URL of the git repository to clone from
self.git_repo = conf.get(self.kubernetes_section, 'git_repo')
# The branch of the repository to be checked out
self.git_branch = conf.get(self.kubernetes_section, 'git_branch')
# Optionally, the directory in the git repository containing the dags
self.git_subpath = conf.get(self.kubernetes_section, 'git_subpath')
# Optionally, the root directory for git operations
self.git_sync_root = conf.get(self.kubernetes_section, 'git_sync_root')
# Optionally, the name at which to publish the checked-out files under --root
self.git_sync_dest = conf.get(self.kubernetes_section, 'git_sync_dest')
# Optionally, if git_dags_folder_mount_point is set the worker will use
# {git_dags_folder_mount_point}/{git_sync_dest}/{git_subpath} as dags_folder
self.git_dags_folder_mount_point = conf.get(self.kubernetes_section,
'git_dags_folder_mount_point')
# Optionally a user may supply a (`git_user` AND `git_password`) OR
# (`git_ssh_key_secret_name` AND `git_ssh_key_secret_key`) for private repositories
self.git_user = conf.get(self.kubernetes_section, 'git_user')
self.git_password = conf.get(self.kubernetes_section, 'git_password')
self.git_ssh_key_secret_name = conf.get(self.kubernetes_section, 'git_ssh_key_secret_name')
self.git_ssh_known_hosts_configmap_name = conf.get(self.kubernetes_section,
'git_ssh_known_hosts_configmap_name')
# NOTE: The user may optionally use a volume claim to mount a PV containing
# DAGs directly
self.dags_volume_claim = conf.get(self.kubernetes_section, 'dags_volume_claim')
# This prop may optionally be set for PV Claims and is used to write logs
self.logs_volume_claim = conf.get(self.kubernetes_section, 'logs_volume_claim')
# This prop may optionally be set for PV Claims and is used to locate DAGs
# on a SubPath
self.dags_volume_subpath = conf.get(
self.kubernetes_section, 'dags_volume_subpath')
# This prop may optionally be set for PV Claims and is used to locate logs
# on a SubPath
self.logs_volume_subpath = conf.get(
self.kubernetes_section, 'logs_volume_subpath')
# Optionally, hostPath volume containing DAGs
self.dags_volume_host = conf.get(self.kubernetes_section, 'dags_volume_host')
# Optionally, write logs to a hostPath Volume
self.logs_volume_host = conf.get(self.kubernetes_section, 'logs_volume_host')
# This prop may optionally be set for PV Claims and is used to write logs
self.base_log_folder = configuration.get(self.core_section, 'base_log_folder')
# The Kubernetes Namespace in which the Scheduler and Webserver reside. Note
# that if your
# cluster has RBAC enabled, your scheduler may need service account permissions to
# create, watch, get, and delete pods in this namespace.
self.kube_namespace = conf.get(self.kubernetes_section, 'namespace')
# The Kubernetes Namespace in which pods will be created by the executor. Note
# that if your
# cluster has RBAC enabled, your workers may need service account permissions to
# interact with cluster components.
self.executor_namespace = conf.get(self.kubernetes_section, 'namespace')
# Task secrets managed by KubernetesExecutor.
self.gcp_service_account_keys = conf.get(self.kubernetes_section,
'gcp_service_account_keys')
# If the user is using the git-sync container to clone their repository via git,
# allow them to specify repository, tag, and pod name for the init container.
self.git_sync_container_repository = conf.get(
self.kubernetes_section, 'git_sync_container_repository')
self.git_sync_container_tag = conf.get(
self.kubernetes_section, 'git_sync_container_tag')
self.git_sync_container = '{}:{}'.format(
self.git_sync_container_repository, self.git_sync_container_tag)
self.git_sync_init_container_name = conf.get(
self.kubernetes_section, 'git_sync_init_container_name')
# The worker pod may optionally have a valid Airflow config loaded via a
# configmap
self.airflow_configmap = conf.get(self.kubernetes_section, 'airflow_configmap')
affinity_json = conf.get(self.kubernetes_section, 'affinity')
if affinity_json:
self.kube_affinity = json.loads(affinity_json)
else:
self.kube_affinity = None
tolerations_json = conf.get(self.kubernetes_section, 'tolerations')
if tolerations_json:
self.kube_tolerations = json.loads(tolerations_json)
else:
self.kube_tolerations = None
kube_client_request_args = conf.get(self.kubernetes_section, 'kube_client_request_args')
if kube_client_request_args:
self.kube_client_request_args = json.loads(kube_client_request_args)
if self.kube_client_request_args['_request_timeout'] and \
isinstance(self.kube_client_request_args['_request_timeout'], list):
self.kube_client_request_args['_request_timeout'] = \
tuple(self.kube_client_request_args['_request_timeout'])
else:
self.kube_client_request_args = {}
self._validate()
def _validate(self):
# TODO: use XOR for dags_volume_claim and git_dags_folder_mount_point
if not self.dags_volume_claim \
and not self.dags_volume_host \
and not self.dags_in_image \
and (not self.git_repo or not self.git_branch or not self.git_dags_folder_mount_point):
raise AirflowConfigException(
'In kubernetes mode the following must be set in the `kubernetes` '
'config section: `dags_volume_claim` '
'or `dags_volume_host` '
'or `dags_in_image` '
'or `git_repo and git_branch and git_dags_folder_mount_point`')
if self.git_repo \
and (self.git_user or self.git_password) \
and self.git_ssh_key_secret_name:
raise AirflowConfigException(
'In kubernetes mode, using `git_repo` to pull the DAGs: '
'for private repositories, either `git_user` and `git_password` '
'must be set for authentication through user credentials; '
'or `git_ssh_key_secret_name` must be set for authentication '
'through ssh key, but not both')
class KubernetesJobWatcher(multiprocessing.Process, LoggingMixin, object):
def __init__(self, namespace, watcher_queue, resource_version, worker_uuid, kube_config):
multiprocessing.Process.__init__(self)
self.namespace = namespace
self.worker_uuid = worker_uuid
self.watcher_queue = watcher_queue
self.resource_version = resource_version
self.kube_config = kube_config
def run(self):
kube_client = get_kube_client()
while True:
try:
self.resource_version = self._run(kube_client, self.resource_version,
self.worker_uuid, self.kube_config)
except Exception:
self.log.exception('Unknown error in KubernetesJobWatcher. Failing')
raise
else:
self.log.warn('Watch died gracefully, starting back up with: '
'last resource_version: %s', self.resource_version)
def _run(self, kube_client, resource_version, worker_uuid, kube_config):
self.log.info(
'Event: and now my watch begins starting at resource_version: %s',
resource_version
)
watcher = watch.Watch()
kwargs = {'label_selector': 'airflow-worker={}'.format(worker_uuid)}
if resource_version:
kwargs['resource_version'] = resource_version
if kube_config.kube_client_request_args:
            for key, value in kube_config.kube_client_request_args.items():
kwargs[key] = value
last_resource_version = None
for event in watcher.stream(kube_client.list_namespaced_pod, self.namespace,
**kwargs):
task = event['object']
self.log.info(
'Event: %s had an event of type %s',
task.metadata.name, event['type']
)
if event['type'] == 'ERROR':
return self.process_error(event)
self.process_status(
task.metadata.name, task.status.phase, task.metadata.labels,
task.metadata.resource_version
)
last_resource_version = task.metadata.resource_version
return last_resource_version
def process_error(self, event):
self.log.error(
'Encountered Error response from k8s list namespaced pod stream => %s',
event
)
raw_object = event['raw_object']
if raw_object['code'] == 410:
self.log.info(
'Kubernetes resource version is too old, must reset to 0 => %s',
raw_object['message']
)
# Return resource version 0
return '0'
        raise AirflowException(
            'Kubernetes failure for %s with code %s and message: %s' % (
                raw_object['reason'], raw_object['code'], raw_object['message'])
        )
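    # Clarifying note (added commentary, not Airflow code): a watch event of type
    # 'ERROR' whose raw_object carries code 410 means the stored resource_version is
    # too old for the API server; returning '0' above makes the next watch start from
    # scratch instead of crashing the whole KubernetesJobWatcher process.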
def process_status(self, pod_id, status, labels, resource_version):
if status == 'Pending':
self.log.info('Event: %s Pending', pod_id)
elif status == 'Failed':
self.log.info('Event: %s Failed', pod_id)
self.watcher_queue.put((pod_id, State.FAILED, labels, resource_version))
elif status == 'Succeeded':
self.log.info('Event: %s Succeeded', pod_id)
self.watcher_queue.put((pod_id, None, labels, resource_version))
elif status == 'Running':
self.log.info('Event: %s is Running', pod_id)
else:
self.log.warn(
'Event: Invalid state: %s on pod: %s with labels: %s with '
'resource_version: %s', status, pod_id, labels, resource_version
)
class AirflowKubernetesScheduler(LoggingMixin):
def __init__(self, kube_config, task_queue, result_queue, kube_client, worker_uuid):
self.log.debug("Creating Kubernetes executor")
self.kube_config = kube_config
self.task_queue = task_queue
self.result_queue = result_queue
self.namespace = self.kube_config.kube_namespace
self.log.debug("Kubernetes using namespace %s", self.namespace)
self.kube_client = kube_client
self.launcher = PodLauncher(kube_client=self.kube_client)
self.worker_configuration = WorkerConfiguration(kube_config=self.kube_config)
self._manager = multiprocessing.Manager()
self.watcher_queue = self._manager.Queue()
self.worker_uuid = worker_uuid
self.kube_watcher = self._make_kube_watcher()
def _make_kube_watcher(self):
resource_version = KubeResourceVersion.get_current_resource_version()
watcher = KubernetesJobWatcher(self.namespace, self.watcher_queue,
resource_version, self.worker_uuid, self.kube_config)
watcher.start()
return watcher
def _health_check_kube_watcher(self):
if self.kube_watcher.is_alive():
pass
else:
self.log.error(
'Error while health checking kube watcher process. '
'Process died for unknown reasons')
self.kube_watcher = self._make_kube_watcher()
def run_next(self, next_job):
"""
The run_next command will check the task_queue for any un-run jobs.
It will then create a unique job-id, launch that job in the cluster,
and store relevant info in the current_jobs map so we can track the job's
status
"""
self.log.info('Kubernetes job is %s', str(next_job))
key, command, kube_executor_config = next_job
dag_id, task_id, execution_date, try_number = key
self.log.debug("Kubernetes running for command %s", command)
self.log.debug("Kubernetes launching image %s", self.kube_config.kube_image)
pod = self.worker_configuration.make_pod(
namespace=self.namespace, worker_uuid=self.worker_uuid,
pod_id=self._create_pod_id(dag_id, task_id),
dag_id=self._make_safe_label_value(dag_id),
task_id=self._make_safe_label_value(task_id),
try_number=try_number,
execution_date=self._datetime_to_label_safe_datestring(execution_date),
airflow_command=command, kube_executor_config=kube_executor_config
)
# the watcher will monitor pods, so we do not block.
self.launcher.run_pod_async(pod, **self.kube_config.kube_client_request_args)
self.log.debug("Kubernetes Job created!")
def delete_pod(self, pod_id):
if self.kube_config.delete_worker_pods:
try:
self.kube_client.delete_namespaced_pod(
pod_id, self.namespace, body=client.V1DeleteOptions(),
**self.kube_config.kube_client_request_args)
except ApiException as e:
# If the pod is already deleted
if e.status != 404:
raise
def sync(self):
"""
The sync function checks the status of all currently running kubernetes jobs.
If a job is completed, it's status is placed in the result queue to
be sent back to the scheduler.
:return:
"""
self._health_check_kube_watcher()
while True:
try:
task = self.watcher_queue.get_nowait()
try:
self.process_watcher_task(task)
finally:
self.watcher_queue.task_done()
except Empty:
break
def process_watcher_task(self, task):
pod_id, state, labels, resource_version = task
self.log.info(
'Attempting to finish pod; pod_id: %s; state: %s; labels: %s',
pod_id, state, labels
)
key = self._labels_to_key(labels=labels)
if key:
self.log.debug('finishing job %s - %s (%s)', key, state, pod_id)
self.result_queue.put((key, state, pod_id, resource_version))
@staticmethod
def _strip_unsafe_kubernetes_special_chars(string):
"""
Kubernetes only supports lowercase alphanumeric characters and "-" and "." in
the pod name
However, there are special rules about how "-" and "." can be used so let's
only keep
alphanumeric chars see here for detail:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
:param string: The requested Pod name
:return: ``str`` Pod name stripped of any unsafe characters
"""
return ''.join(ch.lower() for ind, ch in enumerate(string) if ch.isalnum())
@staticmethod
def _make_safe_pod_id(safe_dag_id, safe_task_id, safe_uuid):
"""
Kubernetes pod names must be <= 253 chars and must pass the following regex for
validation
"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"
:param safe_dag_id: a dag_id with only alphanumeric characters
:param safe_task_id: a task_id with only alphanumeric characters
:param random_uuid: a uuid
:return: ``str`` valid Pod name of appropriate length
"""
MAX_POD_ID_LEN = 253
safe_key = safe_dag_id + safe_task_id
safe_pod_id = safe_key[:MAX_POD_ID_LEN - len(safe_uuid) - 1] + "-" + safe_uuid
return safe_pod_id
@staticmethod
def _make_safe_label_value(string):
"""
Valid label values must be 63 characters or less and must be empty or begin and
end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_),
dots (.), and alphanumerics between.
If the label value is then greater than 63 chars once made safe, or differs in any
way from the original value sent to this function, then we need to truncate to
53chars, and append it with a unique hash.
"""
MAX_LABEL_LEN = 63
safe_label = re.sub(r'^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$', '', string)
if len(safe_label) > MAX_LABEL_LEN or string != safe_label:
safe_hash = hashlib.md5(string.encode()).hexdigest()[:9]
safe_label = safe_label[:MAX_LABEL_LEN - len(safe_hash) - 1] + "-" + safe_hash
return safe_label
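    # Added example comment (not in the original source): a 100-character dag_id made
    # of safe characters keeps its first 53 characters plus "-" and the first 9 hex
    # digits of its md5, e.g. _make_safe_label_value("a" * 100) yields a 63-character
    # value of the form "aaa...aaa-<9-hex-chars>", satisfying the Kubernetes label limit.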
@staticmethod
def _create_pod_id(dag_id, task_id):
safe_dag_id = AirflowKubernetesScheduler._strip_unsafe_kubernetes_special_chars(
dag_id)
safe_task_id = AirflowKubernetesScheduler._strip_unsafe_kubernetes_special_chars(
task_id)
safe_uuid = AirflowKubernetesScheduler._strip_unsafe_kubernetes_special_chars(
uuid4().hex)
return AirflowKubernetesScheduler._make_safe_pod_id(safe_dag_id, safe_task_id,
safe_uuid)
@staticmethod
def _label_safe_datestring_to_datetime(string):
"""
Kubernetes doesn't permit ":" in labels. ISO datetime format uses ":" but not
"_", let's
replace ":" with "_"
:param string: str
:return: datetime.datetime object
"""
return parser.parse(string.replace('_plus_', '+').replace("_", ":"))
@staticmethod
def _datetime_to_label_safe_datestring(datetime_obj):
"""
Kubernetes doesn't like ":" in labels, since ISO datetime format uses ":" but
not "_" let's
replace ":" with "_"
:param datetime_obj: datetime.datetime object
:return: ISO-like string representing the datetime
"""
return datetime_obj.isoformat().replace(":", "_").replace('+', '_plus_')
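    # Added example comment (not in the original source): the two helpers above are
    # inverses of each other, e.g. an execution date of 2019-01-01T00:00:00+00:00
    # becomes the label-safe string "2019-01-01T00_00_00_plus_00_00", and parsing that
    # string back restores the original timezone-aware datetime.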
def _labels_to_key(self, labels):
try_num = 1
try:
try_num = int(labels.get('try_number', '1'))
except ValueError:
self.log.warn("could not get try_number as an int: %s", labels.get('try_number', '1'))
try:
dag_id = labels['dag_id']
task_id = labels['task_id']
ex_time = self._label_safe_datestring_to_datetime(labels['execution_date'])
except Exception as e:
self.log.warn(
'Error while retrieving labels; labels: %s; exception: %s',
labels, e
)
return None
with create_session() as session:
tasks = (
session
.query(TaskInstance)
.filter_by(execution_date=ex_time).all()
)
self.log.info(
'Checking %s task instances.',
len(tasks)
)
for task in tasks:
if (
self._make_safe_label_value(task.dag_id) == dag_id and
self._make_safe_label_value(task.task_id) == task_id and
task.execution_date == ex_time
):
self.log.info(
'Found matching task %s-%s (%s) with current state of %s',
task.dag_id, task.task_id, task.execution_date, task.state
)
dag_id = task.dag_id
task_id = task.task_id
return (dag_id, task_id, ex_time, try_num)
self.log.warn(
'Failed to find and match task details to a pod; labels: %s',
labels
)
return None
def terminate(self):
self.watcher_queue.join()
self._manager.shutdown()
class KubernetesExecutor(BaseExecutor, LoggingMixin):
def __init__(self):
self.kube_config = KubeConfig()
self.task_queue = None
self.result_queue = None
self.kube_scheduler = None
self.kube_client = None
self.worker_uuid = None
self._manager = multiprocessing.Manager()
super().__init__(parallelism=self.kube_config.parallelism)
@provide_session
def clear_not_launched_queued_tasks(self, session=None):
"""
If the airflow scheduler restarts with pending "Queued" tasks, the tasks may or
may not
have been launched Thus, on starting up the scheduler let's check every
"Queued" task to
see if it has been launched (ie: if there is a corresponding pod on kubernetes)
If it has been launched then do nothing, otherwise reset the state to "None" so
the task
will be rescheduled
This will not be necessary in a future version of airflow in which there is
proper support
for State.LAUNCHED
"""
queued_tasks = session\
.query(TaskInstance)\
.filter(TaskInstance.state == State.QUEUED).all()
self.log.info(
'When executor started up, found %s queued task instances',
len(queued_tasks)
)
for task in queued_tasks:
dict_string = (
"dag_id={},task_id={},execution_date={},airflow-worker={}".format(
AirflowKubernetesScheduler._make_safe_label_value(task.dag_id),
AirflowKubernetesScheduler._make_safe_label_value(task.task_id),
AirflowKubernetesScheduler._datetime_to_label_safe_datestring(
task.execution_date
),
self.worker_uuid
)
)
kwargs = dict(label_selector=dict_string)
if self.kube_config.kube_client_request_args:
                for key, value in self.kube_config.kube_client_request_args.items():
kwargs[key] = value
pod_list = self.kube_client.list_namespaced_pod(
self.kube_config.kube_namespace, **kwargs)
if len(pod_list.items) == 0:
self.log.info(
'TaskInstance: %s found in queued state but was not launched, '
'rescheduling', task
)
session.query(TaskInstance).filter(
TaskInstance.dag_id == task.dag_id,
TaskInstance.task_id == task.task_id,
TaskInstance.execution_date == task.execution_date
).update({TaskInstance.state: State.NONE})
def _inject_secrets(self):
def _create_or_update_secret(secret_name, secret_path):
try:
return self.kube_client.create_namespaced_secret(
self.kube_config.executor_namespace, kubernetes.client.V1Secret(
data={
                            'key.json': base64.b64encode(open(secret_path, 'rb').read()).decode('utf-8')},
metadata=kubernetes.client.V1ObjectMeta(name=secret_name)),
**self.kube_config.kube_client_request_args)
except ApiException as e:
if e.status == 409:
return self.kube_client.replace_namespaced_secret(
secret_name, self.kube_config.executor_namespace,
kubernetes.client.V1Secret(
data={'key.json': base64.b64encode(
                                open(secret_path, 'rb').read()).decode('utf-8')},
metadata=kubernetes.client.V1ObjectMeta(name=secret_name)),
**self.kube_config.kube_client_request_args)
self.log.exception(
'Exception while trying to inject secret. '
'Secret name: %s, error details: %s',
secret_name, e
)
raise
# For each GCP service account key, inject it as a secret in executor
# namespace with the specific secret name configured in the airflow.cfg.
# We let exceptions to pass through to users.
if self.kube_config.gcp_service_account_keys:
name_path_pair_list = [
{'name': account_spec.strip().split('=')[0],
'path': account_spec.strip().split('=')[1]}
for account_spec in self.kube_config.gcp_service_account_keys.split(',')]
for service_account in name_path_pair_list:
_create_or_update_secret(service_account['name'], service_account['path'])
def start(self):
self.log.info('Start Kubernetes executor')
self.worker_uuid = KubeWorkerIdentifier.get_or_create_current_kube_worker_uuid()
self.log.debug('Start with worker_uuid: %s', self.worker_uuid)
# always need to reset resource version since we don't know
# when we last started, note for behavior below
# https://github.com/kubernetes-client/python/blob/master/kubernetes/docs
# /CoreV1Api.md#list_namespaced_pod
KubeResourceVersion.reset_resource_version()
self.task_queue = self._manager.Queue()
self.result_queue = self._manager.Queue()
self.kube_client = get_kube_client()
self.kube_scheduler = AirflowKubernetesScheduler(
self.kube_config, self.task_queue, self.result_queue,
self.kube_client, self.worker_uuid
)
self._inject_secrets()
self.clear_not_launched_queued_tasks()
def execute_async(self, key, command, queue=None, executor_config=None):
self.log.info(
'Add task %s with command %s with executor_config %s',
key, command, executor_config
)
kube_executor_config = KubernetesExecutorConfig.from_dict(executor_config)
self.task_queue.put((key, command, kube_executor_config))
def sync(self):
if self.running:
self.log.debug('self.running: %s', self.running)
if self.queued_tasks:
self.log.debug('self.queued: %s', self.queued_tasks)
self.kube_scheduler.sync()
last_resource_version = None
while True:
try:
results = self.result_queue.get_nowait()
try:
key, state, pod_id, resource_version = results
last_resource_version = resource_version
self.log.info('Changing state of %s to %s', results, state)
try:
self._change_state(key, state, pod_id)
except Exception as e:
self.log.exception('Exception: %s when attempting ' +
'to change state of %s to %s, re-queueing.', e, results, state)
self.result_queue.put(results)
finally:
self.result_queue.task_done()
except Empty:
break
KubeResourceVersion.checkpoint_resource_version(last_resource_version)
for _ in range(self.kube_config.worker_pods_creation_batch_size):
try:
task = self.task_queue.get_nowait()
try:
self.kube_scheduler.run_next(task)
except ApiException:
self.log.exception('ApiException when attempting to run task, re-queueing.')
self.task_queue.put(task)
finally:
self.task_queue.task_done()
except Empty:
break
def _change_state(self, key, state, pod_id):
if state != State.RUNNING:
self.kube_scheduler.delete_pod(pod_id)
try:
self.log.info('Deleted pod: %s', str(key))
self.running.pop(key)
except KeyError:
self.log.debug('Could not find key: %s', str(key))
pass
self.event_buffer[key] = state
(dag_id, task_id, ex_time, try_number) = key
with create_session() as session:
item = session.query(TaskInstance).filter_by(
dag_id=dag_id,
task_id=task_id,
execution_date=ex_time
).one()
if state:
item.state = state
session.add(item)
def end(self):
self.log.info('Shutting down Kubernetes executor')
self.task_queue.join()
self.result_queue.join()
if self.kube_scheduler:
self.kube_scheduler.terminate()
self._manager.shutdown()
| 45.166259 | 106 | 0.62724 |
be3256e2c236eeac4eed56c05fea9b64646894a7 | 2,485 | py | Python | docs/conf.py | dottinf/dimcli | 708e83675afa6279424487c5b7417f5393c480bb | ["MIT"] | 1 | 2020-04-15T06:16:33.000Z | 2020-04-15T06:16:33.000Z | docs/conf.py | dottinf/dimcli | 708e83675afa6279424487c5b7417f5393c480bb | ["MIT"] | null | null | null | docs/conf.py | dottinf/dimcli | 708e83675afa6279424487c5b7417f5393c480bb | ["MIT"] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, '/Users/michele.pasin/Dropbox/code/python/dimcli/dimcli_project/dimcli')
# -- Project information -----------------------------------------------------
project = 'dimcli'
copyright = '2020, Author'
author = 'Author'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'nature'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
| 35 | 93 | 0.652716 |
2857269d5f5356ac7c3829dea684674fac327bad | 10,427 | py | Python | animations/lm95p1.py | TristanCacqueray/demo-render | 4c8403e684165e5e75c046ee023c1f794a6650a8 | ["Apache-2.0"] | 9 | 2018-02-19T14:17:12.000Z | 2021-03-27T14:46:28.000Z | animations/lm95p1.py | TristanCacqueray/demo-render | 4c8403e684165e5e75c046ee023c1f794a6650a8 | ["Apache-2.0"] | null | null | null | animations/lm95p1.py | TristanCacqueray/demo-render | 4c8403e684165e5e75c046ee023c1f794a6650a8 | ["Apache-2.0"] | null | null | null |
#!/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SIZE=6
# lm95 anim:
# 0 - 1125 intro
./lm95p1.py --record /tmp/anim --wav ../render_data/lm95p1.wav --steps 1125 --anim zoom --size $SIZE
# 1125 - 1575 transition
./lm95p1.py --record /tmp/anim/ --wav ../render_data/lm95p1.wav --steps 420 --anim zoom2 --frame_start 1125 --size $SIZE
./lm95p1.py --record /tmp/anim/ --wav ../render_data/lm95p1.wav --steps 30 --anim tr1 --frame_start 1545 --size $SIZE
# 1575 - 2500 part1
./lm95p1.py --record /tmp/anim/ --wav ../render_data/lm95p1.wav --steps 925 --anim traveling --frame_start 1575 --size $SIZE
# 2500 - 2950 part2
./lm95p1.py --record /tmp/anim/ --wav ../render_data/lm95p1.wav --steps 18 --anim tr2 --frame_start 2500 --size $SIZE
./lm95p1.py --record /tmp/anim/ --wav ../render_data/lm95p1.wav --steps 875 --anim trippy --frame_start 2518 --size $SIZE
# 3293 - 4018 zoomout
./lm95p1.py --record /tmp/anim/ --wav ../render_data/lm95p1.wav --steps 625 --anim zoomout --frame_start 3393 --size $SIZE
"""
import random
import pygame
import numpy as np
from utils_v1.common import usage_cli_complex, run_main
from utils_v1.pygame_utils import Screen
from utils_v1.julia_set import JuliaSet
from utils_v1.scipy_utils import AudioMod
def jiggle(args, audio_mod=None):
modulations = {
"zoom": {
"begin": {
# "c": (-1.9672144721067721+3.72412944951802e-12j),
"c": (-1.8822299686241232+0.001j), # +3.72412944951802e-12j),
"radius": 0.300338745117,
},
"end": {
"c": (-1.7822299686241232+0.001j), # 3.72412944951802e-12j),
"radius": 3.01697554849e-02,
},
"radius_space": "log",
"audio_mod": {
"c_imag": -0.0015,
},
# "space": "log",
"max_iter": 2048,
},
"zoom2": {
"begin": {
"c": (-1.7822299686241232+0.0005166571710672173j),
"radius": 0.0301697554849,
"audio_mod": 1,
},
"end": {
"c": (-1.7810844610161822+2.729043964254327e-07j),
# "radius": 0.00088688411246, # 0.00330763749309, # 1.70280390934e-05,
"radius": 0.002,
"audio_mod": 0.02,
},
"radius_space": "-log",
"audio_mod": {
"c_real": 0.002,
},
"skip_real": True,
"max_iter": 2048,
},
"tr1": {
"begin": {
"c": -1.7822231418976315+2.729043964254327e-07j,
"radius": 0.002,
},
"end": {
"c": -1.7822206200202013+2.729043964254327e-07j,
"radius": 0.00088688411246,
},
"radius_space": "log",
"max_iter": 2048,
},
"traveling": {
"begin": {
"c": -1.7822206200202013+2.729043964254327e-07j,
"radius": 0.000888688411246,
},
"end": {
"c": -1.782191182216574-0j,
"radius": 0.000888688411246,
},
"c_space_image": "log",
"audio_mod": {
"color": 2,
"c_real": 0.000002,
},
"max_iter": 2048,
},
"tr2": {
"begin": {
"c": -1.7821907956568581+0j,
"radius": 0.000888688411246,
},
"end": {
"radius": 0.000281186567621,
"c": -1.7821911201074472-1e-12j,
},
"max_iter": 2048,
},
"trippy": {
"begin": {
"c": -1.7821911173201472-1e-12j,
"radius": 0.000281186567621,
},
"end": {
"c": -1.7821911201074472-8e-10j,
},
"audio_mod": {
"c_real": -0.00000005
},
"max_iter": 4096,
},
"zoomout": {
"begin": {
"c": -1.78219112855035-8e-10j,
"radius": 0.000281186567621,
"audio_mod": 0.1,
},
"end": {
"c": -1.7821911201074472-8e-08j,
"radius": 0.3,
"audio_mod": 3000000,
},
"audio_mod": {
"c_real": 0.0000005,
"c_imag": 0.0000005,
},
"audio_mod_space": "log",
"radius_space": "log",
},
"spiral": {
"begin": {
"c": (-0.788+0.052j),
"radius": 0.2,
# "c": (-0.758+0.052j), # thighter
},
"end": {
"c": (-0.7535+0.056j),
},
"audio_mod": {
"c_real": 0.065,
"c_imag": 0.04,
},
"skip_imag": True,
"center": complex(-0.41, -0.0554),
"max_iter": 600,
"loop": True
},
"new": {
"begin": {
"c": (-1.789228770017097+9.647348357832448e-13j),
},
"end": {
"c": (-1.789228770017097+9.647348357832448e-13j),
# "c": None,
},
"seed": (0.4699999999999999-0.23j),
# "only_imag": True,
"range": 0.04,
"center": 0,
"max_iter": 1000,
"radius": 9.5458991964e-06,
}
}
if args.anim is None:
        mod = random.choice(list(modulations.values()))
else:
mod = modulations[args.anim]
path_steps = args.steps
if mod.get("loop"):
path_steps = path_steps // 2
def get_path(begin, end, space="lin"):
if space == "log":
if space == "-log":
begin, end = end, begin
path = np.geomspace(begin, end, path_steps)
if space == "-log":
path = path[::-1]
else:
path = np.linspace(begin, end, path_steps)
return path
def loop_path(path):
return np.append(np.append(path, path[:-1][::-1]), path[:1])
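    # Added example comment (not in the original source): loop_path mirrors a path so
    # the animation plays forward then backward, e.g. [1, 2, 3] -> [1, 2, 3, 2, 1, 1];
    # combined with the halved path_steps above, "loop" animations end where they started.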
mod_rad, mod_real, mod_imag, mod_pow = None, None, None, None
if mod.get("begin") and mod.get("end"):
if mod["begin"].get("c"):
mod["seed"] = mod["begin"]["c"]
if mod["begin"].get("c") and mod["end"].get("c"):
mod_real = get_path(mod["begin"]["c"].real, mod["end"]["c"].real,
mod.get("c_space_real", "lin"))
mod_imag = get_path(mod["begin"]["c"].imag, mod["end"]["c"].imag,
mod.get("c_space_imag", "lin"))
if mod["begin"].get("radius") and mod["end"].get("radius"):
mod_rad = get_path(mod["begin"]["radius"], mod["end"]["radius"],
mod.get("radius_space", "lin"))
if mod["begin"].get("audio_mod"):
mod_pow = get_path(mod["begin"]["audio_mod"],
mod["end"]["audio_mod"],
mod.get("audio_mod_space", "-log"))
if mod.get("loop"):
mod_real = loop_path(mod_real)
mod_imag = loop_path(mod_imag)
        if mod_rad is not None:
mod_rad = loop_path(mod_rad)
def update_view(scene, frame):
radius = mod["begin"].get("radius", 1)
if mod_rad is not None:
radius = mod_rad[frame]
center = mod.get("center")
scene.set_view(center=center, radius=radius)
scene.max_iter = mod.get("max_iter", args.max_iter)
if mod.get("skip_imag"):
seed_imag = mod["begin"]["c"].imag
else:
seed_imag = mod_imag[frame]
if mod.get("skip_real"):
seed_real = mod["begin"]["c"].real
else:
seed_real = mod_real[frame]
if audio_mod and mod.get("audio_mod"):
amod = audio_mod.get(frame + args.frame_start)
if mod_pow is not None:
p = mod_pow[frame]
else:
p = 1
if mod["audio_mod"].get("c_imag"):
seed_imag += amod * mod["audio_mod"]["c_imag"] * p
if mod["audio_mod"].get("c_real"):
seed_real += amod * mod["audio_mod"]["c_real"] * p
if mod["audio_mod"].get("color"):
cmod = mod["audio_mod"]["color"] * amod
scene.color_vector = np.vectorize(args.color(args.max_iter,
cmod))
scene.c = complex(seed_real, seed_imag)
return update_view
def main():
args = usage_cli_complex(worker=1)
if not args.steps:
print("Set --steps for the number of frame")
exit(1)
audio_mod = AudioMod("lm95p1.wav", 4019, 1)
screen = Screen(args.winsize)
scene = JuliaSet(args)
screen.add(scene)
clock = pygame.time.Clock()
if not args.anim:
args.anim = "spiral"
audio_mod = None
animation = jiggle(args, audio_mod)
for frame in range(args.skip, args.steps):
animation(scene, frame)
scene.render(frame)
screen.update()
pygame.display.update()
if args.record:
screen.capture(args.record, frame + args.frame_start)
clock.tick(args.fps)
if args.video and args.record:
import subprocess
subprocess.Popen([
"ffmpeg", "-y", "-framerate", str(args.fps),
"-start_number", str(args.skip),
"-i", "%s/%%04d.png" % args.record,
"-i", args.wav,
"-c:a", "libvorbis", "-c:v", "libvpx", "-threads", "4",
"-b:v", "5M",
"%s/%04d-%s.webm" % (args.record, args.skip, args.anim)
]).wait()
if __name__ == "__main__":
run_main(main)
| 33.744337 | 127 | 0.48432 |
1e88a007449cf2b1d937e4a706a17048fc065695 | 1,114 | py | Python | data_management/toolboxes/scripts/BlankScript.py | conklinbd/solutions-geoprocessing-toolbox | 7afab793ea34b7e7cb7e32757e8a150b6637ffd2 | ["Apache-2.0"] | null | null | null | data_management/toolboxes/scripts/BlankScript.py | conklinbd/solutions-geoprocessing-toolbox | 7afab793ea34b7e7cb7e32757e8a150b6637ffd2 | ["Apache-2.0"] | null | null | null | data_management/toolboxes/scripts/BlankScript.py | conklinbd/solutions-geoprocessing-toolbox | 7afab793ea34b7e7cb7e32757e8a150b6637ffd2 | ["Apache-2.0"] | 1 | 2018-10-25T15:52:41.000Z | 2018-10-25T15:52:41.000Z |
#-------------------------------------------------------------------------------
# Copyright 2010-2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------------
#--------ESRI 2010-------------------------------------
# Blank Script
# This script is intentionally empty.
# It is intended to provide the basis for a script tool, the
# sole job of which is to provide the opportunity to add
# parameter validation to models
# INPUTS:
# none
# OUTPUTS:
# none
#
# Date: June 10, 2010
#------------------------------------------------------
| 39.785714 | 80 | 0.572711 |
b02a07090c51eff010d63aad505dd4d20f16e3eb | 22,191 | py | Python | emotion_recognition.py | junction-kabantchiki/emotion-recognition-using-speech | d7e51c96cbb7f93bfa07f501dfe2eaf799acfc9a | ["MIT"] | null | null | null | emotion_recognition.py | junction-kabantchiki/emotion-recognition-using-speech | d7e51c96cbb7f93bfa07f501dfe2eaf799acfc9a | ["MIT"] | null | null | null | emotion_recognition.py | junction-kabantchiki/emotion-recognition-using-speech | d7e51c96cbb7f93bfa07f501dfe2eaf799acfc9a | ["MIT"] | null | null | null |
from data_extractor import load_data
from utils import extract_feature, AVAILABLE_EMOTIONS
from create_csv import write_emodb_csv, write_tess_ravdess_csv, write_custom_csv
from sklearn.metrics import accuracy_score, make_scorer, fbeta_score, mean_squared_error, mean_absolute_error
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as pl
from time import time
from utils import get_best_estimators, get_audio_config
import numpy as np
import tqdm
import os
import random
import pandas as pd
class EmotionRecognizer:
"""A class for training, testing and predicting emotions based on
speech's features that are extracted and fed into `sklearn` or `keras` model"""
def __init__(self, model, **kwargs):
"""
Params:
model (sklearn model): the model used to detect emotions.
emotions (list): list of emotions to be used. Note that these emotions must be available in
RAVDESS_TESS & EMODB Datasets, available nine emotions are the following:
'neutral', 'calm', 'happy', 'sad', 'angry', 'fear', 'disgust', 'ps' ( pleasant surprised ), 'boredom'.
Default is ["sad", "neutral", "happy"].
tess_ravdess (bool): whether to use TESS & RAVDESS Speech datasets, default is True
emodb (bool): whether to use EMO-DB Speech dataset, default is True,
custom_db (bool): whether to use custom Speech dataset that is located in `data/train-custom`
and `data/test-custom`, default is True
tess_ravdess_name (str): the name of the output CSV file for TESS&RAVDESS dataset, default is "tess_ravdess.csv"
emodb_name (str): the name of the output CSV file for EMO-DB dataset, default is "emodb.csv"
custom_db_name (str): the name of the output CSV file for the custom dataset, default is "custom.csv"
features (list): list of speech features to use, default is ["mfcc", "chroma", "mel"]
(i.e MFCC, Chroma and MEL spectrogram )
classification (bool): whether to use classification or regression, default is True
balance (bool): whether to balance the dataset ( both training and testing ), default is True
verbose (bool/int): whether to print messages on certain tasks, default is 1
Note that when `tess_ravdess`, `emodb` and `custom_db` are set to `False`, `tess_ravdess` will be set to True
automatically.
"""
# model
self.model = model
# emotions
self.emotions = kwargs.get("emotions", ["sad", "neutral", "happy"])
# make sure that there are only available emotions
self._verify_emotions()
# audio config
self.features = kwargs.get("features", ["mfcc", "chroma", "mel"])
self.audio_config = get_audio_config(self.features)
# datasets
self.tess_ravdess = kwargs.get("tess_ravdess", True)
self.emodb = kwargs.get("emodb", True)
self.custom_db = kwargs.get("custom_db", True)
if not self.tess_ravdess and not self.emodb and not self.custom_db:
self.tess_ravdess = True
self.classification = kwargs.get("classification", True)
self.balance = kwargs.get("balance", True)
self.override_csv = kwargs.get("override_csv", True)
self.verbose = kwargs.get("verbose", 1)
self.tess_ravdess_name = kwargs.get("tess_ravdess_name", "tess_ravdess.csv")
self.emodb_name = kwargs.get("emodb_name", "emodb.csv")
self.custom_db_name = kwargs.get("custom_db_name", "custom.csv")
self.verbose = kwargs.get("verbose", 1)
# set metadata path file names
self._set_metadata_filenames()
# boolean attributes
self.data_loaded = False
self.model_trained = False
def _set_metadata_filenames(self):
"""
Protected method to get all CSV (metadata) filenames into two instance attributes:
- `self.train_desc_files` for training CSVs
- `self.test_desc_files` for testing CSVs
"""
train_desc_files, test_desc_files = [], []
if self.tess_ravdess:
train_desc_files.append(f"train_{self.tess_ravdess_name}")
test_desc_files.append(f"test_{self.tess_ravdess_name}")
if self.emodb:
train_desc_files.append(f"train_{self.emodb_name}")
test_desc_files.append(f"test_{self.emodb_name}")
if self.custom_db:
train_desc_files.append(f"train_{self.custom_db_name}")
test_desc_files.append(f"test_{self.custom_db_name}")
# set them to be object attributes
self.train_desc_files = train_desc_files
self.test_desc_files = test_desc_files
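    # Added example comment (not in the original source): with all three datasets
    # enabled and the default names, this produces
    #   train_desc_files = ["train_tess_ravdess.csv", "train_emodb.csv", "train_custom.csv"]
    # and the matching "test_*" list used by write_csv() and load_data().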
def _verify_emotions(self):
"""
This method makes sure that emotions passed in parameters are valid.
"""
for emotion in self.emotions:
assert emotion in AVAILABLE_EMOTIONS, "Emotion not recognized."
def get_best_estimators(self):
"""Loads estimators from grid files and returns them"""
return get_best_estimators(self.classification)
def write_csv(self):
"""
Write available CSV files in `self.train_desc_files` and `self.test_desc_files`
determined by `self._set_metadata_filenames()` method.
"""
for train_csv_file, test_csv_file in zip(self.train_desc_files, self.test_desc_files):
# not safe approach
if os.path.isfile(train_csv_file) and os.path.isfile(test_csv_file):
# file already exists, just skip writing csv files
if not self.override_csv:
continue
if self.emodb_name in train_csv_file:
write_emodb_csv(self.emotions, train_name=train_csv_file, test_name=test_csv_file, verbose=self.verbose)
if self.verbose:
print("[+] Writed EMO-DB CSV File")
elif self.tess_ravdess_name in train_csv_file:
write_tess_ravdess_csv(self.emotions, train_name=train_csv_file, test_name=test_csv_file, verbose=self.verbose)
if self.verbose:
print("[+] Writed TESS & RAVDESS DB CSV File")
elif self.custom_db_name in train_csv_file:
write_custom_csv(emotions=self.emotions, train_name=train_csv_file, test_name=test_csv_file, verbose=self.verbose)
if self.verbose:
print("[+] Writed Custom DB CSV File")
def load_data(self):
"""
        Loads and extracts features from the audio files of the specified databases.
"""
if not self.data_loaded:
result = load_data(self.train_desc_files, self.test_desc_files, self.audio_config, self.classification,
emotions=self.emotions, balance=self.balance)
self.X_train = result['X_train']
self.X_test = result['X_test']
self.y_train = result['y_train']
self.y_test = result['y_test']
self.train_audio_paths = result['train_audio_paths']
self.test_audio_paths = result['test_audio_paths']
if self.verbose:
print("[+] Data loaded")
self.data_loaded = True
def train(self, verbose=1):
"""
        Train the model. If the data isn't loaded yet, it will be loaded automatically.
"""
if not self.data_loaded:
# if data isn't loaded yet, load it then
self.load_data()
if not self.model_trained:
self.model.fit(X=self.X_train, y=self.y_train)
self.model_trained = True
if verbose:
print("[+] Model trained")
def predict(self, audio_path):
"""
        Given an `audio_path`, this method extracts the features
        and predicts the emotion.
"""
feature = extract_feature(audio_path, **self.audio_config).reshape(1, -1)
return self.model.predict(feature)[0]
def predict_proba(self, audio_path):
"""
Predicts the probability of each emotion.
"""
if self.classification:
feature = extract_feature(audio_path, **self.audio_config).reshape(1, -1)
proba = self.model.predict_proba(feature)[0]
result = {}
for emotion, prob in zip(self.emotions, proba):
result[emotion] = prob
return result
else:
raise NotImplementedError("Probability prediction doesn't make sense for regression")
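    # Illustrative usage sketch for `predict` / `predict_proba` (assumes a trained
    # model and an existing WAV file; the path below is a placeholder):
    #
    #   rec.train()
    #   rec.predict("data/sample.wav")        # e.g. "happy"
    #   rec.predict_proba("data/sample.wav")  # e.g. {"sad": 0.05, "neutral": 0.15, "happy": 0.80}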
def grid_search(self, params, n_jobs=2):
"""
        Performs GridSearchCV over `params` on `self.model`
        and returns the tuple: (best_estimator, best_params, best_score).
"""
score = accuracy_score if self.classification else mean_absolute_error
grid = GridSearchCV(estimator=self.model, param_grid=params, scoring=make_scorer(score),
n_jobs=n_jobs, verbose=1, cv=3)
grid_result = grid.fit(self.X_train, self.y_train)
return grid_result.best_estimator_, grid_result.best_params_, grid_result.best_score_
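    # Illustrative usage sketch for `grid_search`. The parameter grid below is a
    # hypothetical example for an SVC model, not a recommended configuration:
    #
    #   params = {"C": [0.1, 1, 10], "kernel": ["rbf", "linear"]}
    #   best_estimator, best_params, best_score = rec.grid_search(params, n_jobs=4)
    #   rec.model = best_estimator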
def determine_best_model(self, train=True):
"""
        Loads the best estimators, determines which one performs best on the test data,
        and then sets it to `self.model`.
        If `train` is True, that model is trained on the training data, so it
        will be ready for inference.
        In case of regression the metric used is MSE; for classification it is accuracy.
Note that the execution of this method may take several minutes due
to training all estimators (stored in `grid` folder) for determining the best possible one.
"""
if not self.data_loaded:
self.load_data()
# loads estimators
estimators = self.get_best_estimators()
result = []
if self.verbose:
estimators = tqdm.tqdm(estimators)
for estimator, params, cv_score in estimators:
if self.verbose:
estimators.set_description(f"Evaluating {estimator.__class__.__name__}")
detector = EmotionRecognizer(estimator, emotions=self.emotions, tess_ravdess=self.tess_ravdess,
emodb=self.emodb, custom_db=self.custom_db, classification=self.classification,
features=self.features, balance=self.balance, override_csv=False)
# data already loaded
detector.X_train = self.X_train
detector.X_test = self.X_test
detector.y_train = self.y_train
detector.y_test = self.y_test
detector.data_loaded = True
# train the model
detector.train(verbose=0)
# get test accuracy
accuracy = detector.test_score()
# append to result
result.append((detector.model, accuracy))
# sort the result
if self.classification:
result = sorted(result, key=lambda item: item[1], reverse=True)
else:
# regression, best is the lower, not the higher
result = sorted(result, key=lambda item: item[1], reverse=False)
best_estimator = result[0][0]
accuracy = result[0][1]
self.model = best_estimator
self.model_trained = True
if self.verbose:
if self.classification:
print(f"[+] Best model determined: {self.model.__class__.__name__} with {accuracy*100:.3f}% test accuracy")
else:
print(f"[+] Best model determined: {self.model.__class__.__name__} with {accuracy:.5f} mean absolute error")
def test_score(self):
"""
        Calculates the score on the testing data.
        If `self.classification` is True, the metric used is accuracy;
        otherwise (regression) Mean Squared Error is used.
"""
y_pred = self.model.predict(self.X_test)
if self.classification:
return accuracy_score(y_true=self.y_test, y_pred=y_pred)
else:
return mean_squared_error(y_true=self.y_test, y_pred=y_pred)
def train_score(self):
"""
        Calculates the score on the training data.
        If `self.classification` is True, the metric used is accuracy;
        otherwise (regression) Mean Squared Error is used.
"""
y_pred = self.model.predict(self.X_train)
if self.classification:
return accuracy_score(y_true=self.y_train, y_pred=y_pred)
else:
return mean_squared_error(y_true=self.y_train, y_pred=y_pred)
def train_fbeta_score(self, beta):
y_pred = self.model.predict(self.X_train)
return fbeta_score(self.y_train, y_pred, beta, average='micro')
def test_fbeta_score(self, beta):
y_pred = self.model.predict(self.X_test)
return fbeta_score(self.y_test, y_pred, beta, average='micro')
def confusion_matrix(self, percentage=True, labeled=True):
"""
        Computes the confusion matrix to evaluate the test accuracy of the classification
        and returns it as a numpy matrix or pandas DataFrame (depending on params).
params:
percentage (bool): whether to use percentage instead of number of samples, default is True.
labeled (bool): whether to label the columns and indexes in the dataframe.
"""
if not self.classification:
raise NotImplementedError("Confusion matrix works only when it is a classification problem")
y_pred = self.model.predict(self.X_test)
matrix = confusion_matrix(self.y_test, y_pred, labels=self.emotions).astype(np.float32)
if percentage:
for i in range(len(matrix)):
matrix[i] = matrix[i] / np.sum(matrix[i])
# make it percentage
matrix *= 100
if labeled:
matrix = pd.DataFrame(matrix, index=[ f"true_{e}" for e in self.emotions ],
columns=[ f"predicted_{e}" for e in self.emotions ])
return matrix
def draw_confusion_matrix(self):
"""Calculates the confusion matrix and shows it"""
matrix = self.confusion_matrix(percentage=False, labeled=False)
#TODO: add labels, title, legends, etc.
pl.imshow(matrix, cmap="binary")
pl.show()
def n_emotions(self, emotion, partition):
"""Returns number of `emotion` data samples in a particular `partition`
('test' or 'train')
"""
if partition == "test":
return len([y for y in self.y_test if y == emotion])
elif partition == "train":
return len([y for y in self.y_train if y == emotion])
def get_samples_by_class(self):
"""
Returns a dataframe that contains the number of training
and testing samples for all emotions.
Note that if data isn't loaded yet, it'll be loaded
"""
if not self.data_loaded:
self.load_data()
train_samples = []
test_samples = []
total = []
for emotion in self.emotions:
n_train = self.n_emotions(emotion, "train")
n_test = self.n_emotions(emotion, "test")
train_samples.append(n_train)
test_samples.append(n_test)
total.append(n_train + n_test)
# get total
total.append(sum(train_samples) + sum(test_samples))
train_samples.append(sum(train_samples))
test_samples.append(sum(test_samples))
return pd.DataFrame(data={"train": train_samples, "test": test_samples, "total": total}, index=self.emotions + ["total"])
def get_random_emotion(self, emotion, partition="train"):
"""
Returns random `emotion` data sample index on `partition`.
"""
if partition == "train":
index = random.choice(list(range(len(self.y_train))))
while self.y_train[index] != emotion:
index = random.choice(list(range(len(self.y_train))))
elif partition == "test":
index = random.choice(list(range(len(self.y_test))))
            while self.y_test[index] != emotion:
index = random.choice(list(range(len(self.y_test))))
else:
raise TypeError("Unknown partition, only 'train' or 'test' is accepted")
return index
def plot_histograms(classifiers=True, beta=0.5, n_classes=3, verbose=1):
"""
    Loads different estimators from the `grid` folder and calculates some statistics to plot histograms.
    Params:
        classifiers (bool): if `True`, this will plot classifiers, regressors otherwise.
        beta (float): beta value for calculating fbeta score for various estimators.
        n_classes (int): number of classes
        verbose (bool/int): whether to print progress messages, default is 1.
"""
# get the estimators from the performed grid search result
estimators = get_best_estimators(classifiers)
final_result = {}
for estimator, params, cv_score in estimators:
final_result[estimator.__class__.__name__] = []
for i in range(3):
result = {}
# initialize the class
detector = EmotionRecognizer(estimator, verbose=0)
# load the data
detector.load_data()
if i == 0:
# first get 1% of sample data
sample_size = 0.01
elif i == 1:
# second get 10% of sample data
sample_size = 0.1
elif i == 2:
# last get all the data
sample_size = 1
# calculate number of training and testing samples
n_train_samples = int(len(detector.X_train) * sample_size)
n_test_samples = int(len(detector.X_test) * sample_size)
# set the data
detector.X_train = detector.X_train[:n_train_samples]
detector.X_test = detector.X_test[:n_test_samples]
detector.y_train = detector.y_train[:n_train_samples]
detector.y_test = detector.y_test[:n_test_samples]
# calculate train time
t_train = time()
detector.train()
t_train = time() - t_train
# calculate test time
t_test = time()
test_accuracy = detector.test_score()
t_test = time() - t_test
# set the result to the dictionary
result['train_time'] = t_train
result['pred_time'] = t_test
result['acc_train'] = cv_score
result['acc_test'] = test_accuracy
result['f_train'] = detector.train_fbeta_score(beta)
result['f_test'] = detector.test_fbeta_score(beta)
if verbose:
print(f"[+] {estimator.__class__.__name__} with {sample_size*100}% ({n_train_samples}) data samples achieved {cv_score*100:.3f}% Validation Score in {t_train:.3f}s & {test_accuracy*100:.3f}% Test Score in {t_test:.3f}s")
# append the dictionary to the list of results
final_result[estimator.__class__.__name__].append(result)
if verbose:
print()
visualize(final_result, n_classes=n_classes)
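# Illustrative call (assumes the pickled grid-search results exist under `grid/`):
#
#   plot_histograms(classifiers=True, beta=0.5, n_classes=3, verbose=1)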
def visualize(results, n_classes):
"""
Visualization code to display results of various learners.
inputs:
- results: a dictionary of lists of dictionaries that contain various results on the corresponding estimator
- n_classes: number of classes
"""
n_estimators = len(results)
# naive predictor
accuracy = 1 / n_classes
f1 = 1 / n_classes
# Create figure
fig, ax = pl.subplots(2, 4, figsize = (11,7))
# Constants
bar_width = 0.4
colors = [ (random.random(), random.random(), random.random()) for _ in range(n_estimators) ]
# Super loop to plot four panels of data
for k, learner in enumerate(results.keys()):
for j, metric in enumerate(['train_time', 'acc_train', 'f_train', 'pred_time', 'acc_test', 'f_test']):
for i in np.arange(3):
x = bar_width * n_estimators
# Creative plot code
ax[j//3, j%3].bar(i*x+k*(bar_width), results[learner][i][metric], width = bar_width, color = colors[k])
ax[j//3, j%3].set_xticks([x-0.2, x*2-0.2, x*3-0.2])
ax[j//3, j%3].set_xticklabels(["1%", "10%", "100%"])
ax[j//3, j%3].set_xlabel("Training Set Size")
ax[j//3, j%3].set_xlim((-0.2, x*3))
# Add unique y-labels
ax[0, 0].set_ylabel("Time (in seconds)")
ax[0, 1].set_ylabel("Accuracy Score")
ax[0, 2].set_ylabel("F-score")
ax[1, 0].set_ylabel("Time (in seconds)")
ax[1, 1].set_ylabel("Accuracy Score")
ax[1, 2].set_ylabel("F-score")
# Add titles
ax[0, 0].set_title("Model Training")
ax[0, 1].set_title("Accuracy Score on Training Subset")
ax[0, 2].set_title("F-score on Training Subset")
ax[1, 0].set_title("Model Predicting")
ax[1, 1].set_title("Accuracy Score on Testing Set")
ax[1, 2].set_title("F-score on Testing Set")
# Add horizontal lines for naive predictors
ax[0, 1].axhline(y = accuracy, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
ax[1, 1].axhline(y = accuracy, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
ax[0, 2].axhline(y = f1, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
ax[1, 2].axhline(y = f1, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
# Set y-limits for score panels
ax[0, 1].set_ylim((0, 1))
ax[0, 2].set_ylim((0, 1))
ax[1, 1].set_ylim((0, 1))
ax[1, 2].set_ylim((0, 1))
    # Set the additional (unused) subplots invisible
ax[0, 3].set_visible(False)
ax[1, 3].axis('off')
# Create legend
for i, learner in enumerate(results.keys()):
pl.bar(0, 0, color=colors[i], label=learner)
pl.legend()
# Aesthetics
pl.suptitle("Performance Metrics for Three Supervised Learning Models", fontsize = 16, y = 1.10)
pl.tight_layout()
pl.show() | 45.01217 | 236 | 0.619981 |
e3db7c6be470d4f05ae6f1a7b131a6f2459dd860 | 9,754 | py | Python | src/collect/fraudulous.py | ymentha14/emojis_dataset | 8c5565a39c2862c14dfe41c3b441e9d3e5896a27 | [
"MIT"
] | null | null | null | src/collect/fraudulous.py | ymentha14/emojis_dataset | 8c5565a39c2862c14dfe41c3b441e9d3e5896a27 | [
"MIT"
] | null | null | null | src/collect/fraudulous.py | ymentha14/emojis_dataset | 8c5565a39c2862c14dfe41c3b441e9d3e5896a27 | [
"MIT"
] | null | null | null | """
Fraudulent users detection
As many ways exist for a user input to be invalid (garbage repeated input, random-word answers, etc.),
one needs to either
(1): perform a workers selection on-the-fly (possible with mt2gf)
(2): accept all data in a first pass and filter it afterwards
The second option was chosen for the emojis dataset: this file contains the functions to filter out such
fraudulent inputs
"""
import pickle as pk
from src.constants import (
emotions_faces,
REF_PATH,
MAPPING_PATH,
E2V_PATH,
W2V_PATH,
DATA_PATH,
)
import sys
sys.path.append("../../emoji2vec_working/")
from src.exploration.form10_eda import *
import seaborn as sns
import numpy as np
from src.constants import COLOR_FRAUD, COLOR_TRUE
import Levenshtein
from pdb import set_trace
from src.utils import extract_emojis
###################### SINGLE WORD ######################
def detect_repeat_frauders(form_df, threshold=0.8):
"""
    Detect fraudulent workers, i.e. the ones who repeated the same word too many times.
"""
form_df = form_df.copy()
columns = [col for col in form_df.columns if col not in [
"Timestamp", "WorkerID"]]
form_df["vocsize"] = form_df[columns].apply(lambda x: len(set(x)), axis=1)
fraud_workers = form_df[form_df["vocsize"] < threshold * len(columns)][
"WorkerID"
].values.tolist()
return set(fraud_workers)
def detect_honey_frauders(form_df, honeypots, dist_lshtein=2):
"""
Returns the worker_ids of the workers who did not manage to find the honeypots
Args:
form_df (pd.df): as saved by download_multi_emoji_csv
        dist_lshtein (int): Levenshtein distance tolerated to accept a honeypot
"""
assert form_df["WorkerID"].is_unique
form_df = form_df.set_index("WorkerID").copy()
honey_columns = [em for em in form_df.columns if em in honeypots.keys()]
form_df = form_df[honey_columns]
assert form_df.shape[1] > 0
for em in honey_columns:
corr_words = honeypots[em]
form_df[em] = form_df[em].apply(
lambda word: min(
[Levenshtein.distance(word, corr_word)
for corr_word in corr_words]
)
> dist_lshtein
)
frauder_list = form_df[form_df.any(axis=1)].index.tolist()
return set(frauder_list)
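# Illustrative usage sketch (the "<emoji>" key and the accepted words below are
# made-up placeholders; real honeypots depend on the form that was published):
#
#   honeypots = {"<emoji>": ["key", "keys"]}
#   frauders = detect_honey_frauders(form_df, honeypots, dist_lshtein=2)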
def get_wrong_honey_entries(form_df, honeypots, dist_lshtein=2):
"""
    Returns the rows of form_df which did not pass the honeypots test.
Args:
form_df (pd.df): dataframe from a gform
Return:
[pd.df]: same df with honey frauders entries exclusively
"""
form_df = form_df.set_index("WorkerID").copy()
honey_columns = [em for em in form_df.columns if em in honeypots.keys()]
form_df = form_df[honey_columns]
assert form_df.shape[1] > 0
for em in honey_columns:
corr_words = honeypots[em]
form_df[em] = form_df[em].apply(
lambda word: min(
[Levenshtein.distance(word, corr_word)
for corr_word in corr_words]
)
> dist_lshtein
)
return form_df
#########################################################
def plot_double_hist(user_serie, fraud):
user_serie = user_serie.astype(int)
user_serie_true = user_serie[~user_serie.index.isin(fraud)]
user_serie_fraud = user_serie[user_serie.index.isin(fraud)]
fig, ax = plt.subplots(1)
ax.set_title("Rates of constant-answering users")
user_serie = user_serie.value_counts()
# user_serie_true.plot(kind='bar',color=COLOR_TRUE,label=,rot=0,ax=ax,alpha=0.5)
# user_serie_fraud.plot(kind='bar',color=COLOR_FRAUD,label="fraud",rot=0,ax=ax,alpha=0.5)
ax.hist(
[user_serie_true, user_serie_fraud],
color=[COLOR_TRUE, COLOR_FRAUD],
label=["non-fraud", "fraud"],
)
ax.set_xlabel("Fraudulent")
ax.set_ylabel("Users count")
ax.legend()
# CONSTANTS
def get_users_cstt(form_df):
if type(form_df) is list:
return pd.concat([get_users_cstt(df) for df in form_df])
formset_df = form_df.applymap(lambda x: frozenset(x.split(",")))
# detect users giving constant answers
cstt_mask = (formset_df.applymap(len) == 1).any(axis=1)
return cstt_mask
def dtct_cstt_answer(form_df):
""" detect users giving at least one cstt answer (3 times the same word)"""
cstt_mask = get_users_cstt(form_df)
cstt_users = cstt_mask.index[cstt_mask].tolist()
return cstt_users
# DUPLICATES
def get_users_duplicate(form_df, ratio=0.9):
if type(form_df) is list:
return pd.concat([get_users_duplicate(df) for df in form_df])
n_cols = form_df.shape[1]
# transform the strings in frozen sets
form_df = form_df.applymap(lambda x: frozenset(x.split(",")))
duplicate_mask = form_df.apply(lambda x: len(
set(x)) < int(n_cols * ratio), axis=1)
return duplicate_mask
def dtct_duplicate_answer(form_df, ratio=0.95):
"""detect users giving many times the same answer (up to ratio * the number of answers)"""
duplicate_mask = get_users_duplicate(form_df, ratio)
duplicate_users = set(duplicate_mask.index[duplicate_mask].tolist())
return duplicate_users
# VOCABULARY
def compute_voc_size(form_df):
"""
Compute the vocabulary size for each user
Args:
form_df(pd.DataFrame): formular df
Return:
[pd.Series]: series associating the voc size to each user
"""
if type(form_df) is list:
return pd.concat([compute_voc_size(df) for df in form_df])
return form_df.apply(lambda x: len(set("".join(x).split(","))), axis=1)
def plot_voc(tot_voc, fraud):
"""
Plot overlapping histograms of vocabulary size for fraudulent
and non fraudulent users
Args:
        tot_voc (pd.Series): series associating a voc size to each user
fraud (set): set of fraudulent users
"""
fig, ax = plt.subplots(1)
ax.set_title("Vocabulary size for non/fraudulent users")
bins = 10
tot_voc_true = tot_voc[~tot_voc.index.isin(fraud)]
tot_voc_fraud = tot_voc[tot_voc.index.isin(fraud)]
ax.hist([tot_voc_true, tot_voc_fraud], color=[
COLOR_TRUE, COLOR_FRAUD], bins=bins)
ax.set_xlabel("Voc size")
ax.set_ylabel("Users count")
ax.legend()
def dtct_poor_voc(form_df, ratio=0.55):
voc = compute_voc_size(form_df)
N = form_df.shape[1]
N_lim = int(ratio * N * 3)
poor_users = voc[voc <= N_lim].index.tolist()
return poor_users
# SEMANTIC
def get_vec_error(form_df, w2v, e2v, ref="mean", loss="l1"):
"""
    Return the mean error (L1 or L2, depending on `loss`) over each emoji for each user.
"""
cols = [col for col in form_df.columns if col in e2v.vocab]
form_df = form_df[cols]
vec_error = form_df.applymap(
lambda x: np.mean(
[w2v.get_vector(word) for word in x.split(",") if word in w2v.vocab], axis=0
)
)
if ref == "mean":
# using average of batch as reference
mean_vecs = np.mean(vec_error.values, axis=0).tolist()
else:
assert ref == "em"
# using emojis vec as reference
mean_vecs = [e2v.get_vector(em) for em in form_df.columns]
# normalize
if loss == "l2":
vec_error = (
vec_error - mean_vecs).applymap(lambda x: x ** 2).applymap(sum)
else:
assert loss == "l1"
vec_error = abs(vec_error - mean_vecs).applymap(sum)
vec_error = vec_error.sum(axis=1).sort_values()
return vec_error
def dtct_vec_error(form_df, w2v, e2v):
    vec_error = get_vec_error(form_df, w2v, e2v)
return vec_error.head(3).index.tolist()
def plot_vec_error(vec_error, fraud, ax=None):
"""
Plot the error for each user as a barplot
Args:
        vec_error (pd.Series): series associating the L1 or L2 error to each user (index)
        fraud (set): set of fraudulent users
"""
if ax is None:
fig, ax = plt.subplots(1)
vec_error = vec_error.to_frame("error")
vec_error["color"] = [
COLOR_FRAUD if ind in fraud else COLOR_TRUE for ind in vec_error.index
]
vec_error["error"].plot.bar(color=vec_error.color, ax=ax)
ax.set_title("mean L2orL1 error between user and emoji representations")
ax.set_xlabel("user id")
ax.set_ylabel("L2orL1")
# FRAUDULOUS DETECTION
def gather_users(*args):
""" gather the users of every formdf passed in parameter into a single set"""
return set.union(*[set(form_df.index.tolist()) for form_df in args])
def find_fraudulous(form_df, filter_funcs):
"""
    Find the fraudulent users in form_df using each of the filter functions
    Args:
        form_df (pd.Df): form dataframe to filter
        filter_funcs (list of func): functions, each returning the fraudulent users detected in a df
    Return:
        [dict]: mapping from filter function name to the fraudulent users it detected
"""
fraudulous_users = {}
for filter_func in filter_funcs:
func_name = str(filter_func).split()[1]
new_fraud_users = filter_func(form_df)
fraudulous_users[func_name] = new_fraud_users
return fraudulous_users
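# Illustrative usage sketch combining the detectors defined above (the particular
# choice of filter functions is an example, not a prescribed pipeline):
#
#   filters = [dtct_cstt_answer, dtct_duplicate_answer, dtct_poor_voc]
#   fraud_report = find_fraudulous(form_df, filters)
#   fraud = set().union(*map(set, fraud_report.values()))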
def fraud_metrics(users, fraud, fraud_hat):
"""
    Compute and print/plot the fraud-analysis metrics (accuracy, precision, recall and confusion matrix).
"""
TP = len(fraud.intersection(fraud_hat))
FP = len(fraud_hat - fraud)
FN = len(fraud - fraud_hat)
TN = len((users - fraud.union(fraud_hat)))
accuracy = (TN + TP) / (TP + TN + FP + FN)
precision = TP / (TP + FP)
recall = TP / (TP + FN)
confusion_matrix = np.array([[TP, FP], [FN, TN]])
print(f"Accuracy:{accuracy}\nPrecision:{precision}\nRecall:{recall}")
fig, ax = plt.subplots(1)
sns.heatmap(
confusion_matrix,
annot=True,
ax=ax,
xticklabels=["P", "N"],
yticklabels=["P", "N"],
)
ax.set_xlabel("True")
ax.set_ylabel("Predicted")
| 31.566343 | 104 | 0.655526 |
e97517a4f084076d5a18c085ea6f1487fed1b18b | 3,459 | py | Python | mint/timelord/timelord_launcher.py | sai-genesis/rc1-test | 56e565952b283450c8589296f87c31b1c67b8502 | [
"Apache-2.0"
] | null | null | null | mint/timelord/timelord_launcher.py | sai-genesis/rc1-test | 56e565952b283450c8589296f87c31b1c67b8502 | [
"Apache-2.0"
] | null | null | null | mint/timelord/timelord_launcher.py | sai-genesis/rc1-test | 56e565952b283450c8589296f87c31b1c67b8502 | [
"Apache-2.0"
] | null | null | null | import asyncio
import logging
import pathlib
import signal
import socket
import time
from typing import Dict, List
import pkg_resources
from mint.util.mint_logging import initialize_logging
from mint.util.config import load_config
from mint.util.default_root import DEFAULT_ROOT_PATH
from mint.util.setproctitle import setproctitle
active_processes: List = []
stopped = False
lock = asyncio.Lock()
log = logging.getLogger(__name__)
async def kill_processes():
global stopped
global active_processes
async with lock:
stopped = True
for process in active_processes:
try:
process.kill()
except ProcessLookupError:
pass
def find_vdf_client() -> pathlib.Path:
p = pathlib.Path(pkg_resources.get_distribution("chiavdf").location) / "vdf_client"
if p.is_file():
return p
raise FileNotFoundError("can't find vdf_client binary")
async def spawn_process(host: str, port: int, counter: int):
global stopped
global active_processes
path_to_vdf_client = find_vdf_client()
first_10_seconds = True
start_time = time.time()
while not stopped:
try:
dirname = path_to_vdf_client.parent
basename = path_to_vdf_client.name
resolved = socket.gethostbyname(host)
proc = await asyncio.create_subprocess_shell(
f"{basename} {resolved} {port} {counter}",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env={"PATH": dirname},
)
except Exception as e:
log.warning(f"Exception while spawning process {counter}: {(e)}")
continue
async with lock:
active_processes.append(proc)
stdout, stderr = await proc.communicate()
if stdout:
log.info(f"VDF client {counter}: {stdout.decode().rstrip()}")
if stderr:
if first_10_seconds:
if time.time() - start_time > 10:
first_10_seconds = False
else:
log.error(f"VDF client {counter}: {stderr.decode().rstrip()}")
log.info(f"Process number {counter} ended.")
async with lock:
if proc in active_processes:
active_processes.remove(proc)
await asyncio.sleep(0.1)
async def spawn_all_processes(config: Dict, net_config: Dict):
await asyncio.sleep(5)
port = config["port"]
process_count = config["process_count"]
awaitables = [spawn_process(net_config["self_hostname"], port, i) for i in range(process_count)]
await asyncio.gather(*awaitables)
def main():
root_path = DEFAULT_ROOT_PATH
setproctitle("mint_timelord_launcher")
net_config = load_config(root_path, "config.yaml")
config = net_config["timelord_launcher"]
initialize_logging("TLauncher", config["logging"], root_path)
def signal_received():
asyncio.create_task(kill_processes())
loop = asyncio.get_event_loop()
try:
loop.add_signal_handler(signal.SIGINT, signal_received)
loop.add_signal_handler(signal.SIGTERM, signal_received)
except NotImplementedError:
log.info("signal handlers unsupported")
try:
loop.run_until_complete(spawn_all_processes(config, net_config))
finally:
log.info("Launcher fully closed.")
loop.close()
if __name__ == "__main__":
main()
| 30.078261 | 100 | 0.65279 |
a4acc85c78bd01cfc10bfd7593f07e7260df76e0 | 1,753 | py | Python | test/test_deferred.py | tcdude/py-klondike-solver | 1eb45cd556f0c934c0a079eeb43e8a99d943c094 | [
"MIT"
] | 1 | 2020-02-26T18:20:46.000Z | 2020-02-26T18:20:46.000Z | test/test_deferred.py | tcdude/py-klondike-solver | 1eb45cd556f0c934c0a079eeb43e8a99d943c094 | [
"MIT"
] | 2 | 2020-03-17T11:11:32.000Z | 2021-03-20T19:18:23.000Z | test/test_deferred.py | tcdude/py-klondike-solver | 1eb45cd556f0c934c0a079eeb43e8a99d943c094 | [
"MIT"
] | null | null | null | """
Unit tests for the deferred module.
"""
from pyksolve import deferred
__author__ = 'Tiziano Bettio'
__license__ = 'MIT'
__version__ = '0.0.10'
__copyright__ = """
Copyright (c) 2020 Tiziano Bettio
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def test_deferred_solver():
"""
Test the functionality of the deferred_solver.
"""
d = deferred.DeferredSolver(draw_counts=(1, 2, 3), cache_num=1, threads=3)
seed, diagram, moves = d.get_solved(1)
assert seed > 0
assert diagram != ''
assert moves != ''
seed, diagram, moves = d.get_solved(2)
assert seed > 0
assert diagram != ''
assert moves != ''
seed, diagram, moves = d.get_solved(3)
assert seed > 0
assert diagram != ''
assert moves != ''
d.stop()
| 34.372549 | 78 | 0.733029 |
72db4c269839bf9fe2c9fc07bd3e57db4139356a | 2,568 | py | Python | blocc/forms.py | kelvin-daniel/neighborhood | ea4123ef5b42a9046f10a1d55827bd0b82dd4cd1 | [
"MIT"
] | null | null | null | blocc/forms.py | kelvin-daniel/neighborhood | ea4123ef5b42a9046f10a1d55827bd0b82dd4cd1 | [
"MIT"
] | null | null | null | blocc/forms.py | kelvin-daniel/neighborhood | ea4123ef5b42a9046f10a1d55827bd0b82dd4cd1 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.models import User
from .models import *
from django.core.exceptions import ValidationError
def ForbiddenUsers(value):
forbidden_users = ['admin', 'css', 'js', 'authenticate', 'login', 'logout', 'administrator', 'root',
'email', 'user', 'join', 'sql', 'static', 'python', 'delete']
if value.lower() in forbidden_users:
        raise ValidationError('Invalid name for user, this is a reserved word.')
def InvalidUser(value):
if '@' in value or '+' in value or '-' in value:
        raise ValidationError('This is an invalid username. Do not use these characters: @, -, +')
def UniqueEmail(value):
if User.objects.filter(email__iexact=value).exists():
raise ValidationError('User with this email already exists.')
def UniqueUser(value):
if User.objects.filter(username__iexact=value).exists():
raise ValidationError('User with this username already exists.')
class SignupForm(forms.ModelForm):
username = forms.CharField(widget=forms.TextInput(), max_length=30, required=True,)
email = forms.CharField(widget=forms.EmailInput(), max_length=100, required=True,)
password = forms.CharField(widget=forms.PasswordInput())
confirm_password = forms.CharField(widget=forms.PasswordInput(), required=True, label="Confirm your password.")
class Meta:
model = User
fields = ('username', 'email', 'password')
def __init__(self, *args, **kwargs):
super(SignupForm, self).__init__(*args, **kwargs)
self.fields['username'].validators.append(ForbiddenUsers)
self.fields['username'].validators.append(InvalidUser)
self.fields['username'].validators.append(UniqueUser)
self.fields['email'].validators.append(UniqueEmail)
def clean(self):
super(SignupForm, self).clean()
password = self.cleaned_data.get('password')
confirm_password = self.cleaned_data.get('confirm_password')
if password != confirm_password:
self._errors['password'] = self.error_class(['Passwords do not match. Try again'])
return self.cleaned_data
class NotificationsForm(forms.ModelForm):
class Meta:
model=Notification
exclude=['author','block','post_date']
class ProfileForm(forms.ModelForm):
class Meta:
model=Profile
exclude=['username']
class PostForm(forms.ModelForm):
class Meta:
model=Post
exclude=['username','block','avatar']
class BusinessForm(forms.ModelForm):
class Meta:
model=Business
exclude=['owner','block']
class CommentForm(forms.ModelForm):
class Meta:
model=Comment
exclude=['username','post'] | 34.702703 | 112 | 0.716121 |
6089cb43cccc1563b954608ad7836fd9acf3c272 | 4,323 | py | Python | tests/platform/test_service_cart.py | labd/commercetools-python-sdk | d8ec285f08d56ede2e4cad45c74833f5b609ab5c | [
"MIT"
] | 15 | 2018-11-02T14:35:52.000Z | 2022-03-16T07:51:44.000Z | tests/platform/test_service_cart.py | lime-green/commercetools-python-sdk | 63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6 | [
"MIT"
] | 84 | 2018-11-02T12:50:32.000Z | 2022-03-22T01:25:54.000Z | tests/platform/test_service_cart.py | lime-green/commercetools-python-sdk | 63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6 | [
"MIT"
] | 13 | 2019-01-03T09:16:50.000Z | 2022-02-15T18:37:19.000Z | import uuid
import pytest
from commercetools.platform import models
from commercetools.platform.client import Client
def test_cart_get_by_id(ct_platform_client: Client, cart_draft):
cart = ct_platform_client.with_project_key("test").carts().post(cart_draft)
assert cart.id
@pytest.fixture
def cart_draft(ct_platform_client: Client):
client = ct_platform_client.with_project_key("test")
product_1 = client.products().post(
models.ProductDraft(
key="product-1",
product_type=models.ProductTypeResourceIdentifier(key="dummy"),
name=models.LocalizedString(en=f"my-product-1"),
slug=models.LocalizedString(en=f"my-product-1"),
publish=True,
)
)
product_2 = client.products().post(
models.ProductDraft(
key="product-2",
product_type=models.ProductTypeResourceIdentifier(key="dummy"),
name=models.LocalizedString(en=f"my-product-2"),
slug=models.LocalizedString(en=f"my-product-2"),
publish=True,
)
)
return models.CartDraft(
customer_id=str(uuid.uuid4()),
customer_email="foo@example.com",
currency="GBP",
anonymous_id=str(uuid.uuid4()),
country="GB",
inventory_mode=models.InventoryMode.NONE,
tax_mode=models.TaxMode.PLATFORM,
tax_rounding_mode=models.RoundingMode.HALF_EVEN,
tax_calculation_mode=models.TaxCalculationMode.LINE_ITEM_LEVEL,
line_items=[
models.LineItemDraft(product_id=product_1.id, quantity=1),
models.LineItemDraft(product_id=product_2.id, quantity=2),
],
locale="en",
origin=models.CartOrigin.CUSTOMER,
)
def test_update_actions(commercetools_api, ct_platform_client, cart_draft):
client = ct_platform_client.with_project_key("test")
cart = client.carts().post(cart_draft)
payment_reference = models.PaymentReference(id=str(uuid.uuid4()))
cart = (
client.carts()
.with_id(cart.id)
.post(
models.CartUpdate(
version=cart.version,
actions=[models.CartAddPaymentAction(payment=payment_reference)],
)
)
)
type_draft = models.TypeDraft(
key="foobar",
resource_type_ids=[models.ResourceTypeId.ORDER],
name={"en-US": "test"},
field_definitions=[
models.FieldDefinition(
type=models.CustomFieldStringType(),
name="foo1",
label={"en-US": "foo-1"},
required=False,
),
models.FieldDefinition(
type=models.CustomFieldSetType(element_type=None),
name="foo2",
label={"en-US": "foo-2"},
required=False,
),
models.FieldDefinition(
type=models.CustomFieldBooleanType(),
name="foo3",
label={"en-US": "foo-3"},
required=False,
),
],
)
custom_type = client.types().post(type_draft)
assert custom_type.id
assert cart.payment_info.payments[0] == payment_reference
cart = (
client.carts()
.with_id(cart.id)
.post(
models.CartUpdate(
version=cart.version,
actions=[
models.CartSetCustomTypeAction(
type=models.TypeResourceIdentifier(id=custom_type.id)
)
],
)
)
)
cart = (
client.carts()
.with_id(cart.id)
.post(
models.CartUpdate(
version=cart.version,
actions=[
models.CartSetCustomFieldAction(name="foo1", value="bar"),
models.CartSetCustomFieldAction(name="foo2", value=["bar"]),
models.CartSetCustomFieldAction(name="foo3", value=False),
],
)
)
)
cart = client.carts().with_id(cart.id).get()
assert all(key in cart.custom.fields for key in ["foo1", "foo2", "foo3"])
assert cart.custom.fields["foo1"] == "bar"
assert cart.custom.fields["foo2"] == ["bar"]
assert cart.custom.fields["foo3"] is False
| 31.554745 | 81 | 0.573907 |
1464d17e9cc0138c0d1aa361ecd223461d7127e5 | 135 | py | Python | iterpop/__init__.py | mmore500/iterpop | 7f93edd7405fa7363a1c7f80a5a0b22cfe5675ea | [
"MIT"
] | null | null | null | iterpop/__init__.py | mmore500/iterpop | 7f93edd7405fa7363a1c7f80a5a0b22cfe5675ea | [
"MIT"
] | null | null | null | iterpop/__init__.py | mmore500/iterpop | 7f93edd7405fa7363a1c7f80a5a0b22cfe5675ea | [
"MIT"
] | null | null | null | """Top-level package for iterpop."""
__author__ = """Matthew Andres Moreno"""
__email__ = 'm.more500@gmail.com'
__version__ = '0.4.0'
| 22.5 | 40 | 0.688889 |
f8edf2445364db26e513433ef3d656dd157ea6b2 | 714 | py | Python | api/migrations/0001_initial.py | Shelestov7/mi_trainee_task | 2db65806b1b1f491bf10a8d72738aaa57d84f8f5 | [
"MIT"
] | null | null | null | api/migrations/0001_initial.py | Shelestov7/mi_trainee_task | 2db65806b1b1f491bf10a8d72738aaa57d84f8f5 | [
"MIT"
] | 5 | 2021-03-19T03:07:37.000Z | 2021-09-22T18:59:51.000Z | api/migrations/0001_initial.py | Shelestov7/mi_trainee_task | 2db65806b1b1f491bf10a8d72738aaa57d84f8f5 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.12 on 2020-05-10 12:09
import datetime
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Secret',
fields=[
('secret_key', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('pass_phrase', models.CharField(max_length=90, verbose_name='Code Phrase')),
                ('text', models.TextField(verbose_name='Secret Text')),
('life_time', models.DateTimeField(default=datetime.datetime.utcnow)),
],
),
]
| 27.461538 | 120 | 0.612045 |
ec1c692894dff4568184f00256d8d4af6deb10e3 | 3,768 | py | Python | tests/test_others.py | surajpotnuru/sqllineage | fe66eb6dda781eb57dfe3d928432268657c1af4f | [
"MIT"
] | null | null | null | tests/test_others.py | surajpotnuru/sqllineage | fe66eb6dda781eb57dfe3d928432268657c1af4f | [
"MIT"
] | null | null | null | tests/test_others.py | surajpotnuru/sqllineage | fe66eb6dda781eb57dfe3d928432268657c1af4f | [
"MIT"
] | null | null | null | from sqllineage.runner import LineageRunner
from .helpers import helper
def test_use():
helper("USE db1")
def test_table_name_case():
helper(
"""insert overwrite table tab_a
select * from tab_b
union all
select * from TAB_B""",
{"tab_b"},
{"tab_a"},
)
def test_create():
helper("CREATE TABLE tab1 (col1 STRING)", None, {"tab1"})
def test_create_if_not_exist():
helper("CREATE TABLE IF NOT EXISTS tab1 (col1 STRING)", None, {"tab1"})
def test_create_bucket_table():
helper(
"CREATE TABLE tab1 USING parquet CLUSTERED BY (col1) INTO 500 BUCKETS",
None,
{"tab1"},
)
def test_create_as():
helper("CREATE TABLE tab1 AS SELECT * FROM tab2", {"tab2"}, {"tab1"})
def test_create_like():
helper("CREATE TABLE tab1 LIKE tab2", {"tab2"}, {"tab1"})
def test_create_select():
helper("CREATE TABLE tab1 SELECT * FROM tab2", {"tab2"}, {"tab1"})
def test_create_after_drop():
helper(
"DROP TABLE IF EXISTS tab1; CREATE TABLE IF NOT EXISTS tab1 (col1 STRING)",
None,
{"tab1"},
)
def test_update():
helper("UPDATE tab1 SET col1='val1' WHERE col2='val2'", None, {"tab1"})
def test_update_with_join():
helper(
"UPDATE tab1 a INNER JOIN tab2 b ON a.col1=b.col1 SET a.col2=b.col2",
{"tab2"},
{"tab1"},
)
def test_drop():
helper("DROP TABLE IF EXISTS tab1", None, None)
def test_drop_with_comment():
helper(
"""--comment
DROP TABLE IF EXISTS tab1""",
None,
None,
)
def test_drop_after_create():
helper(
"CREATE TABLE IF NOT EXISTS tab1 (col1 STRING);DROP TABLE IF EXISTS tab1",
None,
None,
)
def test_drop_tmp_tab_after_create():
sql = """create table tab_a as select * from tab_b;
insert overwrite table tab_c select * from tab_a;
drop table tab_a;"""
helper(sql, {"tab_b"}, {"tab_c"})
def test_new_create_tab_as_tmp_table():
sql = """create table tab_a as select * from tab_b;
create table tab_c as select * from tab_a;"""
helper(sql, {"tab_b"}, {"tab_c"})
def test_alter_table_rename():
helper("alter table tab1 rename to tab2;", None, None)
def test_alter_target_table_name():
helper(
"insert overwrite tab1 select * from tab2; alter table tab1 rename to tab3;",
{"tab2"},
{"tab3"},
)
def test_refresh_table():
helper("refresh table tab1", None, None)
def test_cache_table():
helper("cache table tab1", None, None)
def test_uncache_table():
helper("uncache table tab1", None, None)
def test_uncache_table_if_exists():
helper("uncache table if exists tab1", None, None)
def test_truncate_table():
helper("truncate table tab1", None, None)
def test_delete_from_table():
helper("delete from table tab1", None, None)
def test_split_statements():
sql = "SELECT * FROM tab1; SELECT * FROM tab2;"
assert len(LineageRunner(sql).statements()) == 2
def test_split_statements_with_heading_and_ending_new_line():
sql = "\nSELECT * FROM tab1;\nSELECT * FROM tab2;\n"
assert len(LineageRunner(sql).statements()) == 2
def test_split_statements_with_comment():
sql = """SELECT 1;
-- SELECT 2;"""
assert len(LineageRunner(sql).statements()) == 1
def test_statements_trim_comment():
comment = "------------------\n"
sql = "select * from dual;"
assert LineageRunner(comment + sql).statements(strip_comments=True)[0] == sql
def test_split_statements_with_show_create_table():
sql = """SELECT 1;
SHOW CREATE TABLE tab1;"""
assert len(LineageRunner(sql).statements()) == 2
def test_split_statements_with_desc():
sql = """SELECT 1;
DESC tab1;"""
assert len(LineageRunner(sql).statements()) == 2
| 21.780347 | 85 | 0.647824 |
a29841920a226ecd4ffd903cc4e04a7966cad19c | 1,163 | py | Python | iot/synth.py | theacodes/google-cloud-python | 57dafcb78540e12c82f7ca0fc77d75edeb269390 | [
"Apache-2.0"
] | 1 | 2020-10-25T04:39:41.000Z | 2020-10-25T04:39:41.000Z | iot/synth.py | di/google-cloud-python | a0bd8d0565e2a682760a113c59ce12b872bce9ab | [
"Apache-2.0"
] | 4 | 2018-11-13T22:15:36.000Z | 2018-12-07T18:31:38.000Z | iot/synth.py | di/google-cloud-python | a0bd8d0565e2a682760a113c59ce12b872bce9ab | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
from synthtool import gcp
gapic = gcp.GAPICGenerator()
#----------------------------------------------------------------------------
# Generate iot client
#----------------------------------------------------------------------------
library = gapic.py_library(
'iot',
'v1',
config_path='/google/cloud/iot/artman_cloudiot.yaml',
artman_output_name='iot-v1')
s.move(library / 'google/cloud/iot_v1')
s.move(library / 'tests/unit/gapic')
s.move(library / 'tests/system/gapic')
| 34.205882 | 77 | 0.644024 |
469340d66ac5f1c53f9af95d519d5680b869631f | 66 | py | Python | ndgpy/data/__init__.py | asrvsn/ndgpy | 4302ec6767feec16286394361fd84d2fc4fe0cdf | [
"MIT"
] | null | null | null | ndgpy/data/__init__.py | asrvsn/ndgpy | 4302ec6767feec16286394361fd84d2fc4fe0cdf | [
"MIT"
] | null | null | null | ndgpy/data/__init__.py | asrvsn/ndgpy | 4302ec6767feec16286394361fd84d2fc4fe0cdf | [
"MIT"
] | null | null | null | from .base import *
from .streaming import *
from .shared import * | 22 | 24 | 0.742424 |
64a953ff2ed981cfbb3e9228631f465ec46e8273 | 1,431 | py | Python | pcdet/models/backbones_3d/vfe/mean_vfe.py | dleam/Associate-3Ddet-V2 | 85f19a6a9b128a8f91c831a27fdae4ffdeb2dc8c | [
"Apache-2.0"
] | 5 | 2021-08-13T16:20:49.000Z | 2021-12-30T10:40:02.000Z | pcdet/models/backbones_3d/vfe/mean_vfe.py | dleam/Associate-3Ddet-V2 | 85f19a6a9b128a8f91c831a27fdae4ffdeb2dc8c | [
"Apache-2.0"
] | 3 | 2021-09-27T14:10:57.000Z | 2021-12-25T05:37:47.000Z | pcdet/models/backbones_3d/vfe/mean_vfe.py | dleam/Associate-3Ddet-V2 | 85f19a6a9b128a8f91c831a27fdae4ffdeb2dc8c | [
"Apache-2.0"
] | null | null | null | import torch
from .vfe_template import VFETemplate
class MeanVFE(VFETemplate):
def __init__(self, model_cfg, num_point_features, **kwargs):
super().__init__(model_cfg=model_cfg)
self.num_point_features = num_point_features
def get_output_feature_dim(self):
return self.num_point_features
def forward(self, batch_dict, **kwargs):
"""
Args:
batch_dict:
voxels: (num_voxels, max_points_per_voxel, C)
voxel_num_points: optional (num_voxels)
**kwargs:
Returns:
vfe_features: (num_voxels, C)
"""
voxel_features, voxel_num_points = batch_dict['voxels'], batch_dict['voxel_num_points']
points_mean = voxel_features[:, :, :].sum(dim=1, keepdim=False)
normalizer = torch.clamp_min(voxel_num_points.view(-1, 1), min=1.0).type_as(voxel_features)
points_mean = points_mean / normalizer
batch_dict['voxel_features'] = points_mean.contiguous()
pvoxel_features, pvoxel_num_points = batch_dict['pvoxels'], batch_dict['pvoxel_num_points']
ppoints_mean = pvoxel_features[:, :, :].sum(dim=1, keepdim=False)
pnormalizer = torch.clamp_min(pvoxel_num_points.view(-1, 1), min=1.0).type_as(pvoxel_features)
ppoints_mean = ppoints_mean / pnormalizer
batch_dict['pvoxel_features'] = ppoints_mean.contiguous()
return batch_dict
| 38.675676 | 102 | 0.666667 |
6d84d129a41edba49f4ad67cee5a49b09038f6c4 | 2,523 | py | Python | nboost/database.py | kaykanloo/nboost | 8412ae449345180612eeb3d0d34f79e517fd2dca | [
"Apache-2.0"
] | 646 | 2019-11-12T19:57:12.000Z | 2022-03-30T01:54:58.000Z | nboost/database.py | kaykanloo/nboost | 8412ae449345180612eeb3d0d34f79e517fd2dca | [
"Apache-2.0"
] | 85 | 2019-11-10T18:17:36.000Z | 2021-10-15T11:20:21.000Z | nboost/database.py | kaykanloo/nboost | 8412ae449345180612eeb3d0d34f79e517fd2dca | [
"Apache-2.0"
] | 75 | 2019-11-23T19:30:32.000Z | 2022-03-27T18:15:08.000Z | import os
import time
from typing import Optional
from sqlite3 import Cursor
import sqlite3
from nboost import defaults
class Database:
def __init__(self, db_file: type(defaults.db_file) = defaults.db_file, **_):
os.makedirs(db_file.parent, exist_ok=True)
self.db_file = db_file
def new_row(self):
return DatabaseRow()
def get_cursor(self) -> Cursor:
conn = sqlite3.connect(str(self.db_file), isolation_level=None)
return conn.cursor()
def insert(self, db_row: 'DatabaseRow'):
cursor = self.get_cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS searches (
time REAL,
topk INTEGER,
choices INTEGER,
qa_time REAL,
model_mrr REAL,
server_mrr REAL,
rerank_time REAL,
response_time REAL
);
''')
cursor.execute('''
INSERT INTO searches (
time,
topk,
choices,
qa_time,
model_mrr,
server_mrr,
rerank_time,
response_time
)
VALUES(?,?,?,?,?,?,?,?);
''', (
time.time(),
db_row.topk,
db_row.choices,
db_row.qa_time,
db_row.model_mrr,
db_row.server_mrr,
db_row.rerank_time,
db_row.response_time
))
def get_stats(self) -> dict:
cursor = self.get_cursor()
stats = cursor.execute('''
SELECT
AVG(topk) AS avg_topk,
AVG(choices) AS avg_num_choices,
AVG(rerank_time) AS avg_rerank_time,
AVG(response_time) AS avg_response_time,
AVG(model_mrr) AS avg_model_mrr,
AVG(server_mrr) AS avg_server_mrr
FROM searches
''').fetchone()
columns = [column[0] for column in cursor.description]
return dict(zip(columns, stats))
class DatabaseRow:
def __init__(self):
self.topk = None # type: Optional[int]
self.choices = None # type: Optional[int]
self.qa_time = None # type: Optional[float]
self.model_mrr = None # type: Optional[float]
self.server_mrr = None # type: Optional[float]
self.rerank_time = None # type: Optional[float]
self.response_time = None # type: Optional[float]
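# Illustrative usage sketch (field values below are placeholders, not real metrics):
#
#   db = Database()
#   row = db.new_row()
#   row.topk = 10
#   row.choices = 50
#   row.rerank_time = 0.12
#   row.response_time = 0.34
#   db.insert(row)
#   print(db.get_stats())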
| 29.682353 | 80 | 0.529132 |
5692dc922a0e2c74e645d7562d397d13b4ab812d | 872 | py | Python | goratings/interfaces/Storage.py | flovo/goratings | 50b5443b73daae64306e256205eabee8f4815c65 | [
"MIT"
] | 13 | 2020-07-02T16:43:12.000Z | 2021-12-12T00:12:48.000Z | goratings/interfaces/Storage.py | flovo/goratings | 50b5443b73daae64306e256205eabee8f4815c65 | [
"MIT"
] | 13 | 2020-07-05T10:06:42.000Z | 2022-02-27T10:03:24.000Z | goratings/interfaces/Storage.py | flovo/goratings | 50b5443b73daae64306e256205eabee8f4815c65 | [
"MIT"
] | 2 | 2020-07-04T11:19:37.000Z | 2021-01-15T16:46:32.000Z | import abc
from typing import Any, Dict
__all__ = ["Storage"]
class Storage(abc.ABC):
@abc.abstractmethod
def get(self, player_id: int) -> Any:
raise NotImplementedError
@abc.abstractmethod
def set(self, player_id: int, entry: Any) -> None:
raise NotImplementedError
@abc.abstractmethod
def get_set_count(self, player_id: int) -> int:
raise NotImplementedError
@abc.abstractmethod
def clear_set_count(self, player_id: int) -> None:
raise NotImplementedError
@abc.abstractmethod
def all_players(self) -> Dict[int, Any]:
raise NotImplementedError
@abc.abstractmethod
def get_timeout_flag(self, player_id: int) -> bool:
raise NotImplementedError
@abc.abstractmethod
def set_timeout_flag(self, player_id: int, tf: bool) -> None:
raise NotImplementedError
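# Illustrative in-memory implementation sketch (not part of the library; a real
# backend would persist entries, e.g. in a database):
#
#   class InMemoryStorage(Storage):
#       def __init__(self) -> None:
#           self._entries: Dict[int, Any] = {}
#           self._set_counts: Dict[int, int] = {}
#           self._timeouts: Dict[int, bool] = {}
#
#       def get(self, player_id: int) -> Any:
#           return self._entries[player_id]
#
#       def set(self, player_id: int, entry: Any) -> None:
#           self._entries[player_id] = entry
#           self._set_counts[player_id] = self._set_counts.get(player_id, 0) + 1
#
#       def get_set_count(self, player_id: int) -> int:
#           return self._set_counts.get(player_id, 0)
#
#       def clear_set_count(self, player_id: int) -> None:
#           self._set_counts[player_id] = 0
#
#       def all_players(self) -> Dict[int, Any]:
#           return self._entries
#
#       def get_timeout_flag(self, player_id: int) -> bool:
#           return self._timeouts.get(player_id, False)
#
#       def set_timeout_flag(self, player_id: int, tf: bool) -> None:
#           self._timeouts[player_id] = tf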
| 24.914286 | 65 | 0.682339 |
946c02234cfe66e218fe30a66dda5c7f421a6e95 | 1,507 | py | Python | tests/test_rows.py | dimagi/djtables | dfe5b9e2ea51fd42e528cbe4a7f23cdcfda59fad | [
"MIT"
] | 1 | 2020-09-15T22:17:11.000Z | 2020-09-15T22:17:11.000Z | tests/test_rows.py | adammck/djtables | 8fa279e7088123f00cca9c838fe028ebf327325e | [
"MIT"
] | null | null | null | tests/test_rows.py | adammck/djtables | 8fa279e7088123f00cca9c838fe028ebf327325e | [
"MIT"
] | 1 | 2019-09-29T04:19:21.000Z | 2019-09-29T04:19:21.000Z | #!/usr/bin/env python
# vim: et ts=4 sw=4
from nose.tools import raises
from djtables.table import Table
from djtables.column import Column
from djtables.row import Row
class TestTable(Table):
name = Column()
weapon = Column()
def test_accepts_dicts():
obj = {
'name': "Leonardo",
'weapon': "Katana"
}
row = Row(TestTable(), obj)
assert row.name == obj['name']
assert row.weapon == obj['weapon']
def test_accepts_objects():
class MockObject(object):
def __init__(self, name, weapon):
self.name = name
self.weapon = weapon
def __unicode__(self):
return self.name
obj = MockObject("Michelangelo", "Nunchaku")
row = Row(TestTable(), obj)
assert row.name == obj.name
assert row.weapon == obj.weapon
assert unicode(row) == unicode(obj)
def test_calls_callables():
obj = {
'name': lambda: "Donatello",
'weapon': lambda: "Bo Staff",
}
row = Row(TestTable(), obj)
assert row.name == "Donatello"
assert row.weapon == "Bo Staff"
def test_returns_none_on_invalid_column():
row = Row(TestTable(), {})
assert row.whatever == None
def test_is_iterable():
data = {
'name': "Raphael",
'weapon': "Sai"
}
row = Row(TestTable(), data)
for cell in row:
assert cell.row == row
assert cell.value in data.values()
def test_has_length():
row = Row(TestTable(), {})
assert len(row) == 2
| 19.828947 | 48 | 0.595222 |
299f5707ecff43da848a89b11466043368ed3e68 | 12,573 | py | Python | tests/test_participant_model.py | bountysource/www.gittip.com | 84cd36dd1da2eec54ebf0fe8c7c31d10ae5b933c | [
"CC0-1.0"
] | 4 | 2015-03-11T02:04:27.000Z | 2021-04-28T01:33:40.000Z | tests/test_participant_model.py | sigmavirus24/www.gittip.com | 112ea4e62aefad11c36954f0f979679ce057cf02 | [
"CC0-1.0"
] | null | null | null | tests/test_participant_model.py | sigmavirus24/www.gittip.com | 112ea4e62aefad11c36954f0f979679ce057cf02 | [
"CC0-1.0"
] | 3 | 2015-08-27T09:47:17.000Z | 2018-08-02T14:05:59.000Z | from __future__ import unicode_literals
import random
import datetime
from decimal import Decimal
import psycopg2
import pytz
from nose.tools import assert_raises
from gittip.testing import Harness
from gittip.models import Participant, Tip
from gittip.participant import Participant as OldParticipant
class Tests(Harness):
def random_restricted_username(self):
"""helper method to chooses a restricted username for testing """
from gittip import RESTRICTED_USERNAMES
random_item = random.choice(RESTRICTED_USERNAMES)
while random_item.startswith('%'):
random_item = random.choice(RESTRICTED_USERNAMES)
return random_item
def setUp(self):
super(Harness, self).setUp()
self.participant = self.make_participant('user1') # Our protagonist
def test_claiming_participant(self):
expected = now = datetime.datetime.now(pytz.utc)
self.participant.set_as_claimed(claimed_at=now)
actual = self.participant.claimed_time
assert actual == expected, actual
def test_changing_username_successfully(self):
self.participant.change_username('user2')
actual = Participant.query.get('user2')
assert self.participant == actual, actual
def test_changing_username_to_too_long(self):
with assert_raises(Participant.UsernameTooLong):
self.participant.change_username('123456789012345678901234567890123')
def test_changing_username_to_already_taken(self):
self.make_participant('user2')
with assert_raises(Participant.UsernameAlreadyTaken):
self.participant.change_username('user2')
def test_changing_username_to_already_taken_is_case_insensitive(self):
self.make_participant('UsEr2')
with assert_raises(Participant.UsernameAlreadyTaken):
self.participant.change_username('uSeR2')
def test_changing_username_to_invalid_characters(self):
with assert_raises(Participant.UsernameContainsInvalidCharacters):
self.participant.change_username(u"\u2603") # Snowman
def test_changing_username_to_restricted_name(self):
with assert_raises(Participant.UsernameIsRestricted):
self.participant.change_username(self.random_restricted_username())
def test_getting_tips_actually_made(self):
expected = Decimal('1.00')
self.make_participant('user2')
self.session.add(Tip(tipper='user1', tippee='user2', amount=expected,
ctime=datetime.datetime.now(pytz.utc)))
self.session.commit()
actual = self.participant.get_tip_to('user2')
assert actual == expected, actual
def test_getting_tips_not_made(self):
expected = Decimal('0.00')
self.make_participant('user2')
actual = self.participant.get_tip_to('user2')
assert actual == expected, actual
# id
def test_participant_gets_a_long_id(self):
actual = type(self.make_participant('alice').id)
assert actual == long, actual
# set_tip_to - stt
def test_stt_sets_tip_to(self):
alice = self.make_participant('alice', last_bill_result='')
self.make_participant('bob')
alice.set_tip_to('bob', '1.00')
actual = alice.get_tip_to('bob')
assert actual == Decimal('1.00'), actual
def test_stt_returns_a_Decimal_and_a_boolean(self):
alice = self.make_participant('alice', last_bill_result='')
self.make_participant('bob')
actual = alice.set_tip_to('bob', '1.00')
assert actual == (Decimal('1.00'), True), actual
def test_stt_returns_False_for_second_time_tipper(self):
alice = self.make_participant('alice', last_bill_result='')
self.make_participant('bob')
alice.set_tip_to('bob', '1.00')
actual = alice.set_tip_to('bob', '2.00')
assert actual == (Decimal('2.00'), False), actual
def test_stt_doesnt_allow_self_tipping(self):
alice = self.make_participant('alice', last_bill_result='')
assert_raises( OldParticipant.NoSelfTipping
, alice.set_tip_to
, 'alice'
, '1000000.00'
)
def test_stt_doesnt_allow_just_any_ole_amount(self):
alice = self.make_participant('alice', last_bill_result='')
self.make_participant('bob')
assert_raises( OldParticipant.BadAmount
, alice.set_tip_to
, 'bob'
, '1000000.00'
)
def test_stt_fails_to_tip_unknown_people(self):
alice = self.make_participant('alice', last_bill_result='')
assert_raises( psycopg2.IntegrityError
, alice.set_tip_to
, 'bob'
, '1.00'
)
# get_dollars_receiving - gdr
def test_gdr_only_sees_latest_tip(self):
alice = self.make_participant('alice', last_bill_result='')
bob = self.make_participant('bob')
alice.set_tip_to('bob', '12.00')
alice.set_tip_to('bob', '3.00')
self.session.commit()
expected = Decimal('3.00')
actual = bob.get_dollars_receiving()
assert actual == expected, actual
def test_gdr_includes_tips_from_accounts_with_a_working_card(self):
alice = self.make_participant('alice', last_bill_result='')
bob = self.make_participant('bob')
alice.set_tip_to('bob', '3.00')
self.session.commit()
expected = Decimal('3.00')
actual = bob.get_dollars_receiving()
assert actual == expected, actual
def test_gdr_ignores_tips_from_accounts_with_no_card_on_file(self):
alice = self.make_participant('alice', last_bill_result=None)
bob = self.make_participant('bob')
alice.set_tip_to('bob', '3.00')
self.session.commit()
expected = Decimal('0.00')
actual = bob.get_dollars_receiving()
assert actual == expected, actual
def test_gdr_ignores_tips_from_accounts_with_a_failing_card_on_file(self):
alice = self.make_participant('alice', last_bill_result="Fail!")
bob = self.make_participant('bob')
alice.set_tip_to('bob', '3.00')
self.session.commit()
expected = Decimal('0.00')
actual = bob.get_dollars_receiving()
assert actual == expected, actual
def test_gdr_includes_tips_from_whitelisted_accounts(self):
alice = self.make_participant( 'alice'
, last_bill_result=''
, is_suspicious=False
)
bob = self.make_participant('bob')
alice.set_tip_to('bob', '3.00')
self.session.commit()
expected = Decimal('3.00')
actual = bob.get_dollars_receiving()
assert actual == expected, actual
def test_gdr_includes_tips_from_unreviewed_accounts(self):
alice = self.make_participant( 'alice'
, last_bill_result=''
, is_suspicious=None
)
bob = self.make_participant('bob')
alice.set_tip_to('bob', '3.00')
self.session.commit()
expected = Decimal('3.00')
actual = bob.get_dollars_receiving()
assert actual == expected, actual
def test_gdr_ignores_tips_from_blacklisted_accounts(self):
alice = self.make_participant( 'alice'
, last_bill_result=''
, is_suspicious=True
)
bob = self.make_participant('bob')
alice.set_tip_to('bob', '3.00')
self.session.commit()
expected = Decimal('0.00')
actual = bob.get_dollars_receiving()
assert actual == expected, actual
# get_number_of_backers - gnob
def test_gnob_gets_number_of_backers(self):
alice = self.make_participant('alice', last_bill_result='')
bob = self.make_participant('bob', last_bill_result='')
clancy = self.make_participant('clancy')
alice.set_tip_to('clancy', '3.00')
bob.set_tip_to('clancy', '1.00')
self.session.commit()
actual = clancy.get_number_of_backers()
assert actual == 2, actual
def test_gnob_includes_backers_with_a_working_card_on_file(self):
alice = self.make_participant('alice', last_bill_result='')
bob = self.make_participant('bob')
alice.set_tip_to('bob', '3.00')
self.session.commit()
actual = bob.get_number_of_backers()
assert actual == 1, actual
def test_gnob_ignores_backers_with_no_card_on_file(self):
alice = self.make_participant('alice', last_bill_result=None)
bob = self.make_participant('bob')
alice.set_tip_to('bob', '3.00')
self.session.commit()
actual = bob.get_number_of_backers()
assert actual == 0, actual
def test_gnob_ignores_backers_with_a_failing_card_on_file(self):
alice = self.make_participant('alice', last_bill_result="Fail!")
bob = self.make_participant('bob')
alice.set_tip_to('bob', '3.00')
self.session.commit()
actual = bob.get_number_of_backers()
assert actual == 0, actual
def test_gnob_includes_whitelisted_backers(self):
alice = self.make_participant( 'alice'
, last_bill_result=''
, is_suspicious=False
)
bob = self.make_participant('bob')
alice.set_tip_to('bob', '3.00')
self.session.commit()
actual = bob.get_number_of_backers()
assert actual == 1, actual
def test_gnob_includes_unreviewed_backers(self):
alice = self.make_participant( 'alice'
, last_bill_result=''
, is_suspicious=None
)
bob = self.make_participant('bob')
alice.set_tip_to('bob', '3.00')
self.session.commit()
actual = bob.get_number_of_backers()
assert actual == 1, actual
def test_gnob_ignores_blacklisted_backers(self):
alice = self.make_participant( 'alice'
, last_bill_result=''
, is_suspicious=True
)
bob = self.make_participant('bob')
alice.set_tip_to('bob', '3.00')
self.session.commit()
actual = bob.get_number_of_backers()
assert actual == 0, actual
def test_gnob_ignores_backers_where_tip_is_zero(self):
alice = self.make_participant('alice', last_bill_result='')
bob = self.make_participant('bob')
alice.set_tip_to('bob', '0.00')
self.session.commit()
actual = bob.get_number_of_backers()
assert actual == 0, actual
def test_gnob_looks_at_latest_tip_only(self):
alice = self.make_participant('alice', last_bill_result='')
bob = self.make_participant('bob')
alice.set_tip_to('bob', '1.00')
alice.set_tip_to('bob', '12.00')
alice.set_tip_to('bob', '3.00')
alice.set_tip_to('bob', '6.00')
alice.set_tip_to('bob', '0.00')
self.session.commit()
actual = bob.get_number_of_backers()
assert actual == 0, actual
# get_age_in_seconds - gais
def test_gais_gets_age_in_seconds(self):
now = datetime.datetime.now(pytz.utc)
alice = self.make_participant('alice', claimed_time=now)
actual = alice.get_age_in_seconds()
assert 0 < actual < 1, actual
def test_gais_returns_negative_one_if_None(self):
alice = self.make_participant('alice', claimed_time=None)
actual = alice.get_age_in_seconds()
assert actual == -1, actual
# def get_details(self):
# def resolve_unclaimed(self):
# def set_as_claimed(self):
# def change_username(self, suggested):
# def get_accounts_elsewhere(self):
# def get_tip_to(self, tippee):
# def get_dollars_receiving(self):
# def get_dollars_giving(self):
# def get_chart_of_receiving(self):
# def get_giving_for_profile(self, db=None):
# def get_tips_and_total(self, for_payday=False, db=None):
# def take_over(self, account_elsewhere, have_confirmation=False):
| 35.820513 | 81 | 0.621809 |
524aab9c6660f8bfa2544c8ea56eb1354666036e | 2,047 | py | Python | openbook_common/management/commands/worker_health_check.py | TamaraAbells/okuna-api | f87d8e80d2f182c01dbce68155ded0078ee707e4 | [
"MIT"
] | 164 | 2019-07-29T17:59:06.000Z | 2022-03-19T21:36:01.000Z | openbook_common/management/commands/worker_health_check.py | TamaraAbells/okuna-api | f87d8e80d2f182c01dbce68155ded0078ee707e4 | [
"MIT"
] | 188 | 2019-03-16T09:53:25.000Z | 2019-07-25T14:57:24.000Z | openbook_common/management/commands/worker_health_check.py | TamaraAbells/okuna-api | f87d8e80d2f182c01dbce68155ded0078ee707e4 | [
"MIT"
] | 80 | 2019-08-03T17:49:08.000Z | 2022-02-28T16:56:33.000Z | from sys import exit
from logging import getLogger
from django.conf import settings
from django.core.management.base import BaseCommand
from openbook_common.utils.rq_helpers import RQStats
from openbook_common.helpers import send_alert_to_channel
from openbook.settings import RQ_QUEUES, FAILED_JOB_THRESHOLD
from openbook.settings import ACTIVE_JOB_THRESHOLD, ACTIVE_WORKER_THRESHOLD
logger = getLogger(__name__)
class Command(BaseCommand):
help = 'Check worker health'
def verify_worker_health(self):
# iterate through all configured queues
env = settings.ENVIRONMENT
for queue in RQ_QUEUES.keys():
rq_stats = RQStats(queue)
active_job_count = rq_stats.get_active_job_count()
if active_job_count >= ACTIVE_JOB_THRESHOLD:
send_alert_to_channel(
f"*UH-OH: we have way too many active jobs "
f"in {env}:{queue} right now: {active_job_count}!!*"
)
print(f"{queue} has too many jobs {active_job_count}")
self.retval += 1
active_worker_count = rq_stats.get_active_worker_count()
if active_worker_count >= ACTIVE_WORKER_THRESHOLD:
send_alert_to_channel(f"*Hmm, we are not supposed to have "
f"{active_worker_count} workers in "
f"{env}:{queue}*")
print(f"{queue} has too many workers {active_worker_count}")
self.retval += 1
def handle(self, *args, **options):
self.retval = 1
try:
self.verify_worker_health()
except Exception as e:
exception = str(e)
send_alert_to_channel(
f"worker_health_check failed with {exception}"
)
raise e
        # the return code will equal the number of threshold
        # violations
exit(self.retval)
| 30.552239 | 77 | 0.595017 |
1dccadddf33e58ed304be2ee6361b3566d323dd2 | 31,577 | py | Python | cc_gen.py | skolchin/gbr | 3ec4b72e0352d36f38f5cd5815b69fac0b7a3e9c | [
"MIT"
] | 29 | 2019-10-10T22:51:55.000Z | 2022-03-09T05:57:59.000Z | cc_gen.py | skolchin/gbr | 3ec4b72e0352d36f38f5cd5815b69fac0b7a3e9c | [
"MIT"
] | 1 | 2020-12-07T06:51:50.000Z | 2020-12-08T16:59:20.000Z | cc_gen.py | skolchin/gbr | 3ec4b72e0352d36f38f5cd5815b69fac0b7a3e9c | [
"MIT"
] | 11 | 2020-12-09T01:44:38.000Z | 2022-03-20T17:40:02.000Z | #-------------------------------------------------------------------------------
# Name: Go board recognition project
# Purpose: OpenCV board extraction script
#
# Author: kol
#
# Created: 18.12.2019
# Copyright: (c) kol 2019
# Licence: MIT
#-------------------------------------------------------------------------------
import os
import sys
import glob
import numpy as np
import cv2
import json
from pathlib import Path
from random import randint, randrange, shuffle
from argparse import ArgumentParser
from copy import deepcopy
from collections import OrderedDict
#sys.path.append("../")
from gr.grdef import *
from gr.board import GrBoard
from gr.utils import get_image_area, resize, rotate
TF_AVAIL=True
try:
import tensorflow as tf
from object_detection.utils import dataset_util
from tfrecord_lite import tf_record_iterator
except:
TF_AVAIL=False
class DatasetWriter:
"""Basic dataset writer"""
def __init__(self, root_dir, file_count, split, reg_name):
self.root_dir, self.file_count = root_dir, file_count
self.reg_name, self.split = reg_name, split
self.file_index, self.file_name, self.file_image = None, None, None
self.area_index, self.area_count = None, None
def get_next_fname(self, label=None, ext='.png'):
"""Derive image file from source file name"""
prefix = Path(self.file_name).stem + "_" + Path(self.file_name).suffix[1:]
fn = str(prefix) + "_" + str(self.area_index).zfill(3) + ext
path = []
if label is not None:
path.append(label)
if self.split is not None and self.split > 0:
if self.area_count is None:
raise ValueError('Area count must be set')
if self.area_index < self.area_count * (1.0 - self.split):
path.append('train')
else:
path.append('val')
path.append(fn)
fn = Path(self.root_dir).joinpath(*path)
self.area_index += 1
return fn
def get_reg_fname(self, label=None):
"""Get registration file name"""
path = []
if label is not None:
path.append(label)
if self.split is not None and self.split > 0:
if self.area_count is None:
raise ValueError('Area count must be set')
if self.area_index < self.area_count * (1.0 - self.split):
path.append('train')
else:
path.append('val')
path.append(self.reg_name)
fn = Path(self.root_dir).joinpath(*path)
fn.parent.mkdir(exist_ok=True, parents=True)
return fn
def set_image(self, file_index, file_name, file_image, area_count):
self.file_index, self.file_name, self.file_image = file_index, file_name, file_image
self.area_index, self.area_count = 0, area_count
self.write_image_info()
def write_image_info(self):
pass
def write_area(self, area, label=None):
area_img = get_image_area(self.file_image, area)
if area_img is not None:
self.write_area_image(area, area_img, label)
def write_area_image(self, area, area_image, label=None):
pass
def finish_image(self):
pass
def close(self):
pass
class TxtDatasetWriter(DatasetWriter):
def __init__(self, root_dir, file_count, split=None, reg_name='description.txt'):
super(TxtDatasetWriter, self).__init__(root_dir, file_count, split, reg_name)
def write_area_image(self, area, area_image, label=None):
# Define file names
mode = 'w' if self.file_index == 0 else 'a'
fn_area = self.get_next_fname(label)
fn_reg = self.get_reg_fname(label)
# Save file
cv2.imwrite(str(fn_area), area_image)
# Save file info
with open(str(fn_reg), mode) as f_reg:
f_reg.write('{} 1 {} {} {} {}\n'.format(
fn_area, 0, 0, area_image.shape[1]-1, area_image.shape[0]-1))
f_reg.close()
class TfDatasetWriter(DatasetWriter):
def __init__(self, root_dir, file_count, split=None, reg_name='description.txt',
base_name='go_board.tfrecord'):
super(TfDatasetWriter, self).__init__(root_dir, file_count, split, reg_name)
self.tf_writers = {'all': None, 'train': None, 'val': None}
self.txts, self.lbls, self.xmins, self.xmaxs, self.ymins, self.ymaxs = [], [], [], [], [], []
# Writer mode, either `single` (whole board image file) or `multi` (multiple image parts)
self.mode = 'single'
# Get TF writers
if self.split is None:
self.tf_writers['all'] = tf.io.TFRecordWriter(str(Path(self.root_dir).joinpath(base_name)))
else:
for k in ['train', 'val']:
fn = Path(base_name).stem + '_' + k + Path(base_name).suffix
self.tf_writers[k] = tf.io.TFRecordWriter(str(Path(self.root_dir).joinpath(fn)))
def write_image_info(self):
self.txts, self.lbls, self.xmins, self.xmaxs, self.ymins, self.ymaxs = [], [], [], [], [], []
def write_area(self, area, label):
self.mode = 'single'
height, width = self.file_image.shape[:2]
self.txts.append(str.encode(label, 'utf-8'))
self.lbls.append(1 if label == 'black' else 2)
self.xmins.append(area[0] / width)
self.ymins.append(area[1] / height)
self.xmaxs.append(area[2] / width)
self.ymaxs.append(area[3] / height)
def write_area_image(self, area, area_image, label):
self.mode = 'multi'
height, width = area_image.shape[:2]
txts = [str.encode(label, 'utf-8')]
lbls = [1 if label == 'black' else 2]
xmins = [area[0] / width]
ymins = [area[1] / height]
xmaxs = [area[2] / width]
ymaxs = [area[3] / height]
fn_area = self.get_next_fname(label)
self.save_image(
self.area_index, fn_area, area_image, self.area_count,
[txts, lbls, xmins, xmaxs, ymins, ymaxs]
)
def finish_image(self):
if self.mode == 'single':
self.save_image(
self.file_index, self.file_name, self.file_image, self.file_count,
[self.txts, self.lbls, self.xmins, self.xmaxs, self.ymins, self.ymaxs]
)
def encode_image(self, image):
# Encode image as JPEG via temp file
fn_tmp = Path(self.root_dir).joinpath('_tmp_' + str(randint(1,100)) + '.jpeg')
cv2.imwrite(str(fn_tmp), image)
with open(str(fn_tmp), 'rb') as f_tmp:
img_raw = f_tmp.read()
f_tmp.close()
fn_tmp.unlink()
return img_raw
def save_image(self, image_index, image_file, image, image_count, feature_comps):
# Get encoded image
img_raw = self.encode_image(image)
# Make up features
height, width = image.shape[:2]
fname = str(Path(image_file).parts[-1])
txts, lbls, xmins, xmaxs, ymins, ymaxs = feature_comps
features = tf.train.Example(features=tf.train.Features(feature={
'image/width': dataset_util.int64_feature(width),
'image/height': dataset_util.int64_feature(height),
'image/filename': dataset_util.bytes_feature(str.encode(fname, 'utf-8')),
'image/source_id': dataset_util.bytes_feature(str.encode(fname, 'utf-8')),
'image/format': dataset_util.bytes_feature(b'jpg'),
'image/encoded': dataset_util.bytes_feature(img_raw),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(txts),
'image/object/class/label': dataset_util.int64_list_feature(lbls)
}))
# Save to appropriate TF writer
tf_writer = self.tf_writers['all']
if tf_writer is None:
tf_writer = self.tf_writers['train'] \
if image_index < image_count * (1.0 - self.split) \
else self.tf_writers['val']
tf_writer.write(features.SerializeToString())
# Register file
mode = 'w' if image_index == 0 else 'a'
fn_reg = str(Path(self.root_dir).joinpath(self.reg_name))
with open(str(fn_reg), mode) as f_reg:
f_reg.write('{}\n'.format(image_file))
f_reg.close()
def close(self):
for w in self.tf_writers.values():
if w is not None: w.close()
class DatasetGenerator:
""" Main dataset generator class"""
def __init__(self):
# Datasets to generate
self.datasets = ["positive", "negative", "stones", "crossings", 'bboxes']
# Directories where to place datasets
self.dirs = OrderedDict({"positive": None, "stones": None,
"negative": None, "crossings": None, "bboxes": None})
# Selection pattern
self.pattern = None
# Stone extraction method: single, enclosed, both
self.method = "single"
# Spacing of area to be extracted with particular method
self.spacing = {"single": 10, "enclosed": 1, "crossing": 5, "bboxes": 1}
# Number of negative areas to be extracted per image
self.neg_per_image = 0
# Resize maximum size
self.n_resize = 0
# Flag to exclude grid line crossings
self.no_grid = False
# Rotation vector (0: how many images to generate, 1: rotation angle)
self.n_rotate = [0, 0]
# Dataset output format (txt, json, xml, tf)
self.format = 'txt'
# Dataset split and shuffle flags
self.split = None
self.shuffle = False
# GrBoard currently processed
self.board = None
# Background color
self.bg_c = None
# Areas extracted during current run
self.stone_areas = None
# Dataset writers
self.ds_writers = None
# Statistic
self.file_count = 0
self.counts = {'positives': 0}
self.totals = {'positives': 0}
def overlap(self, a, b):
"""Check two rectangles overlap"""
# from: https://stackoverflow.com/questions/25068538/intersection-and-difference-of-two-rectangles
x1 = max(min(a[0], a[2]), min(b[0], b[2]))
y1 = max(min(a[1], a[3]), min(b[1], b[3]))
x2 = min(max(a[0], a[2]), max(b[0], b[2]))
y2 = min(max(a[1], a[3]), max(b[1], b[3]))
return x1 < x2 and y1 < y2
def get_bg_color(self, img):
"""Find background color of a board as most often occuring color except
shades of black and white"""
u, c = np.unique(img.reshape(-1, img.shape[-1]), axis=0, return_counts=True)
bg_c = u[c.argmax()]
# Check black or white color selected
if sum(bg_c) < 40 or sum(bg_c) >= 750:
cc = c.argsort()
n = -2
while sum(bg_c) < 40 or sum(bg_c) >= 750:
bg_c = u[cc[n]]
n -= 1
return bg_c
def remove_areas(self, img, areas, bg_c):
"""Remove areas from image and pad it with background color"""
for c in areas:
patch = np.full((c[3]-c[1], c[2]-c[0], img.shape[2]), bg_c, dtype = img.dtype)
img[c[1]:c[3], c[0]:c[2]] = patch[:]
return img
def get_space(self, space, append_str):
"""Derive space to add to specfied integer space"""
n = str(append_str).find('%')
if n == -1:
return int(append_str)
else:
append = int(str(append_str)[0:n])
return int(space * append / 100.0)
def save_area(self, ds_key, file_name, area_img, start_index, f_reg, no_rotation=False):
"""Save given area of image file. If rotation is requested, generates it"""
stop_index = start_index + 1 if self.n_rotate[0] == 0 or no_rotation \
else start_index + self.n_rotate[0] + 1
if min(self.n_resize) > 0:
area_img = resize(area_img, self.n_resize, f_upsize=True, pad_color=self.bg_c)
bg_c = [int(x) for x in self.bg_c]
for index in range(start_index, stop_index):
fn = self.get_image_file_name(file_name, index)
f_reg.register_image(fn, area_img)
area_img = rotate(area_img, self.n_rotate[1], bg_c, keep_image=False)
return stop_index - start_index
def add_count(self, key):
if key in self.counts:
self.counts[key] += 1
else:
self.counts[key] = 1
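    # Compute the image area (bounding box) to extract for a stone, depending on the chosen method and on nearby stones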
def extract_stone_area(self, stone):
x, y, a, b, r, bw = stone
fs = self.get_space(r, self.spacing['single'])
cs = self.get_space(r, self.spacing['enclosed'])
area = None
if self.method == "single" or self.method == 's':
# Save single staying stones only
nearby_stones = self.board.stones.find_nearby((stone[GR_A], stone[GR_B]), 1)
area = [max(x-r-fs,0),
max(y-r-fs,0),
min(x+r+fs, self.board.image.shape[CV_WIDTH]),
min(y+r+fs, self.board.image.shape[CV_HEIGTH])]
self.stone_areas.extend([area])
if len(nearby_stones) > 0: area = None
if self.method == "enclosed" or self.method == 'e':
# Save enclosed staying stones only
nearby_stones = self.board.stones.find_nearby((stone[GR_A], stone[GR_B]), 1)
area = [max(x-r-cs,0),
max(y-r-cs,0),
min(x+r+cs, self.board.image.shape[CV_WIDTH]),
min(y+r+cs, self.board.image.shape[CV_HEIGTH])]
self.stone_areas.extend([area])
if len(nearby_stones) == 0: area = None
elif self.method == "both" or self.method == 'b':
            # Save all stones, using a different extraction area size depending on
            # whether the stone has other stones nearby
nearby_stones = self.board.stones.find_nearby((stone[GR_A], stone[GR_B]), 1)
if len(nearby_stones) == 0:
area = [max(x-r-fs,0),
max(y-r-fs,0),
min(x+r+fs, self.board.image.shape[CV_WIDTH]),
min(y+r+fs, self.board.image.shape[CV_HEIGTH])]
else:
area = [max(x-r-cs,0),
max(y-r-cs,0),
min(x+r+cs, self.board.image.shape[CV_WIDTH]),
min(y+r+cs, self.board.image.shape[CV_HEIGTH])]
self.stone_areas.extend([area])
return area
def extract_crossing_range(self, file_index, file_name, label, ranges):
# Get all crossings in a list
cs = self.get_space(4, self.spacing['crossing'])
crossings = []
for r in ranges:
for y in r[0]:
for x in r[1]:
stone = self.board.find_stone(c=(x,y))
if stone is None:
area = [max(x-cs-2,0),
max(y-cs-2,0),
min(x+cs+2, self.board.image.shape[CV_WIDTH]),
min(y+cs+2, self.board.image.shape[CV_HEIGTH])]
crossings.append(area)
# Prepare the writer
self.ds_writers['crossings'].set_image(file_index,
file_name,
self.board.image,
len(crossings))
# Proceed
for area in crossings:
area_img = get_image_area(self.board.image, area)
self.ds_writers['crossings'].write_area_image(area, area_img, label)
self.add_count('crossings\\' + label)
# Finalize
self.ds_writers['crossings'].finish_image()
def extract_border_crossings(self, file_index, file_name):
"""External grid crossings dataset extractor. Called from within crossings extractor"""
edges = self.board.results[GR_EDGES]
space = self.board.results[GR_SPACING]
ranges = [
# left border
(
range(int(edges[0][1]+space[1]), int(edges[1][1]-space[1]), int(space[1])),
range(int(edges[0][0]), int(edges[0][0])+1, int(space[0]))
),
# right border
(
range(int(edges[0][1]+space[1]), int(edges[1][1]-space[1]), int(space[1])),
range(int(edges[1][0]), int(edges[1][0])+1, int(space[0]))
),
# top border
(
range(int(edges[0][1]), int(edges[0][1])+1, int(space[1])),
range(int(edges[0][0]+space[0]), int(edges[1][0]-space[0]), int(space[0]))
),
# bottom border
(
range(int(edges[1][1]), int(edges[1][1])+1, int(space[1])),
range(int(edges[0][0]+space[0]), int(edges[1][0]-space[0]), int(space[0]))
)
]
self.extract_crossing_range(file_index, file_name, 'border', ranges)
def extract_inboard_crossings(self, file_index, file_name):
"""Internal grid crossing dataset extractor. Called from within crossings extractor"""
edges = self.board.results[GR_EDGES]
space = self.board.results[GR_SPACING]
ranges = [
(
range(int(edges[0][1]+space[1]), int(edges[1][1]-space[1]), int(space[1])),
range(int(edges[0][0]+space[0]), int(edges[1][0]-space[0]), int(space[0]))
)
]
self.extract_crossing_range(file_index, file_name, "cross", ranges)
def extract_edges(self, file_index, file_name):
"""Edges dataset extractor. Called from within crossings extractor"""
edges = self.board.results[GR_EDGES]
ranges = [
(
[edges[0][1]],
[edges[0][0]]
),
(
[edges[1][1]],
[edges[0][0]]
),
(
[edges[0][1]],
[edges[1][0]]
),
(
[edges[1][1]],
[edges[1][0]]
),
]
self.extract_crossing_range(file_index, file_name, 'edge', ranges)
def extract_positive(self, file_index, file_name):
"""Positives (stones) dataset extractor"""
f_reg = self.get_registrator('positive', 'positives.txt')
index = 0
for stone in self.board.all_stones:
area = self.extract_stone_area(stone)
if area is not None:
area_img = get_image_area(self.board.image, area)
n = self.save_area('positive', file_name, area_img, index, f_reg)
index += n
self.counts['positive'] += n
f_reg.close()
def extract_negative(self, file_index, file_name):
"""Negatives (empty boards) dataset extractor"""
# Prepare image with all found stones removed
neg_img = self.remove_areas(self.board.image.copy(), self.stone_areas, self.bg_c)
fn = self.get_image_file_name(file_name, 999).replace('999', 'neg')
self.save_image('negative', fn, neg_img)
# Slice prepared image by random pieces generating number of
# images not less than specified number
w = int(round(neg_img.shape[CV_WIDTH] / 4,0))
h = int(round(neg_img.shape[CV_HEIGTH] / 4,0))
nn_max = self.neg_per_image if self.neg_per_image > 0 else self.counts['positive']
f_reg = self.get_registrator('negative', 'negatives.txt')
for index in range(nn_max):
x = randrange(0, neg_img.shape[CV_WIDTH] - w)
y = randrange(0, neg_img.shape[CV_HEIGTH] - h)
area = [x, y, x + w, y + h]
if area[0] < area[2] and area[1] < area[3]:
area_img = get_image_area(neg_img, area)
n = self.save_area('negative', file_name, area_img, index, f_reg)
self.counts['negative'] += n
f_reg.close()
def extract_stones(self, file_index, file_name):
"""Stones dataset extractor"""
# Prepare the writer
self.ds_writers['stones'].set_image(file_index,
file_name,
self.board.image,
len(self.board.all_stones))
# Shuffle stones list if split is requested
stones = deepcopy(self.board.all_stones)
if self.shuffle and self.split > 0:
shuffle(stones)
# Process stones
for stone in stones:
label = 'black' if stone[GR_BW] == 'B' else 'white'
area = self.extract_stone_area(stone)
if area is not None:
area_img = get_image_area(self.board.image, area)
self.ds_writers['stones'].write_area_image(area, area_img, label)
self.add_count('stones\\' + label)
# Finalize
self.ds_writers['stones'].finish_image()
def extract_crossings(self, file_index, file_name):
"""Crossings dataset extractor"""
self.extract_edges(file_index, file_name)
self.extract_border_crossings(file_index, file_name)
if not self.no_grid:
self.extract_inboard_crossings(file_index, file_name)
def extract_bboxes(self, file_index, file_name):
"""Extractor which creates whole board description in TF-record format"""
# Prepare the writer
self.ds_writers['bboxes'].set_image(file_index,
file_name,
self.board.image,
len(self.board.all_stones))
# Generate .names and label map files
fn_map = Path(self.dirs['bboxes']).joinpath('go_board.names')
if not fn_map.exists():
with open(str(fn_map), 'w') as f_map:
f_map.write('black\nwhite\n')
f_map.close()
fn_map = Path(self.dirs['bboxes']).joinpath('go_board.pbtxt')
if not fn_map.exists():
with open(str(fn_map), 'w') as f_map:
f_map.write('item {\n\tid: 1\n\tname: \'black\'\n}\n' + \
'item {\n\tid: 2\n\tname: \'white\'\n}\n')
f_map.close()
# Resize board
if min(self.n_resize) > 0:
self.board.resize_board(self.n_resize)
# Save stones
for stone in self.board.all_stones:
area = self.extract_stone_area(stone)
if area is not None:
label = 'black' if stone[GR_BW] == STONE_BLACK else 'white'
self.ds_writers['bboxes'].write_area(area, label)
# Finalize
self.ds_writers['bboxes'].finish_image()
self.add_count('bboxes')
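    # Process a single source image: load the board, then run every requested dataset extractor on it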
def one_file(self, file_index, file_name):
# Open board
print("Processing file " + str(file_name))
try:
self.board = GrBoard(str(file_name))
except:
print(sys.exc_info()[1])
return
self.bg_c = self.get_bg_color(self.board.image)
self.stone_areas = []
for k in self.counts: self.counts[k] = 0
for k in self.datasets:
extractor_fn = getattr(self, 'extract_' + k, None)
if extractor_fn is None:
raise ValueError('Cannot find a handler to generate dataset ', k)
extractor_fn(file_index, file_name)
for k in self.counts:
if k in self.totals:
self.totals[k] += self.counts[k]
else:
self.totals[k] = self.counts[k]
def get_args(self):
parser = ArgumentParser()
parser.add_argument('pattern', help = 'Selection pattern')
parser.add_argument('-p', '--positive',
help = 'Directory to store positives (images with stones) dataset')
parser.add_argument('-n', '--negative',
help = 'Directory to store negatives (images without stones) dataset')
parser.add_argument('-s', '--stones',
help = "Directory to store stones dataset (stone images, separately for black and white)")
parser.add_argument('-c', '--crossings',
help = "Directory to store line crossings and edges dataset (images of board grid lines crossings, " + \
"separately for edges, borders crossings and grid lines crossings)")
parser.add_argument('-b', '--bboxes',
help = "Directory to store bboxes dataset (one file describing all boards)")
parser.add_argument('-f', '--format',
choices=['txt', 'json', 'tf'], default = 'txt',
help="Output dataset format")
parser.add_argument('-m', '--method',
choices = ["single", "enclosed", "both"], default = "both",
help = "Stone image extration method (for all datasets except bboxes), one of: " + \
"single - extract areas of single-staying stones, " + \
"enclosed - extract areas of stones enclosed by other stones, " + \
"both - extract all stones")
parser.add_argument('--space',
nargs = '*',
default = [10, 3, 5],
help = "Space to add when extracting area for: single stones, " + \
"enclosed stones, edges/crossings " + \
"(numbers or perecentage of stone size followed by %)")
parser.add_argument('--neg-img', type=int,
default = 0,
help = 'Number of negative images to generate from one image (0 - the same number as positives)')
parser.add_argument('--resize', type=int,
nargs='*',
default=[0, 0],
help='Resize images to specified size (0 - no resizing)')
parser.add_argument('--no-grid',
action="store_true",
default = False,
help = 'Do not generate grid line crossing images')
parser.add_argument('--rotate',
type=int,
nargs=2,
default=[0, 0],
help='Two numbers specifying how many rotation images shall be created and an angle for each rotation')
parser.add_argument('--split',
type=float,
help="A float value setting dataset split to train/test datasets")
parser.add_argument('--shuffle',
action='store_true',
help="If True, file list is shuffled before splitting (by default True if split is specified)")
args = parser.parse_args()
self.dirs['positive'] = args.positive
self.dirs['stones'] = args.stones
self.dirs['negative'] = args.negative
self.dirs['crossings'] = args.crossings
self.dirs['bboxes'] = args.bboxes
self.datasets = [x for x in self.dirs if self.dirs[x] is not None]
if len(self.datasets) == 0:
raise ValueError('No datasets to generate')
self.pattern = args.pattern
self.method = args.method.lower()
self.spacing['single'] = args.space[0]
self.spacing['enclosed'] = args.space[1] if len(args.space) > 1 else 1
self.spacing['crossing'] = args.space[2] if len(args.space) > 2 else 5
self.neg_per_image = args.neg_img
self.n_resize = args.resize
self.no_grid = args.no_grid
self.n_rotate = args.rotate
self.split = args.split
self.shuffle = args.shuffle
if self.shuffle is None:
self.shuffle = self.split is not None and self.split > 0
self.format = args.format
if self.format == 'tf' and not TF_AVAIL:
print('TF-record output requested, but Tensorflow is not available. Switching to text output')
            self.format = 'txt'
if self.format == 'txt' and args.bboxes is not None:
raise ValueError('Cannot generate bboxes dataset in text output format')
def main(self):
try:
self.get_args()
except:
print('ERROR:', sys.exc_info()[1])
return
# Clean up target directories
dir_list = [x for x in self.dirs.values() if x is not None]
def recursive_delete(f):
if f.is_file(): f.unlink()
else:
for x in f.glob('*'):
recursive_delete(x)
for d in dir_list:
pd = Path(d)
pd.mkdir(exist_ok=True, parents=True)
recursive_delete(pd)
# Make pattern ready for glob:
# Check it is a directory and if yes, add wildcards
# If not, check for file wildcards, if none - add them
if os.path.isdir(self.pattern):
self.pattern = os.path.join(self.pattern, "*.*")
else:
head, tail = os.path.split(self.pattern)
            if tail == '': self.pattern = os.path.join(self.pattern, "*.*")
# Load all files
file_list = []
print('Counting files...')
for x in glob.iglob(self.pattern):
if os.path.isfile(x):
if Path(x).suffix != '.gpar':
# Image files processed as is
file_list.append(str(x))
else:
# For .gpar files, try to find an image
found = False
for sx in ['.png', '.jpg', '.jpeg']:
f = Path(x).with_suffix(sx)
found = f.exists()
if found:
file_list.append(str(f))
break
if not found:
print("==> Cannot find an image which corresponds to {} param file".format(x))
# Shuffle, if requested
if self.shuffle:
print('Shuffling file list')
shuffle(file_list)
# Prepare stats
self.file_count = len(file_list)
self.totals, self.counts = {}, {}
# Get dataset writers
if self.format == 'txt':
writer_class = TxtDatasetWriter
print('Using text output format')
elif self.format == 'tf':
writer_class = TfDatasetWriter
print('Using TF-record output format')
else:
            raise ValueError("Don't know how to handle dataset format " + self.format)
self.ds_writers = {k: writer_class(v, self.file_count, self.split) for k, v in self.dirs.items() if v is not None}
# Process files
for n, x in enumerate(file_list):
self.one_file(n, x)
# Finalize
for w in self.ds_writers.values():
w.close()
# Show statistics
print("Dataset items created:")
for k, v in self.totals.items():
print("\t{}: {}".format(k, v))
if __name__ == '__main__':
app = DatasetGenerator()
app.main()
cv2.destroyAllWindows()
#"C:\Users\kol\Documents\kol\gbr\img\go_board_*.gpar" -b cc/bboxes --bbox-fmt tf --bbox-split 0.2 --resize 416 416
## it = tf_record_iterator("C:\\Users\\kol\\Documents\\kol\\gbr\\cc\\stones\\go_board.tfrecord")
## for n, r in enumerate(it):
## print('==> ' + str(n))
## print(r)
## print('')
| 38.275152 | 122 | 0.557463 |
d546c133c5a4debefae43963128d7b51405be44b | 1,368 | py | Python | fiasko_bro/validators/code_inclusion.py | SerejkaSJ/fiasko_bro | dfb8c30109f317c1e5b6d211e002fd148695809e | [
"MIT"
] | 25 | 2018-01-24T10:45:35.000Z | 2020-12-05T21:47:20.000Z | fiasko_bro/validators/code_inclusion.py | SerejkaSJ/fiasko_bro | dfb8c30109f317c1e5b6d211e002fd148695809e | [
"MIT"
] | 110 | 2018-01-21T12:25:13.000Z | 2021-06-10T19:27:22.000Z | fiasko_bro/validators/code_inclusion.py | SerejkaSJ/fiasko_bro | dfb8c30109f317c1e5b6d211e002fd148695809e | [
"MIT"
] | 13 | 2017-12-12T22:19:01.000Z | 2019-01-29T18:08:05.000Z | from ..utils import code_helpers
def too_difficult_by_mccabe(project_folder, max_complexity, *args, **kwargs):
violations = []
for parsed_file in project_folder.get_parsed_py_files():
violations += code_helpers.get_mccabe_violations_for_file(parsed_file.path, max_complexity)
if violations:
return ','.join(violations)
def code_too_nested(project_folder, tab_size, max_indentation_level, deep_nesting_paths_to_ignore, *args, **kwargs):
"""
Looks at the number of spaces in the beginning and decides if the code is
too nested.
As a precondition, the code has to pass indent_not_multiple_of_tab_size.
"""
for parsed_file in project_folder.get_parsed_py_files(whitelist=deep_nesting_paths_to_ignore):
lines = parsed_file.content.split('\n')
previous_line_indent = 0
for line_number, line in enumerate(lines):
indentation_spaces_amount = code_helpers.count_indentation_spaces(line, tab_size)
if (
indentation_spaces_amount > tab_size * max_indentation_level
# make sure it's not a line continuation
and indentation_spaces_amount - previous_line_indent == tab_size
):
return parsed_file.get_name_with_line(line_number)
previous_line_indent = indentation_spaces_amount
| 44.129032 | 116 | 0.708333 |
9d8c0c3bcef1cd9b3649ab8035459ffca7f6176f | 1,121 | py | Python | tests/conftest.py | ChrisPanopoulos/conda | fc81d4217f6c1804e983d1698f8b7cc5bc01874c | [
"BSD-3-Clause"
] | null | null | null | tests/conftest.py | ChrisPanopoulos/conda | fc81d4217f6c1804e983d1698f8b7cc5bc01874c | [
"BSD-3-Clause"
] | null | null | null | tests/conftest.py | ChrisPanopoulos/conda | fc81d4217f6c1804e983d1698f8b7cc5bc01874c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from pathlib import Path
import subprocess
import sys
import pytest
from conda.testing.fixtures import (
suppress_resource_warning,
tmpdir,
clear_subdir_cache,
)
win_default_shells = ["cmd.exe", "powershell", "git_bash", "cygwin"]
shells = ["bash", "zsh"]
if sys.platform == "win32":
shells = win_default_shells
def pytest_addoption(parser):
parser.addoption("--shell", action="append", default=[],
help="list of shells to run shell tests on")
def pytest_generate_tests(metafunc):
if 'shell' in metafunc.fixturenames:
metafunc.parametrize("shell", metafunc.config.option.shell)
@pytest.fixture(scope="session", autouse=True)
def conda_build_recipes():
test_recipes = Path(__file__).resolve().parent / "test-recipes"
recipes_to_build = ["activate_deactivate_package", "pre_link_messages_package"]
packages = [str(test_recipes / pkg) for pkg in recipes_to_build]
cmd = ["conda-build"]
cmd.extend(packages)
subprocess.run(cmd, check=True)
| 28.025 | 83 | 0.706512 |
3a5486b3be23ff4d13ec989d1159e5211d192684 | 2,081 | py | Python | juriscraper/opinions/united_states/state/vt_u.py | johnhawkinson/juriscraper | 7c0407849ce5b960b2412df7939db545c356a899 | [
"BSD-2-Clause"
] | null | null | null | juriscraper/opinions/united_states/state/vt_u.py | johnhawkinson/juriscraper | 7c0407849ce5b960b2412df7939db545c356a899 | [
"BSD-2-Clause"
] | null | null | null | juriscraper/opinions/united_states/state/vt_u.py | johnhawkinson/juriscraper | 7c0407849ce5b960b2412df7939db545c356a899 | [
"BSD-2-Clause"
] | 1 | 2021-03-03T00:03:16.000Z | 2021-03-03T00:03:16.000Z | """Scraper for the Supreme Court of Vermont
CourtID: vt
Court Short Name: VT
Author: Brian W. Carver
Date created: 18 Aug 2013
Reviewer: Mike Lissner
"""
from datetime import datetime
from six.moves.urllib.parse import urlsplit
from juriscraper.OpinionSite import OpinionSite
from lxml import html
class Site(OpinionSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = 'https://www.vermontjudiciary.org/lc/masterpages/unpublishedeo2011-present.aspx'
def _get_download_urls(self):
path = "//div[@id='WebPartWPQ2']//td[../td[2]/text()]/a[contains(@href, 'pdf')]/@href"
return list(self.html.xpath(path))
def _get_case_names(self):
path = "//div[@id='WebPartWPQ2']//td[1][../td[2]/text()]//text()"
return list(self.html.xpath(path))
def _get_case_dates(self):
path = "//div[@id='WebPartWPQ2']//td[2]//text()"
return [datetime.strptime(s, '%m/%d/%Y') for s in self.html.xpath(path)]
def _get_precedential_statuses(self):
return ['Unpublished'] * len(self.case_names)
def _get_docket_numbers(self):
path = "//div[@id='WebPartWPQ2']//td[../td[2]/text()]/a[contains(@href, 'pdf')]/@href"
docket_numbers = []
for s in self.html.xpath(path):
# Get the docket number from the URL.
parts = urlsplit(s)
# https://www.vt.org/LC/blah/eo15-092.pdf --> '2015-092'
docket_numbers.append(
parts.path.split('/')[-1].split('.')[0].replace('eo', '20')
)
return docket_numbers
def _get_dispositions(self):
path = "//div[@id='WebPartWPQ2']//td[5][../td[2]/text()]//text()"
return list(self.html.xpath(path))
def _get_lower_court_judges(self):
path = "//div[@id='WebPartWPQ2']//td[4][../td[2]/text()]"
lc_judges = []
for e in self.html.xpath(path):
lc_judges.append(html.tostring(e, method='text', encoding='unicode'))
return lc_judges
| 35.271186 | 99 | 0.611725 |
bcff88225903d8eae8a866d6e5b8f880e19b4fd4 | 907 | py | Python | cogs/greetings.py | aomayo77/psychic-octo-chainsaw | e516dadf656f52f813b7674a228a79c7353445fa | [
"MIT"
] | 2 | 2022-01-21T02:08:38.000Z | 2022-02-09T04:46:48.000Z | cogs/greetings.py | aomayo77/psychic-octo-chainsaw | e516dadf656f52f813b7674a228a79c7353445fa | [
"MIT"
] | 3 | 2022-01-08T00:18:19.000Z | 2022-02-17T03:17:54.000Z | cogs/greetings.py | aomayo77/psychic-octo-chainsaw | e516dadf656f52f813b7674a228a79c7353445fa | [
"MIT"
] | 2 | 2022-01-21T22:40:28.000Z | 2022-01-25T20:27:13.000Z | from discord.ext import commands
import discord
import sys
sys.dont_write_bytecode = True
class greetings(commands.Cog):
def __init__(self, bot):
self.bot = bot
self._last_member = None
@commands.Cog.listener()
async def on_member_join(self, member):
channel = member.guild.system_channel
if channel is not None:
await channel.send('Welcome {0.mention}.'.format(member))
@commands.command()
async def hello(self, ctx, *, member: discord.Member = None):
"""Says hello"""
member = member or ctx.author
if self._last_member is None or self._last_member.id != member.id:
await ctx.send('Hello {0.name}~'.format(member))
else:
await ctx.send('Hello {0.name}... This feels familiar.'.format(member))
self._last_member = member
def setup(bot):
bot.add_cog(greetings(bot))
| 29.258065 | 83 | 0.638368 |
2af99c115fb8ec4b999754d43fc348c43d44e0b1 | 3,700 | py | Python | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/crm/models/crm_team.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | 1 | 2019-12-19T01:53:13.000Z | 2019-12-19T01:53:13.000Z | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/crm/models/crm_team.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/crm/models/crm_team.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
from odoo.tools.safe_eval import safe_eval
class Team(models.Model):
_name = 'crm.team'
_inherit = ['mail.alias.mixin', 'crm.team']
resource_calendar_id = fields.Many2one('resource.calendar', string="Working Time", help="Used to compute open days")
use_leads = fields.Boolean('Leads',
help="The first contact you get with a potential customer is a lead you qualify before converting it into a real business opportunity. Check this box to manage leads in this sales team.")
use_opportunities = fields.Boolean('Opportunities', default=True, help="Check this box to manage opportunities in this sales team.")
alias_id = fields.Many2one('mail.alias', string='Alias', ondelete="restrict", required=True, help="The email address associated with this team. New emails received will automatically create new leads assigned to the team.")
def get_alias_model_name(self, vals):
return 'crm.lead'
def get_alias_values(self):
has_group_use_lead = self.env.user.has_group('crm.group_use_lead')
values = super(Team, self).get_alias_values()
values['alias_defaults'] = defaults = safe_eval(self.alias_defaults or "{}")
defaults['type'] = 'lead' if has_group_use_lead and self.use_leads else 'opportunity'
defaults['team_id'] = self.id
return values
@api.onchange('use_leads', 'use_opportunities')
def _onchange_use_leads_opportunities(self):
if not self.use_leads and not self.use_opportunities:
self.alias_name = False
@api.model
def create(self, vals):
generate_alias_name = self.env['ir.values'].get_default('sales.config.settings', 'generate_sales_team_alias')
if generate_alias_name and not vals.get('alias_name'):
vals['alias_name'] = vals.get('name')
return super(Team, self).create(vals)
@api.multi
def write(self, vals):
result = super(Team, self).write(vals)
if 'use_leads' in vals or 'alias_defaults' in vals:
for team in self:
team.alias_id.write(team.get_alias_values())
return result
#TODO JEM : refactor this stuff with xml action, proper customization,
@api.model
def action_your_pipeline(self):
action = self.env.ref('crm.crm_lead_opportunities_tree_view').read()[0]
user_team_id = self.env.user.sale_team_id.id
if not user_team_id:
user_team_id = self.search([], limit=1).id
action['help'] = """<p class='oe_view_nocontent_create'>Click here to add new opportunities</p><p>
Looks like you are not a member of a sales team. You should add yourself
as a member of one of the sales team.
</p>"""
if user_team_id:
action['help'] += "<p>As you don't belong to any sales team, Odoo opens the first one by default.</p>"
action_context = safe_eval(action['context'], {'uid': self.env.uid})
if user_team_id:
action_context['default_team_id'] = user_team_id
tree_view_id = self.env.ref('crm.crm_case_tree_view_oppor').id
form_view_id = self.env.ref('crm.crm_case_form_view_oppor').id
kanb_view_id = self.env.ref('crm.crm_case_kanban_view_leads').id
action['views'] = [
[kanb_view_id, 'kanban'],
[tree_view_id, 'tree'],
[form_view_id, 'form'],
[False, 'graph'],
[False, 'calendar'],
[False, 'pivot']
]
action['context'] = action_context
return action
| 45.679012 | 227 | 0.657297 |
523e744d753bf075090775f6c04c1663f024b90e | 2,691 | py | Python | tests/cholupdates/rank_1/downdate/test_downdate_seeger.py | marvinpfoertner/cholupdates | b102504cc096ba7f298bc5a881c35472d9111b83 | [
"MIT"
] | 6 | 2020-12-15T15:17:09.000Z | 2021-04-22T16:48:17.000Z | tests/cholupdates/rank_1/downdate/test_downdate_seeger.py | marvinpfoertner/cholupdates | b102504cc096ba7f298bc5a881c35472d9111b83 | [
"MIT"
] | 22 | 2020-12-15T17:08:32.000Z | 2022-03-20T10:15:04.000Z | tests/cholupdates/rank_1/downdate/test_downdate_seeger.py | marvinpfoertner/cholupdates | b102504cc096ba7f298bc5a881c35472d9111b83 | [
"MIT"
] | null | null | null | """Specific tests for the function :func:`cholupdates.rank_1.downdate_seeger`."""
import numpy as np
import pytest
import cholupdates
@pytest.mark.parametrize(
"impl", [None] + cholupdates.rank_1.downdate_seeger.available_impls
)
def test_memory_order(L: np.ndarray, v: np.ndarray, impl: str):
"""Assert that the resulting array has the same memory order as the input array"""
L_dd = cholupdates.rank_1.downdate_seeger(L, v, impl=impl)
if L.flags.c_contiguous:
assert L_dd.flags.c_contiguous
else:
assert L.flags.f_contiguous
assert L_dd.flags.f_contiguous
@pytest.mark.parametrize(
"impl", [None] + cholupdates.rank_1.downdate_seeger.available_impls
)
def test_non_contiguous(N: int, L: np.ndarray, v: np.ndarray, impl: str):
"""Assert that a non-contiguous array leads to a `ValueError`"""
if N > 1:
L_noncontig = np.stack([np.eye(N, dtype=L.dtype) for _ in range(8)], axis=1)
with pytest.raises(ValueError):
cholupdates.rank_1.downdate_seeger(L_noncontig[:, 3, :], v, impl=impl)
v_noncontig = np.zeros((N, 3), dtype=v.dtype, order="C")
with pytest.raises(ValueError):
cholupdates.rank_1.downdate_seeger(L, v_noncontig[:, 1], impl=impl)
@pytest.mark.parametrize(
"L_dtype,v_dtype,impl",
[
(L_dtype, v_dtype, impl)
for L_dtype in [np.double, np.single, np.half, np.cdouble, np.intc]
for v_dtype in [np.double, np.single, np.half, np.cdouble, np.intc]
for impl in [None] + cholupdates.rank_1.downdate_seeger.available_impls
],
)
def test_raise_on_wrong_dtype(L_dtype: np.dtype, v_dtype: np.dtype, impl: str):
"""Tests whether a :class:`TypeError` is raised if the Cholesky factor or the vector
:code:`v` have an unsupported dtype."""
if not (L_dtype == v_dtype and L_dtype in (np.single, np.double)):
with pytest.raises(TypeError):
cholupdates.rank_1.downdate_seeger(
L=np.eye(5, dtype=L_dtype), v=np.zeros(5, dtype=v_dtype), impl=impl
)
def test_unknown_impl(L: np.ndarray, v: np.ndarray):
"""Tests whether requesting an unknown implementation results in an exception."""
with pytest.raises(NotImplementedError):
cholupdates.rank_1.downdate_seeger(L, v, impl="doesnotexist")
def test_cython_unavailable(L: np.ndarray, v: np.ndarray):
"""Tests whether requesting the Cython implementation results in an exception if it
is not available."""
if "cython" not in cholupdates.rank_1.downdate_seeger.available_impls:
with pytest.raises(NotImplementedError):
cholupdates.rank_1.downdate_seeger(L, v, impl="cython")
| 36.364865 | 88 | 0.686734 |
2f6a0e7afb59166efab6b78e66c8caf501ad04b5 | 5,861 | py | Python | api/app/utils/mongo_utils.py | crtarsorg/glasomer.rs | 3b486241ea3d127dbe95c2c90210da51a38b0f29 | [
"CC0-1.0"
] | null | null | null | api/app/utils/mongo_utils.py | crtarsorg/glasomer.rs | 3b486241ea3d127dbe95c2c90210da51a38b0f29 | [
"CC0-1.0"
] | null | null | null | api/app/utils/mongo_utils.py | crtarsorg/glasomer.rs | 3b486241ea3d127dbe95c2c90210da51a38b0f29 | [
"CC0-1.0"
] | null | null | null | # coding=utf-8
import datetime
from bson import SON
import string
class MongoUtils:
def __init__(self, mongo):
self.mongo = mongo
self.collection_name = 'answers'
def save_to_database(self, data):
self._insert(data)
def _insert(self, data):
json_doc = {
'answers': data['answers'],
'topMatches': data['matched_parties'],
'budget': data['budget'],
'timestamp': datetime.datetime.utcnow()
}
self.mongo.db[self.collection_name].insert(json_doc)
def get_total_count(self):
total = self.mongo.db[self.collection_name].count()
return total
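    # Count, per party, how many stored answers list that party under topMatches.<place>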
def get_top_matches(self, place):
query = '$topMatches.' + place + '.name'
top_docs = self.mongo.db[self.collection_name].aggregate(
[
{'$group': {'_id': query, 'count': {'$sum': 1}}},
{"$sort": SON([("count", -1), ("_id", -1)])}
]
)
return top_docs['result']
def get_counts_on_budget_increase_decrease(self):
increase_budget = self.mongo.db[self.collection_name].aggregate(
[
{'$unwind': '$budget.increase'},
{'$group': {'_id': '$budget.increase', 'count': {'$sum': 1}}},
{"$sort": SON([("count", -1), ("_id", -1)])}
]
)
decrease_budget = self.mongo.db[self.collection_name].aggregate(
[
{'$unwind': '$budget.decrease'},
{'$group': {'_id': '$budget.decrease', 'count': {'$sum': 1}}},
{"$sort": SON([("count", -1), ("_id", -1)])}
]
)
main_json = {
'increase': self.structure_sub_json(increase_budget['result']),
'decrease': self.structure_sub_json(decrease_budget['result'])
}
return main_json
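    # Flatten a list of {_id, count} aggregation rows into a dict keyed by the camelCased _id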
def structure_sub_json(self, item_array):
json_doc = {}
for item in item_array:
json_doc[self.convert_case(item['_id'])] = item['count']
return json_doc
def get_insights(self):
        vaz = self.pipeline_build(["Važno"])
nije_vaz = self.pipeline_build(["Nije važno"])
manje_vaz = self.pipeline_build(["Manje važno"])
veoma = self.pipeline_build(["Veoma važno"])
all_tp = self.pipeline_build(["", "Važno", "Nije važno", "Manje važno", "Veoma važno"])
# Query
all_pipe = self.mongo.db[self.collection_name].aggregate(pipeline = [
{
"$unwind": "$answers"
},
{
"$group": {
"_id": {"qst":"$answers.question"},
"counter": {"$sum": 1}
}
},
{
"$sort": SON([("counter", 1), ("_id.qst", 1)])
},
{
"$project": {
"_id": 0,
"question": "$_id.qst",
"importance": "$_id.importance",
"totalAnswers": "$counter"
}
}
])
vazno = self.mongo.db[self.collection_name].aggregate(vaz)
nije_vazno = self.mongo.db[self.collection_name].aggregate(nije_vaz)
manje_vazno = self.mongo.db[self.collection_name].aggregate(manje_vaz)
veoma_vazno = self.mongo.db[self.collection_name].aggregate(veoma)
for ind, itm in enumerate(all_pipe['result']):
counter = 0
# Vazno
for elem in vazno['result']:
if itm['question'] == elem['question']:
itm["vazno"] = elem['totalAnswers']
counter += elem['totalAnswers']
all_pipe['result'][ind] = itm
# nije_vazno
for elem in nije_vazno['result']:
if itm['question'] == elem['question']:
itm["NijeVazno"] = elem['totalAnswers']
counter += elem['totalAnswers']
all_pipe['result'][ind] = itm
# manje_vazno
for elem in manje_vazno['result']:
if itm['question'] == elem['question']:
itm["manjeVazno"] = elem['totalAnswers']
counter += elem['totalAnswers']
all_pipe['result'][ind] = itm
# veoma_vazno
for elem in veoma_vazno['result']:
if itm['question'] == elem['question']:
itm["veomaVazno"] = elem['totalAnswers']
counter += elem['totalAnswers']
all_pipe['result'][ind] = itm
itm['noAnswer'] = itm['totalAnswers'] - counter
all_pipe['result'][ind] = itm
return all_pipe['result']
@staticmethod
def convert_case(name):
string_key = string.capwords(name).replace(' ', '')
return string_key[:1].lower() + string_key[1:]
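    # Build the aggregation pipeline that counts answers per question for the given importance values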
def pipeline_build(self, value):
pipeline = [
{
"$unwind": "$answers"
},
{
"$match": {
"answers.parties.Vaš odgovor.importance": {"$in":value}
}
},
{
"$group": {
"_id": {"qst":"$answers.question", "importance": "$answers.parties.Vaš odgovor.importance"},
"counter": {"$sum": 1}
}
},
{
"$sort": SON([("counter", 1), ("_id.qst", 1)])
},
{
"$project": {
"_id": 0,
"question": "$_id.qst",
"importance": "$_id.importance",
"totalAnswers": "$counter"
}
}
]
return pipeline
| 31.510753 | 112 | 0.465791 |
6cce6198d1f8e9ebd964cd66441782c456047f52 | 286 | py | Python | Contributions/Roll_the_dice.py | OluSure/Hacktoberfest2021-1 | ad1bafb0db2f0cdeaae8f87abbaa716638c5d2ea | [
"MIT"
] | 215 | 2021-10-01T08:18:16.000Z | 2022-03-29T04:12:03.000Z | Contributions/Roll_the_dice.py | OluSure/Hacktoberfest2021-1 | ad1bafb0db2f0cdeaae8f87abbaa716638c5d2ea | [
"MIT"
] | 51 | 2021-10-01T08:16:42.000Z | 2021-10-31T13:51:51.000Z | Contributions/Roll_the_dice.py | OluSure/Hacktoberfest2021-1 | ad1bafb0db2f0cdeaae8f87abbaa716638c5d2ea | [
"MIT"
] | 807 | 2021-10-01T08:11:45.000Z | 2021-11-21T18:57:09.000Z | import random
min = 1
max = 6
roll_again = "yes"
while roll_again == "yes" or roll_again == "y":
print("Rolling the dices...")
print("The values are....")
print(random.randint(min, max))
print(random.randint(min, max))
roll_again = input("Roll the dices again?")
| 20.428571 | 47 | 0.632867 |
46b4870e5130ab5fdc5295bc7e5ff7ba2c11ccd9 | 108 | py | Python | responsibleai/responsibleai/_tools/causal/__init__.py | ezherdeva/responsible-ai-toolbox | 70895df616ca6f78ec83740e7705f641ef32c127 | [
"MIT"
] | 119 | 2021-12-02T21:00:47.000Z | 2022-03-31T06:44:31.000Z | responsibleai/responsibleai/_tools/causal/__init__.py | ezherdeva/responsible-ai-toolbox | 70895df616ca6f78ec83740e7705f641ef32c127 | [
"MIT"
] | 293 | 2021-11-30T16:45:49.000Z | 2022-03-31T23:57:13.000Z | responsibleai/responsibleai/_tools/causal/__init__.py | ezherdeva/responsible-ai-toolbox | 70895df616ca6f78ec83740e7705f641ef32c127 | [
"MIT"
] | 28 | 2021-12-07T17:28:04.000Z | 2022-03-31T07:47:11.000Z | # Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
"""Utilities for causal module."""
| 21.6 | 37 | 0.740741 |
1bf4e63c02f7d6bc0f1666e156b43a3f038671bf | 391 | py | Python | everest/ptolemaic/datalike/primary/_primary.py | rsbyrne/everest | 1ec06301cdeb7c2b7d85daf6075d996c5529247e | [
"MIT"
] | 2 | 2020-12-17T02:27:28.000Z | 2020-12-17T23:50:13.000Z | everest/ptolemaic/datalike/primary/_primary.py | rsbyrne/everest | 1ec06301cdeb7c2b7d85daf6075d996c5529247e | [
"MIT"
] | 1 | 2020-12-07T10:14:45.000Z | 2020-12-07T10:14:45.000Z | everest/ptolemaic/datalike/primary/_primary.py | rsbyrne/everest | 1ec06301cdeb7c2b7d85daf6075d996c5529247e | [
"MIT"
] | 1 | 2020-10-22T11:16:50.000Z | 2020-10-22T11:16:50.000Z | ###############################################################################
''''''
###############################################################################
from . import _Datalike
class Primary(_Datalike):
...
###############################################################################
###############################################################################
| 26.066667 | 79 | 0.102302 |
f546adf3bdbb430348abf209bf7808a16f76677b | 573 | py | Python | plotly/validators/histogram2dcontour/_autocolorscale.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/histogram2dcontour/_autocolorscale.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/histogram2dcontour/_autocolorscale.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
class AutocolorscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name='autocolorscale',
parent_name='histogram2dcontour',
**kwargs
):
super(AutocolorscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
implied_edits=kwargs.pop('implied_edits', {}),
role=kwargs.pop('role', 'style'),
**kwargs
)
| 28.65 | 77 | 0.623037 |
d33020e252808d4eac94796f998e409754a9354f | 6,530 | py | Python | tests/test_tags.py | zmdismai/tcf | 3903e0a2f444c3aa14647a5147a0df76a49e4195 | [
"Apache-2.0"
] | null | null | null | tests/test_tags.py | zmdismai/tcf | 3903e0a2f444c3aa14647a5147a0df76a49e4195 | [
"Apache-2.0"
] | null | null | null | tests/test_tags.py | zmdismai/tcf | 3903e0a2f444c3aa14647a5147a0df76a49e4195 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python2
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
import re
import os
import sys
import unittest
import commonl.testing
import tcfl
import tcfl.tc
_src = os.path.abspath(__file__)
_srcdir = os.path.dirname(_src)
class _test(unittest.TestCase,
commonl.testing.test_ttbd_mixin):
longMessage = True
@classmethod
def setUpClass(cls):
cls.src = _src
commonl.testing.test_ttbd_mixin.setUpClass(
ttbd_config_files = [
os.path.join(_srcdir, "conf_00_lib.py"),
os.path.join(_srcdir, "conf_07_zephyr.py"),
os.path.join(_srcdir, "conf_zephyr_tests.py"),
])
@classmethod
def tearDownClass(cls):
commonl.testing.test_ttbd_mixin.tearDownClass()
pass
def test_00(self):
with self.assertRaises(tcfl.tc.blocked_e):
@tcfl.tc.tags() # pylint: disable = unused-variable
class _test_00(tcfl.tc.tc_c):
# Request no tags, it shall fail
pass
@tcfl.tc.tags("tag1", tag2 = "value2")
class _test_01(tcfl.tc.tc_c):
"""
Request boolean and keyword tags
"""
def eval(self):
assert 'tag1' in self._tags
assert self.tag_get('tag1', None)[0] == True
assert 'tag2' in self._tags
assert self.tag_get('tag2', None)[0] == "value2"
def test_01(self):
r = self._tcf_run_cut()
self.assert_not_in_tcf_log("AssertionError")
self.assertEqual(r, 0)
@tcfl.tc.tags("tag1")
class _test_02(tcfl.tc.tc_c):
"""
Request a boolean tag
"""
def eval(self):
assert 'tag1' in self._tags
assert self.tag_get('tag1', None)[0] == True
def test_02(self):
r = self._tcf_run_cut()
self.assert_not_in_tcf_log("AssertionError")
self.assertEqual(r, 0)
@tcfl.tc.tags(tag2 = "value2")
class _test_03(tcfl.tc.tc_c):
"""
Request a keyword tag
"""
def eval(self):
assert 'tag2' in self._tags
assert self.tag_get('tag2', None)[0] == "value2"
def test_03(self):
r = self._tcf_run_cut()
self.assert_not_in_tcf_log("AssertionError")
self.assertEqual(r, 0)
@tcfl.tc.tags("tag1")
class _test_04(tcfl.tc.tc_c):
"""
Request boolean tag, filter on it being there
"""
def eval(self):
assert 'tag1' in self._tags
assert self.tag_get('tag1', None)[0] == True
def test_04(self):
def say_tags(args):
args.tags_spec.append("tag1")
r = self._tcf_run_cut(args_fn = say_tags)
self.assert_in_tcf_log("_test_04 @local: selected by tag specification")
self.assert_not_in_tcf_log("AssertionError")
self.assertEqual(r, 0)
@tcfl.tc.tags("tag1")
class _test_05(tcfl.tc.tc_c):
"""
Request boolean tag, filter on it not being there
"""
def eval(self):
assert 'tag1' in self._tags
assert self.tag_get('tag1', None)[0] == True
def test_05(self):
def say_tags(args):
args.tags_spec.append("not tag1")
r = self._tcf_run_cut(args_fn = say_tags)
self.assert_in_tcf_log(
"_test_05 @local: skipped: because of tag specification")
self.assert_not_in_tcf_log("AssertionError")
self.assertEqual(r, 0)
@tcfl.tc.tags("tag1")
class _test_06(tcfl.tc.tc_c):
"""
Request boolean tag, filter on it not being there
"""
def eval(self):
assert 'tag2' not in self._tags
def test_06(self):
def say_tags(args):
args.tags_spec.append("not tag2")
r = self._tcf_run_cut(args_fn = say_tags)
self.assert_in_tcf_log(
"_test_06 @local: selected by tag specification")
self.assert_not_in_tcf_log("AssertionError")
self.assertEqual(r, 0)
@tcfl.tc.tags(tag2 = "value2")
class _test_06b(tcfl.tc.tc_c):
"""
Request keyword tag, filter on the value being there
"""
def eval(self):
assert 'tag2' in self._tags
assert self.tag_get('tag2', None)[0] == "value2"
def test_06b(self):
def say_tags(args):
args.tags_spec.append("tag2 == 'value2'")
r = self._tcf_run_cut(args_fn = say_tags)
self.assert_in_tcf_log(
"_test_06b @local: selected by tag specification")
self.assert_not_in_tcf_log("AssertionError")
self.assertEqual(r, 0)
@tcfl.tc.tags(tag2 = "value2")
class _test_07(tcfl.tc.tc_c):
"""
Request keyword tag, filter on the value not being something
"""
def eval(self):
assert 'tag2' in self._tags
assert self.tag_get('tag2', None)[0] == "value2"
def test_07(self):
def say_tags(args):
args.tags_spec.append("tag2 == 'value3'")
r = self._tcf_run_cut(args_fn = say_tags)
self.assert_in_tcf_log(
"_test_07 @local: skipped: because of tag specification")
self.assert_not_in_tcf_log("AssertionError")
self.assertEqual(r, 0)
@tcfl.tc.tags("skip")
class _test_08(tcfl.tc.tc_c):
"""
Request 'skip' tag, the TC is skipped
"""
def eval(self):
assert 'skip' in self._tags
assert self.tag_get('skip', None)[0] == True
def test_08(self):
r = self._tcf_run_cut()
self.assert_in_tcf_log(
"_test_08 @local: skipped: because of 'skip' tag @")
self.assert_in_tcf_log("1 skipped")
self.assert_not_in_tcf_log("AssertionError")
self.assertEqual(r, 0)
@tcfl.tc.tags(skip = "because I want")
class _test_09(tcfl.tc.tc_c):
"""
Request 'skip' tag, the TC is skipped
"""
def eval(self):
assert 'skip' in self._tags
assert self.tag_get('skip', None)[0] == True
def test_09(self):
r = self._tcf_run_cut()
self.assert_in_tcf_log(re.compile(
"_test_09 @local: skipped: "
"because of 'skip' tag @.*because I want"))
self.assert_in_tcf_log("1 skipped")
self.assert_not_in_tcf_log("AssertionError")
self.assertEqual(r, 0)
if __name__ == "__main__":
commonl.testing.logging_init(sys.argv)
unittest.main()
| 28.893805 | 80 | 0.580858 |
121e02fff52791903c4202119461a1772c5cd5ab | 52 | py | Python | cruft/__main__.py | tdhopper/cruft | 37e46693ecdd3467bd01b022447e100163f4733a | [
"MIT"
] | 293 | 2020-08-18T05:52:45.000Z | 2022-03-31T20:39:43.000Z | cruft/__main__.py | tdhopper/cruft | 37e46693ecdd3467bd01b022447e100163f4733a | [
"MIT"
] | 102 | 2020-08-28T16:38:34.000Z | 2022-03-31T11:01:41.000Z | cruft/__main__.py | tdhopper/cruft | 37e46693ecdd3467bd01b022447e100163f4733a | [
"MIT"
] | 36 | 2020-08-28T16:34:10.000Z | 2022-03-31T21:55:53.000Z | from cruft import _cli
_cli.app(prog_name="cruft")
| 13 | 27 | 0.769231 |
2cdf3fd58f94a9ae1dc485458b8ea8f2134b99d8 | 6,953 | py | Python | python_code/client.py | nivertech/ethersecret | 1cae5bd62ff4dbbdcc572857fc2aeb19fab698fb | [
"Apache-2.0"
] | 1 | 2018-04-09T07:08:46.000Z | 2018-04-09T07:08:46.000Z | python_code/client.py | nivertech/ethersecret | 1cae5bd62ff4dbbdcc572857fc2aeb19fab698fb | [
"Apache-2.0"
] | 2 | 2017-03-29T11:58:02.000Z | 2017-03-29T20:42:17.000Z | python_code/client.py | nivertech/ethersecret | 1cae5bd62ff4dbbdcc572857fc2aeb19fab698fb | [
"Apache-2.0"
] | 4 | 2017-03-31T06:02:51.000Z | 2018-10-31T03:57:49.000Z | from pycoin.serialize import b2h, h2b
from pycoin import encoding
import rlp
from ethereum import tester, utils, abi, blocks, transactions
import requests
import json
import jsonrpc
import time
from ethereum.abi import ContractTranslator
from ethereum.utils import mk_contract_address
from eth_warpper import *
#import node
import xmlrpclib
is_localhost = False
if(is_localhost):
sever1 = xmlrpclib.ServerProxy('http://localhost:8001')
sever2 = xmlrpclib.ServerProxy('http://localhost:8002')
sever3 = xmlrpclib.ServerProxy('http://localhost:8003')
else:
sever1 = xmlrpclib.ServerProxy('http://10.101.154.71:8001')
sever2 = xmlrpclib.ServerProxy('http://10.79.151.81:8001')
sever3 = xmlrpclib.ServerProxy('http://10.65.213.19:8001')
def xor( x, y ):
a = bytearray(x)
b = bytearray(y)
z = bytearray(32)
result = h2b("")
for i in range(32):
z[i] = a[i] ^ b[i]
result = result + h2b("%02X" % z[i])
return result
def encrypt( text, key ):
if( len(text) != 32 or len(key) != 32 ):
return None
return xor(text,key)
def decrypt( chiper, key ):
if( len(chiper) != 32 or len(key) != 32 ):
return None
return xor(chiper,key)
def CreateNodeKeys( key, rand1, rand2, rand3, node_ind ):
if( node_ind == 1 ):
clientKey1 = xor(key,rand1)
clientKey2 = xor(xor(key,rand1),rand2)
clientKey3 = xor(xor(key,rand1),rand3)
elif( node_ind == 2 ):
clientKey1 = xor(key,rand2)
clientKey2 = xor(xor(key,rand2),rand1)
clientKey3 = xor(xor(key,rand2),rand3)
elif( node_ind == 3 ):
clientKey1 = xor(key,rand3)
clientKey2 = xor(xor(key,rand3),rand1)
clientKey3 = xor(xor(key,rand3),rand2)
return [clientKey1, clientKey2, clientKey3]
def upload_enc_secret_to_blockchain_and_send_keys_to_nodes( secret, secret_index, ethereum_key ):
key = utils.sha3("password")
rand1 = utils.sha3("rand1")
rand2 = utils.sha3("rand2")
rand3 = utils.sha3("rand3")
enc_secret = encrypt(secret, key)
keys1 = CreateNodeKeys( key, rand1, rand2, rand3, 1 )
keys2 = CreateNodeKeys( key, rand1, rand2, rand3, 2 )
keys3 = CreateNodeKeys( key, rand1, rand2, rand3, 3 )
print "uploading encrypted secret to blockchain"
newSecret( ethereum_key, enc_secret )
# TODO - send to nodes
keys1 = [b2h(keys1[0]), b2h(keys1[1]), b2h(keys1[2])]
keys2 = [b2h(keys2[0]), b2h(keys2[1]), b2h(keys2[2])]
keys3 = [b2h(keys3[0]), b2h(keys3[1]), b2h(keys3[2])]
#print str(keys1)
print "send key share 1 to server 1"
sever1.submit_keys( secret_index, keys1, 1 )
print "send key share 2 to server 2"
sever2.submit_keys( secret_index, keys2, 2 )
print "send key share 3 to server 3"
sever3.submit_keys( secret_index, keys3, 3 )
def key12( xor_key_rand1, xor_key_rand2, xor_key_rand1_rand2 ):
key = xor( xor( xor_key_rand1, xor_key_rand2 ), xor_key_rand1_rand2 )
return key
def key13( xor_key_rand1, xor_key_rand3, xor_key_rand1_rand3 ):
key = xor( xor( xor_key_rand1, xor_key_rand3 ), xor_key_rand1_rand3 )
return key
def key23( xor_key_rand2, xor_key_rand3, xor_key_rand2_rand3 ):
key = xor( xor( xor_key_rand2, xor_key_rand3 ), xor_key_rand2_rand3 )
return key
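# Hedged self-check (not part of the original script): a quick verification of
# the 2-of-3 XOR reconstruction implemented above. The byte values are made-up
# test vectors; only the xor/CreateNodeKeys/key12 helpers defined in this file
# (and h2b from the imports at the top) are used. Defined but never called here.
def _check_two_of_three_reconstruction():
    key = h2b("11") * 32
    rand1 = h2b("22") * 32
    rand2 = h2b("33") * 32
    rand3 = h2b("44") * 32
    keys1 = CreateNodeKeys(key, rand1, rand2, rand3, 1)
    keys2 = CreateNodeKeys(key, rand1, rand2, rand3, 2)
    # shares held by servers 1 and 2 alone are enough to rebuild the key
    assert key12(keys1[0], keys2[0], keys1[1]) == key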
def read_and_decrypt_secret( secret_index, ethereum_key ):
# 1) read encrypted secret
print "read raw encrypted data from blockchain"
data = getEncData( ethereum_key, secret_index)
# 2) ask for keys and get at least two - TODO
print "requesting key share 1 from server 1"
keys1 = sever1.get_keys(b2h(ethereum_key), secret_index, 1)
print "requesting key share 2 from server 2"
keys2 = sever2.get_keys(b2h(ethereum_key), secret_index, 2)
print "requesting key share 3 from server 3"
keys3 = sever3.get_keys(b2h(ethereum_key), secret_index, 3)
if( len(keys1) == 3 ):
print "received receive key share 1 from server 1"
keys1 = [ h2b(keys1[0]),h2b(keys1[1]),h2b(keys1[2])]
else:
print "didn't received receive key share 1 from server 1"
keys1 = None
if( len(keys2) == 3 ):
print "received receive key share 2 from server 2"
keys2 = [ h2b(keys2[0]),h2b(keys2[1]),h2b(keys2[2])]
else:
print "didn't received receive key share 2 from server 2"
keys2 = None
if( len(keys3) == 3 ):
print "received receive key share 3 from server 3"
keys3 = [ h2b(keys3[0]),h2b(keys3[1]),h2b(keys3[2])]
else:
print "didn't received receive key share 3 from server 3"
keys3 = None
key = None
if( not (keys1 is None) and not (keys2 is None) ):
key = key12( keys1[0], keys2[0], keys1[1])
elif( not (keys1 is None) and not (keys3 is None) ):
key = key13( keys1[0], keys3[0], keys1[2])
elif( not (keys2 is None) and not (keys3 is None) ):
key = key23( keys2[0], keys3[0], keys2[2])
else:
print "cannot decrypt, less than 2 servers sent keys"
return h2b("00") * 32
return decrypt(data,key)
'''
secret = h2b("c0dec0defacefeed") * 4
secret_index = 3
ethereum_key = utils.sha3("Smart Pool2")
################################################################################
upload_enc_secret_to_blockchain_and_send_keys_to_nodes( secret, secret_index, ethereum_key )
print "upload done"
data = read_and_decrypt_secret(secret_index, ethereum_key)
print b2h(data)
set_prem( ethereum_key, secret_index, "0x6d87462cB31C1217cf1eD61B4FCC37F823c61624", 0, 2490817702 )
print "set prem"
data = read_and_decrypt_secret(secret_index, ethereum_key)
print b2h(data)
'''
'''
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 5005
BUFFER_SIZE = 1024
MESSAGE = "Hello, World!"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
s.send(MESSAGE)
data = s.recv(BUFFER_SIZE)
s.close()
print "received data:", data
'''
ethereum_key = utils.sha3("Smart Pool2")
secret = h2b("45746865722050726976616379205465616d") + h2b("00") * 14
#secret = h2b("476176696e206973207361746f736869") + h2b("00") * 16
#secret = h2b("c0dec0defacefeed") * 4
secret_index = 0
print "secret index " + str(secret_index)
################################################################################
upload_enc_secret_to_blockchain_and_send_keys_to_nodes( secret, secret_index, ethereum_key )
data = read_and_decrypt_secret(secret_index, ethereum_key)
print "decrytion returned: " + b2h(data)
print "giving read access to 0x6d87462cB31C1217cf1eD61B4FCC37F823c61624 for 3 hours"
set_prem( ethereum_key, secret_index, "0x6d87462cB31C1217cf1eD61B4FCC37F823c61624", 0, 2490817702 )
data = read_and_decrypt_secret(secret_index, ethereum_key)
print "decrytion returned: " + data
#print b2h(data) | 29.969828 | 99 | 0.649504 |
c1932e3ae23229bbca9058085709abed0caea4b9 | 245 | py | Python | openjij/utils/__init__.py | zeta1999/OpenJij | 0fe03f07af947f519a32ad58fe20423919651634 | [
"Apache-2.0"
] | 61 | 2019-01-05T13:37:10.000Z | 2022-03-11T02:11:08.000Z | openjij/utils/__init__.py | zeta1999/OpenJij | 0fe03f07af947f519a32ad58fe20423919651634 | [
"Apache-2.0"
] | 79 | 2019-01-29T09:55:20.000Z | 2022-02-19T04:06:20.000Z | openjij/utils/__init__.py | 29rou/OpenJij | c2579fba8710cf82b9e6761304f0042b365b595c | [
"Apache-2.0"
] | 21 | 2019-01-07T07:55:10.000Z | 2022-03-08T14:27:23.000Z | from .benchmark import solver_benchmark, residual_energy, time_to_solution, success_probability, se_residual_energy, se_success_probability, se_upper_tts, se_lower_tts
from .res_convertor import convert_response
import openjij.utils.graph_utils
| 61.25 | 167 | 0.885714 |
54db07d659377e33f356092f523863a15192bdab | 3,262 | py | Python | tbd/preprocessing.py | jiaweih/TBD | f9c2532b26f063ba72733ea5d18c1499a92376d5 | [
"MIT"
] | null | null | null | tbd/preprocessing.py | jiaweih/TBD | f9c2532b26f063ba72733ea5d18c1499a92376d5 | [
"MIT"
] | null | null | null | tbd/preprocessing.py | jiaweih/TBD | f9c2532b26f063ba72733ea5d18c1499a92376d5 | [
"MIT"
] | null | null | null | """Process the protein sequence into specified data format for modeling.
"""
import pickle
import pandas as pd
def set_length_limit(df, length_limit=40, verbose=True):
"""Return sequences of length above specified length_limit.
Args:
df (dataframe): dataframe of protein sequence
length_limit (int, optional): specified protein length limit.
Defaults to 40.
Returns:
dataframe: sequences with length above specified length_limit
"""
df.loc[:, 'length'] = df.loc[:, 'sequence'].map(lambda x: len(x))
if verbose:
print(f'Before setting length limit of {length_limit}: ')
print(df.describe())
print()
else:
pass
df = df.loc[df.length >= length_limit]
if verbose:
print(f'After setting length limit of {length_limit}: ')
print(df.describe())
print()
else:
pass
return df
def clean_disordered_sequence(infile_disordered):
"""Clean the disordered protein sequence.
Args:
infile_disordered ([str]): filename or path to the disordered protein
sequence in csv format.
Returns:
dataframe: cleaned dataframe of disordered protein sequence.
"""
df_disordered = pd.read_csv(infile_disordered)
df_disordered = df_disordered[~df_disordered.sequence.isnull()]
df_disordered = df_disordered.drop_duplicates()
df_disordered = set_length_limit(df_disordered)
return df_disordered
def clean_ordered_sequence(infile_ordered_1, infile_ordered_2):
"""Clean ordered protein sequence.
Args:
infile_ordered_1 (str): filename of ordered protein in csv format
infile_ordered_2 (str): filename of ordered protein in csv format
Returns:
dataframe: cleaned sequence with lengh above specified length limit.
"""
df1 = pd.read_csv(infile_ordered_1)
df2 = pd.read_csv(infile_ordered_2)
df_ordered = df1.append(df2)
df_ordered = df_ordered.loc[:, ['Sequence']]
df_ordered = df_ordered.rename(columns={'Sequence': 'sequence'})
df_ordered = df_ordered[~df_ordered.sequence.isnull()]
df_ordered = df_ordered.drop_duplicates()
df_ordered = set_length_limit(df_ordered)
return df_ordered
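# Hedged usage sketch (the CSV file names below are placeholders, not files
# shipped with this module):
#
#   df_disordered = clean_disordered_sequence("disordered_proteins.csv")
#   df_ordered = clean_ordered_sequence("ordered_part1.csv", "ordered_part2.csv")
#
# Both helpers drop null and duplicate sequences and keep only sequences of at
# least 40 residues (the default length_limit) before feature extraction.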
def save_processed_data(array_ordered, labels_ordered,
array_disordered, labels_disordered,
processed_data_file="data/protein_processed_data.pkl"):
"""Save the processed data in a pickle format.
Args:
array_ordered (np.ndarray): three-dimensional array of features
for ordered protein sequence
labels_ordered (np.ndarray): one-dimensional array of labels
for ordered protein sequence
array_disordered (np.ndarray): three-dimensional array of features
for disordered protein sequence
labels_disordered (np.ndarray): one-dimensional array of labels
for disordered protein sequence
"""
dict_data = {
"array_ordered": array_ordered,
"labels_ordered": labels_ordered,
"array_disordered": array_disordered,
"labels_disordered": labels_disordered
}
with open(processed_data_file, "wb") as f_write:
pickle.dump(dict_data, f_write)
| 33.979167 | 79 | 0.681484 |
c8703af1afe6bb4c6a6cb54211bbe29122092514 | 20,073 | py | Python | torch/onnx/__init__.py | mcx/pytorch | b02b3f25dbffeead4f06d32e75dd4a282e8b0b5f | [
"Intel"
] | 1 | 2022-03-11T22:08:48.000Z | 2022-03-11T22:08:48.000Z | torch/onnx/__init__.py | sascristian/pytorch | f09c696ecda155c42006f85d2687445dfb24774f | [
"Intel"
] | null | null | null | torch/onnx/__init__.py | sascristian/pytorch | f09c696ecda155c42006f85d2687445dfb24774f | [
"Intel"
] | null | null | null | import torch._C as _C
from typing import Dict, Optional
TensorProtoDataType = _C._onnx.TensorProtoDataType
OperatorExportTypes = _C._onnx.OperatorExportTypes
TrainingMode = _C._onnx.TrainingMode
_CAFFE2_ATEN_FALLBACK = _C._onnx._CAFFE2_ATEN_FALLBACK
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
producer_name = "pytorch"
producer_version = _C._onnx.PRODUCER_VERSION
class ExportTypes:
r""""Specifies how the ONNX model is stored."""
PROTOBUF_FILE = "Saves model in the specified protobuf file."
ZIP_ARCHIVE = "Saves model in the specified ZIP file (uncompressed)."
COMPRESSED_ZIP_ARCHIVE = "Saves model in the specified ZIP file (compressed)."
DIRECTORY = "Saves model in the specified folder."
class CheckerError(Exception):
r"""Raised when ONNX checker detects an invalid model."""
pass
class SymbolicContext:
r"""Provides extra context for symbolic functions.
Args:
params_dict (Dict[str, _C.IValue]): Mapping from graph initializer name to IValue.
env (Dict[_C.Value, _C.Value]): Mapping from Torch domain graph Value to ONNX domain graph Value.
cur_node (_C.Node): Current node being converted to ONNX domain.
onnx_block (_C.Block): Current ONNX block that converted nodes are being appended to.
"""
def __init__(self, params_dict, env, cur_node, onnx_block):
self.params_dict: Dict[str, _C.IValue] = params_dict
self.env: Dict[_C.Value, _C.Value] = env
# Current node that is being converted.
self.cur_node: _C.Node = cur_node
# Current onnx block that converted nodes are being appended to.
self.onnx_block: _C.Block = onnx_block
def _export(*args, **kwargs):
from torch.onnx import utils
result = utils._export(*args, **kwargs)
return result
def export(model, args, f, export_params=True, verbose=False, training=TrainingMode.EVAL,
input_names=None, output_names=None, operator_export_type=OperatorExportTypes.ONNX,
opset_version=None, do_constant_folding=True, dynamic_axes=None,
keep_initializers_as_inputs=None, custom_opsets=None,
export_modules_as_functions=False):
r"""
Exports a model into ONNX format. If ``model`` is not a
:class:`torch.jit.ScriptModule` nor a :class:`torch.jit.ScriptFunction`, this runs
``model`` once in order to convert it to a TorchScript graph to be exported
(the equivalent of :func:`torch.jit.trace`). Thus this has the same limited support
for dynamic control flow as :func:`torch.jit.trace`.
Args:
model (torch.nn.Module, torch.jit.ScriptModule or torch.jit.ScriptFunction):
the model to be exported.
args (tuple or torch.Tensor):
args can be structured either as:
1. ONLY A TUPLE OF ARGUMENTS::
args = (x, y, z)
The tuple should contain model inputs such that ``model(*args)`` is a valid
invocation of the model. Any non-Tensor arguments will be hard-coded into the
exported model; any Tensor arguments will become inputs of the exported model,
in the order they occur in the tuple.
2. A TENSOR::
args = torch.Tensor([1])
This is equivalent to a 1-ary tuple of that Tensor.
3. A TUPLE OF ARGUMENTS ENDING WITH A DICTIONARY OF NAMED ARGUMENTS::
args = (x,
{'y': input_y,
'z': input_z})
All but the last element of the tuple will be passed as non-keyword arguments,
and named arguments will be set from the last element. If a named argument is
not present in the dictionary, it is assigned the default value, or None if a
default value is not provided.
.. note::
If a dictionary is the last element of the args tuple, it will be
interpreted as containing named arguments. In order to pass a dict as the
last non-keyword arg, provide an empty dict as the last element of the args
tuple. For example, instead of::
torch.onnx.export(
model,
(x,
# WRONG: will be interpreted as named arguments
{y: z}),
"test.onnx.pb")
Write::
torch.onnx.export(
model,
(x,
{y: z},
{}),
"test.onnx.pb")
f: a file-like object (such that ``f.fileno()`` returns a file descriptor)
or a string containing a file name. A binary protocol buffer will be written
to this file.
export_params (bool, default True): if True, all parameters will
be exported. Set this to False if you want to export an untrained model.
In this case, the exported model will first take all of its parameters
as arguments, with the ordering as specified by ``model.state_dict().values()``
verbose (bool, default False): if True, prints a description of the
model being exported to stdout. In addition, the final ONNX graph will include the
field ``doc_string``` from the exported model which mentions the source code locations
for ``model``. If True, ONNX exporter logging will be turned on.
training (enum, default TrainingMode.EVAL):
* ``TrainingMode.EVAL``: export the model in inference mode.
* ``TrainingMode.PRESERVE``: export the model in inference mode if model.training is
False and in training mode if model.training is True.
* ``TrainingMode.TRAINING``: export the model in training mode. Disables optimizations
which might interfere with training.
input_names (list of str, default empty list): names to assign to the
input nodes of the graph, in order.
output_names (list of str, default empty list): names to assign to the
output nodes of the graph, in order.
operator_export_type (enum, default OperatorExportTypes.ONNX):
* ``OperatorExportTypes.ONNX``: Export all ops as regular ONNX ops
(in the default opset domain).
* ``OperatorExportTypes.ONNX_FALLTHROUGH``: Try to convert all ops
to standard ONNX ops in the default opset domain. If unable to do so
(e.g. because support has not been added to convert a particular torch op to ONNX),
fall back to exporting the op into a custom opset domain without conversion. Applies
to `custom ops <https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html>`_
as well as ATen ops. For the exported model to be usable, the runtime must support
these non-standard ops.
* ``OperatorExportTypes.ONNX_ATEN``: All ATen ops (in the TorchScript namespace "aten")
are exported as ATen ops (in opset domain "org.pytorch.aten").
`ATen <https://pytorch.org/cppdocs/#aten>`_ is PyTorch's built-in tensor library, so
this instructs the runtime to use PyTorch's implementation of these ops.
.. warning::
Models exported this way are probably runnable only by Caffe2.
This may be useful if the numeric differences in implementations of operators are
causing large differences in behavior between PyTorch and Caffe2 (which is more
common on untrained models).
* ``OperatorExportTypes.ONNX_ATEN_FALLBACK``: Try to export each ATen op
(in the TorchScript namespace "aten") as a regular ONNX op. If we are unable to do so
(e.g. because support has not been added to convert a particular torch op to ONNX),
fall back to exporting an ATen op. See documentation on OperatorExportTypes.ONNX_ATEN for
context.
For example::
graph(%0 : Float):
%3 : int = prim::Constant[value=0]()
# conversion unsupported
%4 : Float = aten::triu(%0, %3)
# conversion supported
%5 : Float = aten::mul(%4, %0)
return (%5)
Assuming ``aten::triu`` is not supported in ONNX, this will be exported as::
graph(%0 : Float):
%1 : Long() = onnx::Constant[value={0}]()
# not converted
%2 : Float = aten::ATen[operator="triu"](%0, %1)
# converted
%3 : Float = onnx::Mul(%2, %0)
return (%3)
If PyTorch was built with Caffe2 (i.e. with ``BUILD_CAFFE2=1``), then
Caffe2-specific behavior will be enabled, including special support
for ops are produced by the modules described in
`Quantization <https://pytorch.org/docs/stable/quantization.html>`_.
.. warning::
Models exported this way are probably runnable only by Caffe2.
opset_version (int, default 13): The version of the
`default (ai.onnx) opset <https://github.com/onnx/onnx/blob/master/docs/Operators.md>`_
to target. Must be >= 7 and <= 16.
do_constant_folding (bool, default True): Apply the constant-folding optimization.
Constant-folding will replace some of the ops that have all constant inputs
with pre-computed constant nodes.
dynamic_axes (dict<string, dict<int, string>> or dict<string, list(int)>, default empty dict):
By default the exported model will have the shapes of all input and output tensors
set to exactly match those given in ``args``. To specify axes of tensors as
dynamic (i.e. known only at run-time), set ``dynamic_axes`` to a dict with schema:
* KEY (str): an input or output name. Each name must also be provided in ``input_names`` or
``output_names``.
* VALUE (dict or list): If a dict, keys are axis indices and values are axis names. If a
list, each element is an axis index.
For example::
class SumModule(torch.nn.Module):
def forward(self, x):
return torch.sum(x, dim=1)
torch.onnx.export(SumModule(), (torch.ones(2, 2),), "onnx.pb",
input_names=["x"], output_names=["sum"])
Produces::
input {
name: "x"
...
shape {
dim {
dim_value: 2 # axis 0
}
dim {
dim_value: 2 # axis 1
...
output {
name: "sum"
...
shape {
dim {
dim_value: 2 # axis 0
...
While::
torch.onnx.export(SumModule(), (torch.ones(2, 2),), "onnx.pb",
input_names=["x"], output_names=["sum"],
dynamic_axes={
# dict value: manually named axes
"x": {0: "my_custom_axis_name"},
# list value: automatic names
"sum": [0],
})
Produces::
input {
name: "x"
...
shape {
dim {
dim_param: "my_custom_axis_name" # axis 0
}
dim {
dim_value: 2 # axis 1
...
output {
name: "sum"
...
shape {
dim {
dim_param: "sum_dynamic_axes_1" # axis 0
...
keep_initializers_as_inputs (bool, default None): If True, all the
initializers (typically corresponding to parameters) in the
exported graph will also be added as inputs to the graph. If False,
then initializers are not added as inputs to the graph, and only
the non-parameter inputs are added as inputs.
This may allow for better optimizations (e.g. constant folding) by
backends/runtimes.
If ``opset_version < 9``, initializers MUST be part of graph
inputs and this argument will be ignored and the behavior will be
equivalent to setting this argument to True.
If None, then the behavior is chosen automatically as follows:
* If ``operator_export_type=OperatorExportTypes.ONNX``, the behavior is equivalent
to setting this argument to False.
* Else, the behavior is equivalent to setting this argument to True.
custom_opsets (dict<str, int>, default empty dict): A dict with schema:
* KEY (str): opset domain name
* VALUE (int): opset version
If a custom opset is referenced by ``model`` but not mentioned in this dictionary,
the opset version is set to 1. Only custom opset domain name and version should be
indicated through this argument.
export_modules_as_functions (bool or set of type of nn.Module, default False): Flag to enable
exporting all ``nn.Module`` forward calls as local functions in ONNX. Or a set to indicate the
particular types of modules to export as local functions in ONNX.
This feature requires ``opset_version`` >= 15, otherwise the export will fail. This is because
``opset_version`` < 15 implies IR version < 8, which means no local function support.
Module variables will be exported as function attributes. There are two categories of function
attributes.
1. Annotated attributes: class variables that have type annotations via
`PEP 526-style <https://www.python.org/dev/peps/pep-0526/#class-and-instance-variable-annotations>`_
will be exported as attributes.
Annotated attributes are not used inside the subgraph of ONNX local function because
they are not created by PyTorch JIT tracing, but they may be used by consumers
to determine whether or not to replace the function with a particular fused kernel.
2. Inferred attributes: variables that are used by operators inside the module. Attribute names
will have prefix "inferred::". This is to differentiate from predefined attributes retrieved from
python module annotations. Inferred attributes are used inside the subgraph of ONNX local function.
* ``False``(default): export ``nn.Module`` forward calls as fine grained nodes.
* ``True``: export all ``nn.Module`` forward calls as local function nodes.
* Set of type of nn.Module: export ``nn.Module`` forward calls as local function nodes,
only if the type of the ``nn.Module`` is found in the set.
Raises:
CheckerError: If the ONNX checker detects an invalid ONNX graph. Will still export the
model to the file ``f`` even if this is raised.
"""
from torch.onnx import utils
return utils.export(model, args, f, export_params, verbose, training,
input_names, output_names, operator_export_type, opset_version,
do_constant_folding, dynamic_axes,
keep_initializers_as_inputs, custom_opsets,
export_modules_as_functions)
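# Hedged usage sketch (not part of this module's API surface): a minimal call
# following the docstring above. ``MyModel`` and the tensor shape are
# placeholder names, not symbols defined here.
#
#   model = MyModel()                       # any torch.nn.Module
#   dummy = torch.randn(1, 3, 224, 224)     # example input used for tracing
#   torch.onnx.export(
#       model, (dummy,), "model.onnx",
#       input_names=["input"], output_names=["output"],
#       dynamic_axes={"input": {0: "batch"}, "output": {0: "batch"}},
#   )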
def export_to_pretty_string(*args, **kwargs) -> str:
r"""
Similar to :func:`export`, but returns a text representation of the ONNX
model. Only differences in args listed below. All other args are the same
as :func:`export`.
Args:
add_node_names (bool, default True): Whether or not to set
NodeProto.name. This makes no difference unless
``google_printer=True``.
google_printer (bool, default False): If False, will return a custom,
compact representation of the model. If True will return the
protobuf's `Message::DebugString()`, which is more verbose.
Returns:
A UTF-8 str containing a human-readable representation of the ONNX model.
"""
from torch.onnx import utils
return utils.export_to_pretty_string(*args, **kwargs)
def _optimize_trace(graph, operator_export_type):
from torch.onnx import utils
return utils._optimize_graph(graph, operator_export_type)
def select_model_mode_for_export(model, mode):
r"""
A context manager to temporarily set the training mode of ``model``
to ``mode``, resetting it when we exit the with-block. A no-op if
mode is None.
Args:
model: Same type and meaning as ``model`` arg to :func:`export`.
mode: Same type and meaning as ``training`` arg to :func:`export`.
"""
from torch.onnx import utils
return utils.select_model_mode_for_export(model, mode)
def _run_symbolic_function(*args, **kwargs):
from torch.onnx import utils
return utils._run_symbolic_function(*args, **kwargs)
def _run_symbolic_method(*args, **kwargs):
from torch.onnx import utils
return utils._run_symbolic_method(*args, **kwargs)
def is_in_onnx_export():
r"""
Returns True iff :func:`export` is running in the current thread
"""
from torch.onnx import utils
return utils.is_in_onnx_export()
def register_custom_op_symbolic(symbolic_name, symbolic_fn, opset_version):
r"""
Registers ``symbolic_fn`` to handle ``symbolic_name``. See
"Custom Operators" in the module documentation for an example usage.
Args:
symbolic_name (str): The name of the custom operator in "<domain>::<op>"
format.
symbolic_fn (Callable): A function that takes in the ONNX graph and
the input arguments to the current operator, and returns new
operator nodes to add to the graph.
opset_version (int): The ONNX opset version in which to register.
"""
from torch.onnx import utils
utils.register_custom_op_symbolic(symbolic_name, symbolic_fn, opset_version)
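# Hedged usage sketch (not part of this module): registering a symbolic for a
# hypothetical custom op "mylib::my_relu". The symbolic function receives the
# ONNX graph ``g`` followed by the op's inputs, as described in the module
# documentation.
#
#   def my_relu_symbolic(g, input):
#       return g.op("Relu", input)
#
#   torch.onnx.register_custom_op_symbolic("mylib::my_relu", my_relu_symbolic, 9)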
def unregister_custom_op_symbolic(symbolic_name, opset_version):
r"""
Unregisters ``symbolic_name``. See
"Custom Operators" in the module documentation for an example usage.
Args:
symbolic_name (str): The name of the custom operator in "<domain>::<op>"
format.
opset_version (int): The ONNX opset version in which to unregister.
"""
from torch.onnx import utils
utils.unregister_custom_op_symbolic(symbolic_name, opset_version)
def is_onnx_log_enabled():
r"""
Returns True iff ONNX logging is turned on.
"""
return _C._jit_is_onnx_log_enabled()
def enable_log():
r"""
Enables ONNX logging.
"""
_C._jit_set_onnx_log_enabled(True)
def disable_log():
r"""
Disables ONNX logging.
"""
_C._jit_set_onnx_log_enabled(False)
def set_log_stream(stream_name="stdout"):
r"""
Set output stream for ONNX logging.
Args:
stream_name (str, default "stdout"): Only ``stdout`` and ``stderr`` are supported
as `stream_name`.
"""
_C._jit_set_onnx_log_output_stream(stream_name)
def log(*args):
r"""
A simple logging facility for ONNX exporter.
Args:
args: Arguments are converted to string, concatenated together with a newline
character appended to the end, and flushed to output stream.
"""
_C._jit_onnx_log(*args)
| 42.617834 | 112 | 0.608678 |
ead5b5bf57954074d7d6e8bb835c66e28446ee45 | 911 | py | Python | example/example_04.py | owlbarn/owl_onnx | 956f45dd80cc28d85f9b4c061e21c878f969eefd | [
"MIT"
] | 6 | 2019-09-16T20:42:42.000Z | 2019-10-30T11:51:14.000Z | example/example_04.py | owlbarn/owl_onnx | 956f45dd80cc28d85f9b4c061e21c878f969eefd | [
"MIT"
] | null | null | null | example/example_04.py | owlbarn/owl_onnx | 956f45dd80cc28d85f9b4c061e21c878f969eefd | [
"MIT"
] | 1 | 2019-10-26T15:46:26.000Z | 2019-10-26T15:46:26.000Z | #!/usr/bin/env python3
### Eval option 1: onnxruntime
import numpy as np
import onnxruntime as rt
sess = rt.InferenceSession("test.onnx")
# Note how the initializer works without usr providing input
pred_onx = sess.run(None, input_feed={})
print(pred_onx[0])
# The user can also provide her own input
input_x = sess.get_inputs() # NOTE: This gives an empty set this time.
input_x = sess.get_overridable_initializers()[0]
input_name_x = input_x.name
input_shape_x = input_x.shape
# Check input_x.type, we find the type is tensor(float)
x = np.ones(input_shape_x, dtype="float32")
# NOTE: x = np.ones(input_shape_x, dtype="float") will leads to an error
pred_onx = sess.run(None, {input_name_x: x})
print(pred_onx[0])
### Expected output
"""
[[ 0.84147096 0.9092974 0.14112 ]
[-0.7568025 -0.9589243 -0.2794155 ]]
[[0.84147096 0.84147096 0.84147096]
[0.84147096 0.84147096 0.84147096]]
"""
| 26.028571 | 72 | 0.720088 |
ab197fbb530657e4f67b983c81a38d3d7ee3aa8e | 1,696 | py | Python | src/lib/zircon/rust/tools/gen_status.py | EnderNightLord-ChromeBook/zircon-rpi | b09b1eb3aa7a127c65568229fe10edd251869283 | [
"BSD-2-Clause"
] | 14 | 2020-10-25T05:48:36.000Z | 2021-09-20T02:46:20.000Z | src/lib/zircon/rust/tools/gen_status.py | DamieFC/fuchsia | f78a4a1326f4a4bb5834500918756173c01bab4f | [
"BSD-2-Clause"
] | null | null | null | src/lib/zircon/rust/tools/gen_status.py | DamieFC/fuchsia | f78a4a1326f4a4bb5834500918756173c01bab4f | [
"BSD-2-Clause"
] | 2 | 2020-10-25T01:13:49.000Z | 2020-10-26T02:32:13.000Z | #!/usr/bin/env python3.8
# Copyright 2016 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# A tool for autogenerating the mapping between Status and zx_status_t
# Usage: python gen_status.py zircon/system/public/zircon/errors.h {sys,enum,match}
import re
import sys
status_re = re.compile(r'#define\s+(ZX_\w+)\s+\((\-?\d+)\)$')
def parse(in_filename):
result = []
    for line in open(in_filename):
m = status_re.match(line)
if m:
result.append((m.group(1), int(m.group(2))))
return result
def to_snake_case(name):
result = []
for element in name.split('_'):
result.append(element[0] + element[1:].lower())
return ''.join(result)
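# Hedged example (illustrative input, not read from a file here): a header line
#     #define ZX_ERR_INTERNAL (-1)
# is parsed into ('ZX_ERR_INTERNAL', -1), and the three output styles emit
# roughly:
#   sys   -> pub const ZX_ERR_INTERNAL : zx_status_t = -1;
#   enum  ->     ErrInternal = -1,
#   match ->             sys::ZX_ERR_INTERNAL => Status::ErrInternal,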
def out(style, l):
print('// Auto-generated using tools/gen_status.py')
longest = max(len(name) for (name, num) in l)
if style == 'sys':
for (name, num) in l:
print(
'pub const %s : zx_status_t = %d;' % (name.ljust(longest), num))
if style == 'enum':
print('pub enum Status {')
for (name, num) in l:
print(' %s = %d,' % (to_snake_case(name[3:]), num))
print('')
print(
' /// Any zx_status_t not in the set above will map to the following:'
)
print(' UnknownOther = -32768,')
print('}')
if style == 'match':
for (name, num) in l:
print(
' sys::%s => Status::%s,' %
(name, to_snake_case(name[3:])))
print(' _ => Status::UnknownOther,')
l = parse(sys.argv[1])
out(sys.argv[2], l)
| 29.241379 | 85 | 0.556604 |
b9216d4ada8df1351413aa3d61c6b4799a9d279d | 9,410 | py | Python | jobless/jobhandlers/slurm_backend.py | sjdv1982/cloudless | 05d7decaed6751cab43a468ea9bfc5961e89d259 | [
"MIT"
] | null | null | null | jobless/jobhandlers/slurm_backend.py | sjdv1982/cloudless | 05d7decaed6751cab43a468ea9bfc5961e89d259 | [
"MIT"
] | 1 | 2021-05-07T17:08:18.000Z | 2021-05-07T17:08:18.000Z | jobless/jobhandlers/slurm_backend.py | sjdv1982/cloudless | 05d7decaed6751cab43a468ea9bfc5961e89d259 | [
"MIT"
] | null | null | null | from . import Backend, SeamlessTransformationError, JoblessRemoteError
import asyncio
import sys, os, tempfile, shutil
import psutil
import json
import subprocess, tarfile
from functools import partial
import numpy as np
from io import BytesIO
import traceback
class SlurmBackend(Backend):
support_symlinks = True
STATUS_POLLING_INTERVAL = 2.0 # TODO: conf file
SLURM_EXTRA_HEADER = None # TODO: conf file
JOB_TEMPDIR = None
def __init__(self, *args, executor, **kwargs):
self.executor = executor
self.coros = {}
self.jobs = set()
super().__init__(*args, **kwargs)
def get_job_status(self, checksum, identifier):
# TODO: invoke squeue in real time (will be a few sec more up-to-date)
return 2, None, None
def get_code(self, transformation, prepared_transformation):
"""To be implemented by subclass"""
raise NotImplementedError
def launch_transformation(self, checksum, transformation, prepared_transformation):
from .file_transformer_plugin import write_files
prepared_transformation = prepared_transformation.copy()
for key in prepared_transformation:
if key in ("__checksum__", "__env__"):
continue
filename, value, env_value = prepared_transformation[key]
if filename is None:
continue
prepared_transformation[key] = os.path.abspath(os.path.expanduser(filename)), value, env_value
jobname = "seamless-" + checksum.hex()
code = self.get_code(transformation, prepared_transformation)
old_cwd = os.getcwd()
tempdir = tempfile.mkdtemp(prefix="jobless-",dir=self.JOB_TEMPDIR)
print("Running slurm job in {}".format(tempdir), file=sys.stderr)
try:
os.chdir(tempdir)
env = {}
write_files(prepared_transformation, env, self.support_symlinks)
jobid = self.submit_job(jobname, self.SLURM_EXTRA_HEADER, env, code, prepared_transformation)
except subprocess.CalledProcessError as exc:
error_message = str(exc)
if len(exc.stderr.strip()):
error_message += "\nError message: {}".format(exc.stderr.strip().decode())
async def get_error():
raise SeamlessTransformationError(error_message)
coro = get_error()
jobid = None
os.chdir(old_cwd)
shutil.rmtree(tempdir, ignore_errors=True)
finally:
os.chdir(old_cwd)
if jobid is not None:
coro = await_job(jobname, jobid, code, self.TF_TYPE, tempdir, self.STATUS_POLLING_INTERVAL, "RESULT")
self.jobs.add(jobid)
coro = asyncio.ensure_future(coro)
self.coros[checksum] = coro
return coro, jobid
def submit_job(self, jobname, slurm_extra_header, env, code, prepared_transformation):
"""To be implemented by subclass"""
raise NotImplementedError
def cancel_job(self, checksum, identifier):
jobid = identifier
if jobid not in self.jobs:
return
cmd = "scancel {}".format(jobid)
try:
subprocess.run(cmd, shell=True, check=True)
except subprocess.CalledProcessError:
traceback.print_exc()
if checksum in self.coros:
coro = self.coros.pop(checksum)
task = asyncio.ensure_future(coro)
task.cancel()
from .shell_backend import parse_resultfile
def submit_job(jobname, slurm_extra_header, env, code, *, use_host_environment):
export = "ALL" if use_host_environment else "NONE"
env_names = ",".join(sorted(env.keys()))
slurmheader = """#!/bin/bash
#SBATCH -o {}.out
#SBATCH -e {}.err
#SBATCH --export={},{}
""".format(jobname, jobname, export, env_names)
code2 = slurmheader
if slurm_extra_header is not None:
code2 += slurm_extra_header + "\n"
code2 += code + "\n"
with open("SLURMFILE", "w") as f:
f.write(code2)
os.chmod("SLURMFILE", 0o755)
cmd = "sbatch -J {} SLURMFILE".format(jobname)
env2 = os.environ.copy()
env2.update(env)
# This is ridiculous... Even with an error message such as "sbatch: error: No PATH environment variable", the error code is 0!!
### result = subprocess.check_output(cmd, env=env2, shell=True)
# Let's try to fix that...
process = subprocess.run(cmd, shell=True, env=env2, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = process.stdout
if len(process.stderr.strip()):
if len(result):
identifier = result.decode().split()[-1]
subprocess.run("scancel {}".format(identifier), shell=True)
raise subprocess.CalledProcessError(cmd=cmd, returncode=1, stderr=process.stderr)
identifier = result.decode().split()[-1]
return identifier
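# Hedged illustration (not part of the original code): with
# jobname="seamless-<hex>", use_host_environment=True and env={"PATH": ...},
# the SLURMFILE written above starts roughly like
#
#   #!/bin/bash
#   #SBATCH -o seamless-<hex>.out
#   #SBATCH -e seamless-<hex>.err
#   #SBATCH --export=ALL,PATH
#   <optional SLURM_EXTRA_HEADER lines>
#   <transformation code>
#
# and is handed to the scheduler via `sbatch -J seamless-<hex> SLURMFILE`.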
async def await_job(jobname, identifier, code, tftype, tempdir, polling_interval, resultfile):
status_command = "squeue -j {} | awk 'NR > 1'".format(identifier)
while 1:
result = subprocess.check_output(status_command, shell=True)
result = result.decode().strip("\n").strip()
if not len(result):
break
await asyncio.sleep(polling_interval)
# Let's try to retrieve an exit code
exit_code = 0
try:
cmd = "scontrol show job {}".format(identifier)
result = subprocess.check_output(cmd, shell=True)
marker = " ExitCode="
for l in result.decode().splitlines():
pos = l.find(marker)
if pos > -1:
                ll = l[pos + len(marker):]  # text after "ExitCode=", e.g. "0:0"
pos2 = ll.find(":")
if pos2 > -1:
ll = ll[:pos2]
exit_code = int(ll)
except Exception:
pass
#print("EXIT CODE", exit_code)
stdout = ""
stderr = ""
result = None
old_cwd = os.getcwd()
try:
os.chdir(tempdir)
try:
stdout = open("{}.out".format(jobname), "rb").read()
stdout = stdout.decode()
except Exception:
pass
try:
stderr = open("{}.err".format(jobname), "rb").read()
stderr = stderr.decode()
except Exception:
pass
if exit_code == 0 and os.path.exists(resultfile):
result = parse_resultfile(resultfile)
finally:
os.chdir(old_cwd)
shutil.rmtree(tempdir, ignore_errors=True) ###
error_msg = None
if exit_code > 0:
error_msg = "Error: Non-zero exit code {}".format(exit_code)
elif result is None:
error_msg = "Error: Result file {} does not exist".format(resultfile)
if error_msg is None:
return result
else:
msg = """
{tftype} transformer exception
==========================
*************************************************
* Command
*************************************************
{}
*************************************************
{}
""".format(code, error_msg, tftype=tftype)
if len(stdout):
msg += """*************************************************
* Standard output
*************************************************
{}
*************************************************
""".format(stdout)
if len(stderr):
msg += """*************************************************
* Standard error
*************************************************
{}
*************************************************
""".format(stderr)
raise SeamlessTransformationError(msg)
####################################################################################
class SlurmBashBackend(SlurmBackend):
support_symlinks = True
TF_TYPE = "Bash"
USE_HOST_ENVIRONMENT = True
def get_code(self, transformation, prepared_transformation):
return prepared_transformation["bashcode"][1]
def submit_job(self, jobname, slurm_extra_header, env, code, prepared_transformation):
msg = "Submit slurm bash job {}"
print(msg.format(jobname), file=sys.stderr)
return submit_job(
jobname, slurm_extra_header, env, code,
use_host_environment=self.USE_HOST_ENVIRONMENT
)
class SlurmSingularityBackend(SlurmBackend):
support_symlinks = False
TF_TYPE = "Docker"
USE_HOST_ENVIRONMENT = False
def get_code(self, transformation, prepared_transformation):
docker_command, _ = get_docker_command_and_image(
prepared_transformation
)
return docker_command
def submit_job(self, jobname, slurm_extra_header, env, code, prepared_transformation):
_, docker_image = get_docker_command_and_image(
prepared_transformation
)
with open("CODE.bash", "w") as f:
f.write(code + "\n")
os.chmod("CODE.bash", 0o755)
simg = "{}/{}.simg".format(
self.SINGULARITY_IMAGE_DIR,
docker_image
)
singularity_command = "{} {} ./CODE.bash".format(
self.SINGULARITY_EXEC,
simg
)
msg = "Submit slurm singularity job {}, image {}"
print(msg.format(jobname, simg), file=sys.stderr)
return submit_job(
jobname, slurm_extra_header, env, singularity_command,
use_host_environment=self.USE_HOST_ENVIRONMENT
)
from .shell_backend import get_docker_command_and_image | 34.723247 | 131 | 0.586504 |
244dff6d0e554b85e25065c821609a3749c0c192 | 4,495 | py | Python | mkdocs/tests/search_tests.py | MahdiMajidzadeh/mkdocs | 8c668d972b92f49c6a6b0ffdcefcf10fd6dc5a4c | [
"BSD-2-Clause"
] | 4 | 2019-09-05T07:46:32.000Z | 2021-02-24T21:19:51.000Z | mkdocs/tests/search_tests.py | MahdiMajidzadeh/mkdocs | 8c668d972b92f49c6a6b0ffdcefcf10fd6dc5a4c | [
"BSD-2-Clause"
] | 10 | 2017-05-10T08:10:23.000Z | 2020-03-23T10:23:37.000Z | mkdocs/tests/search_tests.py | MahdiMajidzadeh/mkdocs | 8c668d972b92f49c6a6b0ffdcefcf10fd6dc5a4c | [
"BSD-2-Clause"
] | 38 | 2017-04-26T14:13:37.000Z | 2021-06-24T11:36:38.000Z | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import unittest
from mkdocs import nav
from mkdocs.contrib.legacy_search import search_index as search
from mkdocs.tests.base import dedent, markdown_to_toc, load_config
def strip_whitespace(string):
return string.replace("\n", "").replace(" ", "")
class SearchTests(unittest.TestCase):
def test_html_stripper(self):
stripper = search.HTMLStripper()
stripper.feed("<h1>Testing</h1><p>Content</p>")
self.assertEquals(stripper.data, ["Testing", "Content"])
def test_content_parser(self):
parser = search.ContentParser()
parser.feed('<h1 id="title">Title</h1>TEST')
parser.close()
self.assertEquals(parser.data, [search.ContentSection(
text=["TEST"],
id_="title",
title="Title"
)])
def test_content_parser_no_id(self):
parser = search.ContentParser()
parser.feed("<h1>Title</h1>TEST")
parser.close()
self.assertEquals(parser.data, [search.ContentSection(
text=["TEST"],
id_=None,
title="Title"
)])
def test_content_parser_content_before_header(self):
parser = search.ContentParser()
parser.feed("Content Before H1 <h1>Title</h1>TEST")
parser.close()
self.assertEquals(parser.data, [search.ContentSection(
text=["TEST"],
id_=None,
title="Title"
)])
def test_content_parser_no_sections(self):
parser = search.ContentParser()
parser.feed("No H1 or H2<span>Title</span>TEST")
self.assertEquals(parser.data, [])
def test_find_toc_by_id(self):
"""
Test finding the relevant TOC item by the tag ID.
"""
index = search.SearchIndex()
md = dedent("""
# Heading 1
## Heading 2
### Heading 3
""")
toc = markdown_to_toc(md)
toc_item = index._find_toc_by_id(toc, "heading-1")
self.assertEqual(toc_item.url, "#heading-1")
self.assertEqual(toc_item.title, "Heading 1")
toc_item2 = index._find_toc_by_id(toc, "heading-2")
self.assertEqual(toc_item2.url, "#heading-2")
self.assertEqual(toc_item2.title, "Heading 2")
toc_item3 = index._find_toc_by_id(toc, "heading-3")
self.assertEqual(toc_item3.url, "#heading-3")
self.assertEqual(toc_item3.title, "Heading 3")
def test_create_search_index(self):
html_content = """
<h1 id="heading-1">Heading 1</h1>
<p>Content 1</p>
<h2 id="heading-2">Heading 2</h1>
<p>Content 2</p>
<h3 id="heading-3">Heading 3</h1>
<p>Content 3</p>
"""
pages = [
{'Home': 'index.md'},
{'About': 'about.md'},
]
site_navigation = nav.SiteNavigation(load_config(pages=pages))
md = dedent("""
# Heading 1
## Heading 2
### Heading 3
""")
toc = markdown_to_toc(md)
full_content = ''.join("""Heading{0}Content{0}""".format(i) for i in range(1, 4))
for page in site_navigation:
# Fake page.read_source() and page.render()
page.markdown = md
page.toc = toc
page.content = html_content
index = search.SearchIndex()
index.add_entry_from_context(page)
self.assertEqual(len(index._entries), 4)
loc = page.abs_url
self.assertEqual(index._entries[0]['title'], page.title)
self.assertEqual(strip_whitespace(index._entries[0]['text']), full_content)
self.assertEqual(index._entries[0]['location'], loc)
self.assertEqual(index._entries[1]['title'], "Heading 1")
self.assertEqual(index._entries[1]['text'], "Content 1")
self.assertEqual(index._entries[1]['location'], "{0}#heading-1".format(loc))
self.assertEqual(index._entries[2]['title'], "Heading 2")
self.assertEqual(strip_whitespace(index._entries[2]['text']), "Content2")
self.assertEqual(index._entries[2]['location'], "{0}#heading-2".format(loc))
self.assertEqual(index._entries[3]['title'], "Heading 3")
self.assertEqual(strip_whitespace(index._entries[3]['text']), "Content3")
self.assertEqual(index._entries[3]['location'], "{0}#heading-3".format(loc))
| 29.188312 | 89 | 0.588877 |
6c3c897a1b47a24210e3fce062f6c556c8ffaca5 | 85,402 | py | Python | src/sage/combinat/rigged_configurations/rigged_configuration_element.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | null | null | null | src/sage/combinat/rigged_configurations/rigged_configuration_element.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | null | null | null | src/sage/combinat/rigged_configurations/rigged_configuration_element.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | null | null | null | r"""
Rigged Configuration Elements
A rigged configuration element is a sequence of
:class:`~sage.combinat.rigged_configurations.rigged_partition.RiggedPartition`
objects.
AUTHORS:
- Travis Scrimshaw (2010-09-26): Initial version
- Travis Scrimshaw (2012-10-25): Added virtual rigged configurations
"""
# ****************************************************************************
# Copyright (C) 2010-2012 Travis Scrimshaw <tscrim@ucdavis.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.structure.list_clone import ClonableArray
from sage.rings.integer import Integer
from sage.rings.integer_ring import ZZ
from sage.combinat.rigged_configurations.rigged_partition import RiggedPartition, RiggedPartitionTypeB
####################################################
# Base classes for rigged configuration elements #
####################################################
class RiggedConfigurationElement(ClonableArray):
"""
A rigged configuration for simply-laced types.
For more information on rigged configurations, see
:class:`RiggedConfigurations`. For rigged configurations for
non-simply-laced types, use :class:`RCNonSimplyLacedElement`.
Typically to create a specific rigged configuration, the user will pass in
the optional argument ``partition_list`` and if the user wants to specify
the rigging values, give the optional argument ``rigging_list`` as well.
If ``rigging_list`` is not passed, the rigging values are set to the
corresponding vacancy numbers.
INPUT:
- ``parent`` -- the parent of this element
- ``rigged_partitions`` -- a list of rigged partitions
There are two optional arguments to explicitly construct a rigged
configuration. The first is ``partition_list`` which gives a list of
partitions, and the second is ``rigging_list`` which is a list of
corresponding lists of riggings. If only partition_list is specified,
then it sets the rigging equal to the calculated vacancy numbers.
If we are constructing a rigged configuration from a rigged configuration
(say of another type) and we don't want to recompute the vacancy numbers,
we can use the ``use_vacancy_numbers`` to avoid the recomputation.
EXAMPLES:
Type `A_n^{(1)}` examples::
sage: RC = RiggedConfigurations(['A', 4, 1], [[2, 2]])
sage: RC(partition_list=[[2], [2, 2], [2], [2]])
<BLANKLINE>
0[ ][ ]0
<BLANKLINE>
-2[ ][ ]-2
-2[ ][ ]-2
<BLANKLINE>
2[ ][ ]2
<BLANKLINE>
-2[ ][ ]-2
<BLANKLINE>
sage: RC = RiggedConfigurations(['A', 4, 1], [[1, 1], [1, 1]])
sage: RC(partition_list=[[], [], [], []])
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
Type `D_n^{(1)}` examples::
sage: RC = RiggedConfigurations(['D', 4, 1], [[2, 2]])
sage: RC(partition_list=[[3], [3,2], [4], [3]])
<BLANKLINE>
-1[ ][ ][ ]-1
<BLANKLINE>
1[ ][ ][ ]1
0[ ][ ]0
<BLANKLINE>
-3[ ][ ][ ][ ]-3
<BLANKLINE>
-1[ ][ ][ ]-1
<BLANKLINE>
sage: RC = RiggedConfigurations(['D', 4, 1], [[1, 1], [2, 1]])
sage: RC(partition_list=[[1], [1,1], [1], [1]])
<BLANKLINE>
1[ ]1
<BLANKLINE>
0[ ]0
0[ ]0
<BLANKLINE>
0[ ]0
<BLANKLINE>
0[ ]0
<BLANKLINE>
sage: elt = RC(partition_list=[[1], [1,1], [1], [1]], rigging_list=[[0], [0,0], [0], [0]]); elt
<BLANKLINE>
1[ ]0
<BLANKLINE>
0[ ]0
0[ ]0
<BLANKLINE>
0[ ]0
<BLANKLINE>
0[ ]0
<BLANKLINE>
sage: from sage.combinat.rigged_configurations.rigged_partition import RiggedPartition
sage: RC2 = RiggedConfigurations(['D', 5, 1], [[2, 1], [3, 1]])
sage: l = [RiggedPartition()] + list(elt)
sage: ascii_art(RC2(*l))
(/) 1[ ]0 0[ ]0 0[ ]0 0[ ]0
0[ ]0
sage: ascii_art(RC2(*l, use_vacancy_numbers=True))
(/) 1[ ]0 0[ ]0 0[ ]0 0[ ]0
0[ ]0
"""
def __init__(self, parent, rigged_partitions=[], **options):
r"""
Construct a rigged configuration element.
.. WARNING::
This changes the vacancy numbers of the rigged partitions, so
if the rigged partitions comes from another rigged configuration,
a deep copy should be made before being passed here. We do not
make a deep copy here because the crystal operators generate
their own rigged partitions. See :trac:`17054`.
EXAMPLES::
sage: RC = RiggedConfigurations(['A', 4, 1], [[2, 1]])
sage: RC(partition_list=[[], [], [], []])
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
sage: RC(partition_list=[[1], [1], [], []])
<BLANKLINE>
-1[ ]-1
<BLANKLINE>
0[ ]0
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
sage: elt = RC(partition_list=[[1], [1], [], []], rigging_list=[[-1], [0], [], []]); elt
<BLANKLINE>
-1[ ]-1
<BLANKLINE>
0[ ]0
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
sage: TestSuite(elt).run()
"""
n = options.get('n', parent._cartan_type.rank())
if "partition_list" in options:
data = options["partition_list"]
if len(data) == 0:
# Create a size n array of empty rigged tableau since no tableau
# were given
nu = []
for i in range(n):
nu.append(RiggedPartition())
else:
if len(data) != n: # otherwise n should be equal to the number of tableaux
raise ValueError("incorrect number of partitions")
nu = []
if "rigging_list" in options:
rigging_data = options["rigging_list"]
if len(rigging_data) != n:
raise ValueError("incorrect number of riggings")
for i in range(n):
nu.append(RiggedPartition(tuple(data[i]),
list(rigging_data[i])))
else:
for partition_data in data:
nu.append(RiggedPartition(tuple(partition_data)))
elif n == len(rigged_partitions) and isinstance(rigged_partitions[0], RiggedPartition):
# The isinstance check is to make sure we are not in the n == 1 special case because
# Parent's __call__ always passes at least 1 argument to the element constructor
if options.get('use_vacancy_numbers', False):
ClonableArray.__init__(self, parent, rigged_partitions)
return
nu = rigged_partitions
else:
# Otherwise we did not receive any info, create a size n array of
# empty rigged partitions
nu = []
for i in range(n):
nu.append(RiggedPartition())
#raise ValueError("Invalid input")
#raise ValueError("Incorrect number of rigged partitions")
# Set the vacancy numbers
for a, partition in enumerate(nu):
# If the partition is empty, there's nothing to do
if len(partition) <= 0:
continue
# Setup the first block
block_len = partition[0]
vac_num = parent._calc_vacancy_number(nu, a, block_len)
for i, row_len in enumerate(partition):
# If we've gone to a different sized block, then update the
# values which change when moving to a new block size
if block_len != row_len:
vac_num = parent._calc_vacancy_number(nu, a, row_len)
block_len = row_len
partition.vacancy_numbers[i] = vac_num
if partition.rigging[i] is None:
partition.rigging[i] = partition.vacancy_numbers[i]
ClonableArray.__init__(self, parent, nu)
def check(self):
"""
Check the rigged configuration is properly defined.
There is nothing to check here.
EXAMPLES::
sage: RC = crystals.infinity.RiggedConfigurations(['A', 4])
sage: b = RC.module_generators[0].f_string([1,2,1,1,2,4,2,3,3,2])
sage: b.check()
"""
pass
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: RC = RiggedConfigurations(['D', 4, 1], [[2, 2]])
sage: elt = RC(partition_list=[[2], [3,1], [3], [3]]); elt
<BLANKLINE>
-1[ ][ ]-1
<BLANKLINE>
2[ ][ ][ ]2
0[ ]0
<BLANKLINE>
-2[ ][ ][ ]-2
<BLANKLINE>
-2[ ][ ][ ]-2
<BLANKLINE>
sage: RC.options(display='horizontal')
sage: elt
-1[ ][ ]-1 2[ ][ ][ ]2 -2[ ][ ][ ]-2 -2[ ][ ][ ]-2
0[ ]0
sage: RC.options._reset()
"""
return self.parent().options._dispatch(self, '_repr_', 'display')
def _repr_vertical(self):
"""
Return the string representation of ``self`` vertically.
EXAMPLES::
sage: RC = RiggedConfigurations(['D', 4, 1], [[2, 2]])
sage: print(RC(partition_list=[[2], [3,1], [3], [3]])._repr_vertical())
<BLANKLINE>
-1[ ][ ]-1
<BLANKLINE>
2[ ][ ][ ]2
0[ ]0
<BLANKLINE>
-2[ ][ ][ ]-2
<BLANKLINE>
-2[ ][ ][ ]-2
<BLANKLINE>
sage: print(RC(partition_list=[[],[],[],[]])._repr_vertical())
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
"""
ret_str = ""
for tableau in self:
ret_str += "\n" + repr(tableau)
return(ret_str)
def _repr_horizontal(self):
"""
Return the string representation of ``self`` horizontally.
EXAMPLES::
sage: RC = RiggedConfigurations(['D', 4, 1], [[2, 2]])
sage: print(RC(partition_list=[[2], [3,1], [3], [3]])._repr_horizontal())
-1[ ][ ]-1 2[ ][ ][ ]2 -2[ ][ ][ ]-2 -2[ ][ ][ ]-2
0[ ]0
sage: print(RC(partition_list=[[],[],[],[]])._repr_horizontal())
(/) (/) (/) (/)
"""
tab_str = [repr(x).splitlines() for x in self]
height = max(len(t) for t in tab_str)
widths = [max(len(x) for x in t) for t in tab_str]
ret_str = ''
for i in range(height):
if i != 0:
ret_str += '\n'
for j,t in enumerate(tab_str):
if j != 0:
ret_str += ' '
if i < len(t):
ret_str += t[i] + ' ' * (widths[j]-len(t[i]))
else:
ret_str += ' ' * widths[j]
return ret_str
def _latex_(self):
r"""
Return the LaTeX representation of ``self``.
EXAMPLES::
sage: RC = RiggedConfigurations(['D', 4, 1], [[2, 2]])
sage: latex(RC(partition_list=[[2], [3,1], [3], [3]]))
{
\begin{array}[t]{r|c|c|l}
\cline{2-3} -1 &\phantom{|}&\phantom{|}& -1 \\
\cline{2-3}
\end{array}
}
\quad
{
\begin{array}[t]{r|c|c|c|l}
\cline{2-4} 2 &\phantom{|}&\phantom{|}&\phantom{|}& 2 \\
\cline{2-4} 0 &\phantom{|}& \multicolumn{3 }{l}{ 0 } \\
\cline{2-2}
\end{array}
}
\quad
{
\begin{array}[t]{r|c|c|c|l}
\cline{2-4} -2 &\phantom{|}&\phantom{|}&\phantom{|}& -2 \\
\cline{2-4}
\end{array}
}
\quad
{
\begin{array}[t]{r|c|c|c|l}
\cline{2-4} -2 &\phantom{|}&\phantom{|}&\phantom{|}& -2 \\
\cline{2-4}
\end{array}
}
sage: latex(RC(partition_list=[[],[],[],[]]))
{\emptyset}
\quad
{\emptyset}
\quad
{\emptyset}
\quad
{\emptyset}
"""
ret_string = self[0]._latex_()
for partition in self[1:]:
ret_string += "\n\\quad\n" + partition._latex_()
return ret_string
def _ascii_art_(self):
"""
Return an ASCII art representation of ``self``.
EXAMPLES::
sage: RC = RiggedConfigurations(['D', 4, 1], [[2, 2]])
sage: ascii_art(RC(partition_list=[[2], [3,1], [3], [3]]))
-1[ ][ ]-1 2[ ][ ][ ]2 -2[ ][ ][ ]-2 -2[ ][ ][ ]-2
0[ ]0
sage: ascii_art(RC(partition_list=[[],[],[],[]]))
(/) (/) (/) (/)
sage: RC = RiggedConfigurations(['D', 7, 1], [[3,3],[5,2],[4,3],[2,3],[4,4],[3,1],[1,4],[2,2]])
sage: elt = RC(partition_list=[[2],[3,2,1],[2,2,1,1],[2,2,1,1,1,1],[3,2,1,1,1,1],[2,1,1],[2,2]],
....: rigging_list=[[2],[1,0,0],[4,1,2,1],[1,0,0,0,0,0],[0,1,0,0,0,0],[0,0,0],[0,0]])
sage: ascii_art(elt)
3[ ][ ]2 1[ ][ ][ ]1 4[ ][ ]4 2[ ][ ]1 0[ ][ ][ ]0 0[ ][ ]0 0[ ][ ]0
2[ ][ ]0 4[ ][ ]1 2[ ][ ]0 2[ ][ ]1 0[ ]0 0[ ][ ]0
1[ ]0 3[ ]2 0[ ]0 0[ ]0 0[ ]0
3[ ]1 0[ ]0 0[ ]0
0[ ]0 0[ ]0
0[ ]0 0[ ]0
sage: Partitions.options(convention='French')
sage: ascii_art(elt)
0[ ]0 0[ ]0
0[ ]0 0[ ]0
3[ ]1 0[ ]0 0[ ]0
1[ ]0 3[ ]2 0[ ]0 0[ ]0 0[ ]0
2[ ][ ]0 4[ ][ ]1 2[ ][ ]0 2[ ][ ]1 0[ ]0 0[ ][ ]0
3[ ][ ]2 1[ ][ ][ ]1 4[ ][ ]4 2[ ][ ]1 0[ ][ ][ ]0 0[ ][ ]0 0[ ][ ]0
sage: Partitions.options._reset()
"""
from sage.combinat.partition import Partitions
if Partitions.options.convention == "French":
baseline = lambda s: 0
else:
baseline = len
from sage.typeset.ascii_art import AsciiArt
s = repr(self[0]).splitlines()
ret = AsciiArt(s, baseline=baseline(s))
for tableau in self[1:]:
s = repr(tableau).splitlines()
ret += AsciiArt([" "], baseline=baseline(s)) + AsciiArt(s, baseline=baseline(s))
return ret
def nu(self):
r"""
Return the list `\nu` of rigged partitions of this rigged
configuration element.
OUTPUT:
The `\nu` array as a list.
EXAMPLES::
sage: RC = RiggedConfigurations(['A', 4, 1], [[2, 2]])
sage: RC(partition_list=[[2], [2,2], [2], [2]]).nu()
[0[ ][ ]0
, -2[ ][ ]-2
-2[ ][ ]-2
, 2[ ][ ]2
, -2[ ][ ]-2
]
"""
return list(self)
# TODO: Change e/f to work for all types
def e(self, a):
r"""
Return the action of the crystal operator `e_a` on ``self``.
This implements the method defined in [CrysStructSchilling06]_ which
finds the value `k` which is the length of the string with the
smallest negative rigging of smallest length. Then it removes a box
from a string of length `k` in the `a`-th rigged partition, keeping all
colabels fixed and increasing the new label by one. If no such string
exists, then `e_a` is undefined.
This method can also be used when the underlying Cartan matrix is a
        Borcherds-Cartan matrix. In this case, the method of [SS2018]_ is
used, where the new label is increased by half of the `a`-th diagonal
entry of the underlying Borcherds-Cartan matrix. This method will also
return ``None`` if `a` is imaginary and the smallest rigging in the
`a`-th rigged partition is not exactly half of the `a`-th diagonal entry
of the Borcherds-Cartan matrix.
INPUT:
- ``a`` -- the index of the partition to remove a box
OUTPUT:
The resulting rigged configuration element.
EXAMPLES::
sage: RC = RiggedConfigurations(['A', 4, 1], [[2,1]])
sage: elt = RC(partition_list=[[1], [1], [1], [1]])
sage: elt.e(3)
sage: elt.e(1)
<BLANKLINE>
(/)
<BLANKLINE>
0[ ]0
<BLANKLINE>
0[ ]0
<BLANKLINE>
-1[ ]-1
<BLANKLINE>
sage: A = CartanMatrix([[-2,-1],[-1,-2]], borcherds=True)
sage: RC = crystals.infinity.RiggedConfigurations(A)
sage: nu0 = RC(partition_list=[[],[]])
sage: nu = nu0.f_string([1,0,0,0])
sage: ascii_art(nu.e(0))
5[ ]3 4[ ]3
5[ ]1
"""
if a not in self.parent()._rc_index_inverse:
raise ValueError("{} is not in the index set".format(a))
a = self.parent()._rc_index_inverse[a]
M = self.parent()._cartan_matrix
new_list = self[a][:]
new_vac_nums = self[a].vacancy_numbers[:]
new_rigging = self[a].rigging[:]
# Separate out one of the Borcherds cases
if M[a,a] != 2:
k = None
set_vac_num = True
if new_rigging[-1] != -M[a,a] // 2:
return None
new_list.pop()
new_vac_nums.pop()
new_rigging.pop()
else:
# Find k and perform e_a
k = None
num_rows = len(new_list)
cur_rigging = -1
rigging_index = None
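            # Scan the rows from longest to shortest; using <= means ties move to
            # shorter rows, so we find the smallest negative rigging on a string
            # of smallest length.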
for i in range(num_rows):
if new_rigging[i] <= cur_rigging:
cur_rigging = new_rigging[i]
rigging_index = i
# If we've not found a valid k
if rigging_index is None:
return None
# Note that because the riggings are weakly decreasing, we will always
            # remove the last box of a block
k = new_list[rigging_index]
set_vac_num = False
if k == 1:
new_list.pop()
new_vac_nums.pop()
new_rigging.pop()
else:
new_list[rigging_index] -= 1
cur_rigging += M[a,a] // 2
# Properly sort the riggings
j = rigging_index + 1
# Update the vacancy number if the row lengths are the same
if j < num_rows and new_list[j] == new_list[rigging_index]:
new_vac_nums[rigging_index] = new_vac_nums[j]
set_vac_num = True
while j < num_rows and new_list[j] == new_list[rigging_index] \
and new_rigging[j] > cur_rigging:
new_rigging[j-1] = new_rigging[j] # Shuffle it along
j += 1
new_rigging[j-1] = cur_rigging
new_partitions = []
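        # Rebuild every rigged partition: shifting vacancy numbers and riggings
        # together on rows of length at least k keeps the co-labels fixed.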
for b in range(len(self)):
if b != a:
new_partitions.append(self._generate_partition_e(a, b, k))
else:
# Update the vacancy numbers and the rigging
for i in range(len(new_vac_nums)):
if k is not None and new_list[i] < k:
break
new_vac_nums[i] += M[a,b]
new_rigging[i] += M[a,b]
                if k != 1 and not set_vac_num: # If we did not remove a row or find another row of length k-1
new_vac_nums[rigging_index] += 2
new_partitions.append(RiggedPartition(new_list, new_rigging, new_vac_nums))
ret_RC = self.__class__(self.parent(), new_partitions, use_vacancy_numbers=True)
nu = ret_RC.nu()
        if k != 1 and not set_vac_num: # If we did not remove a row or find another row of length k-1
# Update that row's vacancy number
ret_RC[a].vacancy_numbers[rigging_index] = \
self.parent()._calc_vacancy_number(nu, a, nu[a][rigging_index])
return ret_RC
def _generate_partition_e(self, a, b, k):
r"""
Generate a new partition for a given value of `a` by updating the
vacancy numbers and preserving co-labels for the map `e_a`.
INPUT:
- ``a`` -- the index of the partition we operated on
- ``b`` -- the index of the partition to generate
- ``k`` -- the length of the string with the smallest negative
rigging of smallest length
OUTPUT:
The constructed rigged partition.
TESTS::
sage: RC = RiggedConfigurations(['A', 4, 1], [[2,1]])
sage: RC(partition_list=[[1], [1], [1], [1]])._generate_partition_e(1, 2, 1)
-1[ ]-1
<BLANKLINE>
"""
# Check to make sure we will do something
if not self.parent()._cartan_matrix[a,b]:
return self[b]
new_list = self[b]._list
new_vac_nums = self[b].vacancy_numbers[:]
new_rigging = self[b].rigging[:]
# Update the vacancy numbers and the rigging
value = self.parent()._cartan_matrix[b,a]
for i in range(len(new_vac_nums)):
if k is not None and new_list[i] < k:
break
new_vac_nums[i] += value
new_rigging[i] += value
return RiggedPartition(new_list, new_rigging, new_vac_nums)
def f(self, a):
r"""
Return the action of the crystal operator `f_a` on ``self``.
This implements the method defined in [CrysStructSchilling06]_ which
finds the value `k` which is the length of the string with the
smallest nonpositive rigging of largest length. Then it adds a box from
a string of length `k` in the `a`-th rigged partition, keeping all
colabels fixed and decreasing the new label by one. If no such string
exists, then it adds a new string of length 1 with label `-1`. However
we need to modify the definition to work for `B(\infty)` by removing
the condition that the resulting rigged configuration is valid.
This method can also be used when the underlying Cartan matrix is a
        Borcherds-Cartan matrix. In this case, the method of [SS2018]_ is
used, where the new label is decreased by half of the `a`-th diagonal
entry of the underlying Borcherds-Cartan matrix.
INPUT:
- ``a`` -- the index of the partition to add a box
OUTPUT:
The resulting rigged configuration element.
EXAMPLES::
sage: RC = crystals.infinity.RiggedConfigurations(['A', 3])
sage: nu0 = RC.module_generators[0]
sage: nu0.f(2)
<BLANKLINE>
(/)
<BLANKLINE>
-2[ ]-1
<BLANKLINE>
(/)
<BLANKLINE>
sage: A = CartanMatrix([[-2,-1],[-1,-2]], borcherds=True)
sage: RC = crystals.infinity.RiggedConfigurations(A)
sage: nu0 = RC(partition_list=[[],[]])
sage: nu = nu0.f_string([1,0,0,0])
sage: ascii_art(nu.f(0))
9[ ]7 6[ ]5
9[ ]5
9[ ]3
9[ ]1
"""
if a not in self.parent()._rc_index_inverse:
raise ValueError("{} is not in the index set".format(a))
a = self.parent()._rc_index_inverse[a]
M = self.parent()._cartan_matrix
new_list = self[a][:]
new_vac_nums = self[a].vacancy_numbers[:]
new_rigging = self[a].rigging[:]
# Find k and perform f_a
k = None
        add_index = -1 # Index where we will add our row
rigging_index = None # Index which we will pull the rigging from
cur_rigging = ZZ.zero()
num_rows = len(new_list)
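        # Scan the rows from shortest to longest: k becomes the largest length
        # among strings with the smallest nonpositive rigging, and add_index marks
        # where the lengthened row is re-inserted to keep the partition weakly
        # decreasing.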
for i in reversed(range(num_rows)):
# If we need to increment a row, look for when we change rows for
# the correct index.
if add_index is None and new_list[i] != new_list[rigging_index]:
add_index = i+1
if new_rigging[i] <= cur_rigging:
cur_rigging = new_rigging[i]
k = new_list[i]
rigging_index = i
add_index = None
# If we've not found a valid k
if k is None:
new_list.append(1)
new_rigging.append(-M[a,a] // 2)
new_vac_nums.append(None)
k = 0
add_index = num_rows
num_rows += 1 # We've added a row
else:
if add_index is None: # We are adding to the first row in the list
add_index = 0
new_list[add_index] += 1
new_rigging.insert(add_index, new_rigging[rigging_index] - M[a,a] // 2)
new_vac_nums.insert(add_index, None)
new_rigging.pop(rigging_index + 1) # add 1 for the insertion
new_vac_nums.pop(rigging_index + 1)
new_partitions = []
for b in range(len(self)):
if b != a:
new_partitions.append(self._generate_partition_f(a, b, k))
else:
# Update the vacancy numbers and the rigging
for i in range(num_rows):
if new_list[i] <= k:
break
if i != add_index:
new_vac_nums[i] -= M[a,b]
new_rigging[i] -= M[a,b]
new_partitions.append(RiggedPartition(new_list, new_rigging, new_vac_nums))
new_partitions[a].vacancy_numbers[add_index] = \
self.parent()._calc_vacancy_number(new_partitions, a,
new_partitions[a][add_index])
# Note that we do not need to sort the rigging since if there was a
# smaller rigging in a larger row, then `k` would be larger.
return self.__class__(self.parent(), new_partitions, use_vacancy_numbers=True)
def _generate_partition_f(self, a, b, k):
r"""
Generate a new partition for a given value of `a` by updating the
vacancy numbers and preserving co-labels for the map `f_a`.
INPUT:
- ``a`` -- the index of the partition we operated on
- ``b`` -- the index of the partition to generate
- ``k`` -- the length of the string with smallest nonpositive rigging
of largest length
OUTPUT:
The constructed rigged partition.
TESTS::
sage: RC = RiggedConfigurations(['A', 4, 1], [[2,1]])
sage: RC(partition_list=[[1], [1], [1], [1]])._generate_partition_f(1, 2, 1)
0[ ]0
<BLANKLINE>
"""
# Check to make sure we will do something
if not self.parent()._cartan_matrix[a,b]:
return self[b]
new_list = self[b]._list
new_vac_nums = self[b].vacancy_numbers[:]
new_rigging = self[b].rigging[:]
# Update the vacancy numbers and the rigging
value = self.parent()._cartan_matrix[b,a]
for i in range(len(new_vac_nums)):
if new_list[i] <= k:
break
new_vac_nums[i] -= value
new_rigging[i] -= value
return RiggedPartition(new_list, new_rigging, new_vac_nums)
def epsilon(self, a):
r"""
Return `\varepsilon_a` of ``self``.
Let `x_{\ell}` be the smallest string of `\nu^{(a)}` or `0` if
`\nu^{(a)} = \emptyset`, then we have
`\varepsilon_a = -\min(0, x_{\ell})`.
EXAMPLES::
sage: La = RootSystem(['B',2]).weight_lattice().fundamental_weights()
sage: RC = crystals.RiggedConfigurations(La[1]+La[2])
sage: I = RC.index_set()
sage: matrix([[rc.epsilon(i) for i in I] for rc in RC[:4]])
[0 0]
[1 0]
[0 1]
[0 2]
"""
a = self.parent()._rc_index_inverse[a]
if not self[a]:
return ZZ.zero()
return Integer(-min(0, min(self[a].rigging)))
def phi(self, a):
r"""
Return `\varphi_a` of ``self``.
Let `x_{\ell}` be the smallest string of `\nu^{(a)}` or `0` if
`\nu^{(a)} = \emptyset`, then we have
        `\varphi_a = p_{\infty}^{(a)} - \min(0, x_{\ell})`.
EXAMPLES::
sage: La = RootSystem(['B',2]).weight_lattice().fundamental_weights()
sage: RC = crystals.RiggedConfigurations(La[1]+La[2])
sage: I = RC.index_set()
sage: matrix([[rc.phi(i) for i in I] for rc in RC[:4]])
[1 1]
[0 3]
[0 2]
[1 1]
"""
a = self.parent()._rc_index_inverse[a]
p_inf = self.parent()._calc_vacancy_number(self, a, float("inf"))
if not self[a]:
return Integer(p_inf)
return Integer(p_inf - min(0, min(self[a].rigging)))
def vacancy_number(self, a, i):
r"""
Return the vacancy number `p_i^{(a)}`.
INPUT:
- ``a`` -- the index of the rigged partition
- ``i`` -- the row of the rigged partition
EXAMPLES::
sage: RC = RiggedConfigurations(['A', 4, 1], [[2, 2]])
sage: elt = RC(partition_list=[[1], [2,1], [1], []])
sage: elt.vacancy_number(2, 3)
-2
sage: elt.vacancy_number(2, 2)
-2
sage: elt.vacancy_number(2, 1)
-1
sage: RC = RiggedConfigurations(['D',4,1], [[2,1], [2,1]])
sage: x = RC(partition_list=[[3], [3,1,1], [2], [3,1]]); ascii_art(x)
-1[ ][ ][ ]-1 1[ ][ ][ ]1 0[ ][ ]0 -3[ ][ ][ ]-3
0[ ]0 -1[ ]-1
0[ ]0
sage: x.vacancy_number(2,2)
1
"""
a = self.parent()._rc_index_inverse[a]
return self.parent()._calc_vacancy_number(self, a, i)
def partition_rigging_lists(self):
"""
Return the list of partitions and the associated list of riggings
of ``self``.
EXAMPLES::
sage: RC = RiggedConfigurations(['A',3,1], [[1,2],[2,2]])
sage: rc = RC(partition_list=[[2],[1],[1]], rigging_list=[[-1],[0],[-1]]); rc
<BLANKLINE>
-1[ ][ ]-1
<BLANKLINE>
1[ ]0
<BLANKLINE>
-1[ ]-1
<BLANKLINE>
sage: rc.partition_rigging_lists()
[[[2], [1], [1]], [[-1], [0], [-1]]]
"""
partitions = []
riggings = []
for p in self:
partitions.append(list(p))
riggings.append(list(p.rigging))
return [partitions, riggings]
class RCNonSimplyLacedElement(RiggedConfigurationElement):
"""
Rigged configuration elements for non-simply-laced types.
TESTS::
sage: vct = CartanType(['C',2,1]).as_folding()
sage: RC = crystals.infinity.RiggedConfigurations(vct)
sage: elt = RC.module_generators[0].f_string([1,0,2,2,0,1]); elt
<BLANKLINE>
-2[ ][ ]-1
<BLANKLINE>
-2[ ]-1
-2[ ]-1
<BLANKLINE>
-2[ ][ ]-1
<BLANKLINE>
sage: TestSuite(elt).run()
"""
def to_virtual_configuration(self):
"""
Return the corresponding rigged configuration in the virtual crystal.
EXAMPLES::
sage: RC = RiggedConfigurations(['C',2,1], [[1,2],[1,1],[2,1]])
sage: elt = RC(partition_list=[[3],[2]]); elt
<BLANKLINE>
0[ ][ ][ ]0
<BLANKLINE>
0[ ][ ]0
sage: elt.to_virtual_configuration()
<BLANKLINE>
0[ ][ ][ ]0
<BLANKLINE>
0[ ][ ][ ][ ]0
<BLANKLINE>
0[ ][ ][ ]0
"""
return self.parent().to_virtual(self)
def e(self, a):
r"""
Return the action of `e_a` on ``self``.
This works by lifting into the virtual configuration, then applying
.. MATH::
e^v_a = \prod_{j \in \iota(a)} \hat{e}_j^{\gamma_j}
and pulling back.
EXAMPLES::
sage: vct = CartanType(['C',2,1]).as_folding()
sage: RC = crystals.infinity.RiggedConfigurations(vct)
sage: elt = RC(partition_list=[[2],[1,1],[2]], rigging_list=[[-1],[-1,-1],[-1]])
sage: ascii_art(elt.e(0))
0[ ]0 -2[ ]-1 -2[ ][ ]-1
-2[ ]-1
sage: ascii_art(elt.e(1))
-3[ ][ ]-2 0[ ]1 -3[ ][ ]-2
sage: ascii_art(elt.e(2))
-2[ ][ ]-1 -2[ ]-1 0[ ]0
-2[ ]-1
"""
vct = self.parent()._folded_ct
L = []
gamma = vct.scaling_factors()
for i in vct.folding_orbit()[a]:
L.extend([i]*gamma[a])
virtual_rc = self.parent().to_virtual(self).e_string(L)
if virtual_rc is None:
return None
return self.parent().from_virtual(virtual_rc)
def f(self, a):
r"""
Return the action of `f_a` on ``self``.
This works by lifting into the virtual configuration, then applying
.. MATH::
f^v_a = \prod_{j \in \iota(a)} \hat{f}_j^{\gamma_j}
and pulling back.
EXAMPLES::
sage: vct = CartanType(['C',2,1]).as_folding()
sage: RC = crystals.infinity.RiggedConfigurations(vct)
sage: elt = RC(partition_list=[[2],[1,1],[2]], rigging_list=[[-1],[-1,-1],[-1]])
sage: ascii_art(elt.f(0))
-4[ ][ ][ ]-2 -2[ ]-1 -2[ ][ ]-1
-2[ ]-1
sage: ascii_art(elt.f(1))
-1[ ][ ]0 -2[ ][ ]-2 -1[ ][ ]0
-2[ ]-1
sage: ascii_art(elt.f(2))
-2[ ][ ]-1 -2[ ]-1 -4[ ][ ][ ]-2
-2[ ]-1
"""
vct = self.parent()._folded_ct
L = []
gamma = vct.scaling_factors()
for i in vct.folding_orbit()[a]:
L.extend([i]*gamma[a])
virtual_rc = self.parent().to_virtual(self).f_string(L)
if virtual_rc is None:
return None
return self.parent().from_virtual(virtual_rc)
##########################################################
## Highest weight crystal rigged configuration elements ##
##########################################################
class RCHighestWeightElement(RiggedConfigurationElement):
"""
Rigged configurations in highest weight crystals.
TESTS::
sage: La = RootSystem(['A',2,1]).weight_lattice(extended=True).fundamental_weights()
sage: RC = crystals.RiggedConfigurations(['A',2,1], La[0])
sage: elt = RC(partition_list=[[1,1],[1],[2]]); elt
<BLANKLINE>
-1[ ]-1
-1[ ]-1
<BLANKLINE>
1[ ]1
<BLANKLINE>
-1[ ][ ]-1
<BLANKLINE>
sage: TestSuite(elt).run()
"""
def check(self):
"""
Make sure all of the riggings are less than or equal to the
vacancy number.
TESTS::
sage: La = RootSystem(['A',2,1]).weight_lattice(extended=True).fundamental_weights()
sage: RC = crystals.RiggedConfigurations(['A',2,1], La[0])
sage: elt = RC(partition_list=[[1,1],[1],[2]])
sage: elt.check()
"""
for a, partition in enumerate(self):
for i, vac_num in enumerate(partition.vacancy_numbers):
if vac_num < partition.rigging[i]:
raise ValueError("rigging can be at most the vacancy number")
def f(self, a):
r"""
Return the action of the crystal operator `f_a` on ``self``.
This implements the method defined in [CrysStructSchilling06]_ which
finds the value `k` which is the length of the string with the
smallest nonpositive rigging of largest length. Then it adds a box
from a string of length `k` in the `a`-th rigged partition, keeping
all colabels fixed and decreasing the new label by one. If no such
string exists, then it adds a new string of length 1 with label `-1`.
If any of the resulting vacancy numbers are larger than the labels
(i.e. it is an invalid rigged configuration), then `f_a` is
undefined.
INPUT:
- ``a`` -- the index of the partition to add a box
OUTPUT:
The resulting rigged configuration element.
EXAMPLES::
sage: La = RootSystem(['A',2,1]).weight_lattice(extended=True).fundamental_weights()
sage: RC = crystals.RiggedConfigurations(['A',2,1], La[0])
sage: elt = RC(partition_list=[[1,1],[1],[2]])
sage: elt.f(0)
<BLANKLINE>
-2[ ][ ]-2
-1[ ]-1
<BLANKLINE>
1[ ]1
<BLANKLINE>
0[ ][ ]0
<BLANKLINE>
sage: elt.f(1)
<BLANKLINE>
0[ ]0
0[ ]0
<BLANKLINE>
-1[ ]-1
-1[ ]-1
<BLANKLINE>
0[ ][ ]0
<BLANKLINE>
sage: elt.f(2)
"""
if not self.phi(a):
return None
return RiggedConfigurationElement.f(self, a)
def weight(self):
"""
Return the weight of ``self``.
EXAMPLES::
sage: La = RootSystem(['A',2,1]).weight_lattice(extended=True).fundamental_weights()
sage: B = crystals.RiggedConfigurations(['A',2,1], La[0])
sage: mg = B.module_generators[0]
sage: mg.f_string([0,1,2,0]).weight()
-Lambda[0] + Lambda[1] + Lambda[2] - 2*delta
"""
P = self.parent().weight_lattice_realization()
alpha = list(P.simple_roots())
return self.parent()._wt - sum(sum(x) * alpha[i] for i,x in enumerate(self))
class RCHWNonSimplyLacedElement(RCNonSimplyLacedElement):
"""
Rigged configurations in highest weight crystals.
TESTS::
sage: La = RootSystem(['C',2,1]).weight_lattice(extended=True).fundamental_weights()
sage: vct = CartanType(['C',2,1]).as_folding()
sage: RC = crystals.RiggedConfigurations(vct, La[0])
sage: elt = RC(partition_list=[[1,1],[2],[2]]); ascii_art(elt)
-1[ ]-1 2[ ][ ]2 -2[ ][ ]-2
-1[ ]-1
sage: TestSuite(elt).run()
"""
def check(self):
"""
Make sure all of the riggings are less than or equal to the
vacancy number.
TESTS::
sage: La = RootSystem(['C',2,1]).weight_lattice(extended=True).fundamental_weights()
sage: vct = CartanType(['C',2,1]).as_folding()
sage: RC = crystals.RiggedConfigurations(vct, La[0])
sage: elt = RC(partition_list=[[1,1],[2],[2]])
sage: elt.check()
"""
for partition in self:
for i, vac_num in enumerate(partition.vacancy_numbers):
if vac_num < partition.rigging[i]:
raise ValueError("rigging can be at most the vacancy number")
def f(self, a):
r"""
Return the action of `f_a` on ``self``.
This works by lifting into the virtual configuration, then applying
.. MATH::
f^v_a = \prod_{j \in \iota(a)} \hat{f}_j^{\gamma_j}
and pulling back.
EXAMPLES::
sage: La = RootSystem(['C',2,1]).weight_lattice(extended=True).fundamental_weights()
sage: vct = CartanType(['C',2,1]).as_folding()
sage: RC = crystals.RiggedConfigurations(vct, La[0])
sage: elt = RC(partition_list=[[1,1],[2],[2]])
sage: elt.f(0)
sage: ascii_art(elt.f(1))
0[ ]0 0[ ][ ]0 -1[ ][ ]-1
0[ ]0 -1[ ]-1
sage: elt.f(2)
"""
if not self.phi(a):
return None
return RCNonSimplyLacedElement.f(self, a)
# FIXME: Do not duplicate with the simply-laced HW RC element class
def weight(self):
"""
Return the weight of ``self``.
EXAMPLES::
sage: La = RootSystem(['C',2,1]).weight_lattice(extended=True).fundamental_weights()
sage: vct = CartanType(['C',2,1]).as_folding()
sage: B = crystals.RiggedConfigurations(vct, La[0])
sage: mg = B.module_generators[0]
sage: mg.f_string([0,1,2]).weight()
2*Lambda[1] - Lambda[2] - delta
"""
P = self.parent().weight_lattice_realization()
alpha = list(P.simple_roots())
return self.parent()._wt - sum(sum(x) * alpha[i] for i,x in enumerate(self))
##############################################
## KR crystal rigged configuration elements ##
##############################################
class KRRiggedConfigurationElement(RiggedConfigurationElement):
r"""
`U_q^{\prime}(\mathfrak{g})` rigged configurations.
EXAMPLES:
We can go between :class:`rigged configurations <RiggedConfigurations>`
and tensor products of :class:`tensor products of KR tableaux
<sage.combinat.rigged_configurations.tensor_product_kr_tableaux.TensorProductOfKirillovReshetikhinTableaux>`::
sage: RC = RiggedConfigurations(['D', 4, 1], [[1,1], [2,1]])
sage: rc_elt = RC(partition_list=[[1], [1,1], [1], [1]])
sage: tp_krtab = rc_elt.to_tensor_product_of_kirillov_reshetikhin_tableaux(); tp_krtab
[[-2]] (X) [[1], [2]]
sage: tp_krcrys = rc_elt.to_tensor_product_of_kirillov_reshetikhin_crystals(); tp_krcrys
[[[-2]], [[1], [2]]]
sage: tp_krcrys == tp_krtab.to_tensor_product_of_kirillov_reshetikhin_crystals()
True
sage: RC(tp_krcrys) == rc_elt
True
sage: RC(tp_krtab) == rc_elt
True
sage: tp_krtab.to_rigged_configuration() == rc_elt
True
"""
def __init__(self, parent, rigged_partitions=[], **options):
r"""
Construct a rigged configuration element.
EXAMPLES::
sage: RC = RiggedConfigurations(['A', 4, 1], [[2, 1]])
sage: RC(partition_list=[[], [], [], []])
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
sage: RC(partition_list=[[1], [1], [], []])
<BLANKLINE>
-1[ ]-1
<BLANKLINE>
0[ ]0
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
sage: elt = RC(partition_list=[[1], [1], [], []], rigging_list=[[-1], [0], [], []]); elt
<BLANKLINE>
-1[ ]-1
<BLANKLINE>
0[ ]0
<BLANKLINE>
(/)
<BLANKLINE>
(/)
<BLANKLINE>
sage: TestSuite(elt).run()
"""
n = len(parent._rc_index)
if "KT_constructor" in options:
# Used only by the Kleber tree
# Not recommended to be called by the user since it avoids safety
# checks for speed
data = options["KT_constructor"]
shape_data = data[0]
rigging_data = data[1]
vac_data = data[2]
nu = []
for i in range(n):
nu.append(RiggedPartition(shape_data[i], rigging_data[i], vac_data[i]))
# Special display case
if parent.cartan_type().type() == 'B':
nu[-1] = RiggedPartitionTypeB(nu[-1])
ClonableArray.__init__(self, parent, nu)
return
RiggedConfigurationElement.__init__(self, parent, rigged_partitions, n=n, **options)
# Special display case
if parent.cartan_type().type() == 'B':
self._set_mutable()
self[-1] = RiggedPartitionTypeB(self[-1])
self.set_immutable()
def check(self):
"""
Make sure all of the riggings are less than or equal to the
vacancy number.
TESTS::
sage: RC = RiggedConfigurations(['A', 4, 1], [[2, 1]])
sage: elt = RC(partition_list=[[1], [1], [], []])
sage: elt.check()
"""
for partition in self:
for i, vac_num in enumerate(partition.vacancy_numbers):
if vac_num < partition.rigging[i]:
raise ValueError("rigging can be at most the vacancy number")
def e(self, a):
r"""
Return the action of the crystal operator `e_a` on ``self``.
For the classical operators, this implements the method defined
in [CrysStructSchilling06]_. For `e_0`, this converts the class to
a tensor product of KR tableaux and does the corresponding `e_0`
and pulls back.
.. TODO::
Implement `e_0` without appealing to tensor product of
KR tableaux.
INPUT:
- ``a`` -- the index of the partition to remove a box
OUTPUT:
The resulting rigged configuration element.
EXAMPLES::
sage: RC = RiggedConfigurations(['A', 4, 1], [[2,1]])
sage: elt = RC(partition_list=[[1], [1], [1], [1]])
sage: elt.e(3)
sage: elt.e(1)
<BLANKLINE>
(/)
<BLANKLINE>
0[ ]0
<BLANKLINE>
0[ ]0
<BLANKLINE>
-1[ ]-1
<BLANKLINE>
"""
if a not in self.parent()._cartan_type.index_set():
raise ValueError("{} is not in the index set".format(a))
if a == self.parent()._cartan_type.special_node():
try:
ret = self.to_tensor_product_of_kirillov_reshetikhin_tableaux().e(a)
if ret is None:
return None
return ret.to_rigged_configuration()
except NotImplementedError:
# We haven't implemented the bijection yet, so return None
# This is to make sure we can at least view it as a classical
# crystal if there is no bijection.
return None
return RiggedConfigurationElement.e(self, a)
def f(self, a):
r"""
Return the action of the crystal operator `f_a` on ``self``.
For the classical operators, this implements the method defined
in [CrysStructSchilling06]_. For `f_0`, this converts the class to
a tensor product of KR tableaux and does the corresponding `f_0`
and pulls back.
.. TODO::
Implement `f_0` without appealing to tensor product of
KR tableaux.
INPUT:
- ``a`` -- the index of the partition to add a box
OUTPUT:
The resulting rigged configuration element.
EXAMPLES::
sage: RC = RiggedConfigurations(['A', 4, 1], [[2,1]])
sage: elt = RC(partition_list=[[1], [1], [1], [1]])
sage: elt.f(1)
sage: elt.f(2)
<BLANKLINE>
0[ ]0
<BLANKLINE>
-1[ ]-1
-1[ ]-1
<BLANKLINE>
1[ ]1
<BLANKLINE>
-1[ ]-1
<BLANKLINE>
"""
ct = self.parent()._cartan_type
if a not in ct.index_set():
raise ValueError("{} is not in the index set".format(a))
if a == ct.special_node():
try:
ret = self.to_tensor_product_of_kirillov_reshetikhin_tableaux().f(a)
if ret is None:
return None
return ret.to_rigged_configuration()
except NotImplementedError:
# We haven't implemented the bijection yet, so return None
# This is to make sure we can at least view it as a classical
# crystal if there is no bijection.
return None
if not self.phi(a):
return None
return RiggedConfigurationElement.f(self, a)
def epsilon(self, a):
r"""
Return `\varepsilon_a` of ``self``.
EXAMPLES::
sage: RC = RiggedConfigurations(['D', 4, 1], [[2, 2]])
sage: I = RC.index_set()
sage: matrix([[mg.epsilon(i) for i in I] for mg in RC.module_generators])
[4 0 0 0 0]
[3 0 0 0 0]
[2 0 0 0 0]
"""
if a == self.parent()._cartan_type.special_node():
return self.to_tensor_product_of_kirillov_reshetikhin_tableaux().epsilon(a)
return RiggedConfigurationElement.epsilon(self, a)
def phi(self, a):
r"""
Return `\varphi_a` of ``self``.
EXAMPLES::
sage: RC = RiggedConfigurations(['D', 4, 1], [[2, 2]])
sage: I = RC.index_set()
sage: matrix([[mg.phi(i) for i in I] for mg in RC.module_generators])
[0 0 2 0 0]
[1 0 1 0 0]
[2 0 0 0 0]
"""
if a == self.parent()._cartan_type.special_node():
return self.to_tensor_product_of_kirillov_reshetikhin_tableaux().phi(a)
return RiggedConfigurationElement.phi(self, a)
def weight(self):
"""
Return the weight of ``self``.
EXAMPLES::
sage: RC = RiggedConfigurations(['E', 6, 1], [[2,2]])
sage: [x.weight() for x in RC.module_generators]
[-4*Lambda[0] + 2*Lambda[2], -2*Lambda[0] + Lambda[2], 0]
sage: KR = crystals.KirillovReshetikhin(['E',6,1], 2,2)
sage: [x.weight() for x in KR.module_generators] # long time
[0, -2*Lambda[0] + Lambda[2], -4*Lambda[0] + 2*Lambda[2]]
sage: RC = RiggedConfigurations(['D', 6, 1], [[4,2]])
sage: [x.weight() for x in RC.module_generators]
[-4*Lambda[0] + 2*Lambda[4], -4*Lambda[0] + Lambda[2] + Lambda[4],
-2*Lambda[0] + Lambda[4], -4*Lambda[0] + 2*Lambda[2],
-2*Lambda[0] + Lambda[2], 0]
"""
WLR = self.parent().weight_lattice_realization()
La = WLR.fundamental_weights()
cl_index = self.parent()._rc_index
wt = WLR.sum((self.phi(i) - self.epsilon(i)) * La[i] for i in cl_index)
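        # Lift the classical weight to a level-zero affine weight by subtracting
        # its level times Lambda[0].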
return -wt.level() * La[0] + wt
@cached_method
def classical_weight(self):
r"""
Return the classical weight of ``self``.
The classical weight `\Lambda` of a rigged configuration is
.. MATH::
\Lambda = \sum_{a \in \overline{I}} \sum_{i > 0}
i L_i^{(a)} \Lambda_a - \sum_{a \in \overline{I}} \sum_{i > 0}
i m_i^{(a)} \alpha_a.
EXAMPLES::
sage: RC = RiggedConfigurations(['D',4,1], [[2,2]])
sage: elt = RC(partition_list=[[2],[2,1],[1],[1]])
sage: elt.classical_weight()
(0, 1, 1, 0)
This agrees with the corresponding classical weight as KR tableaux::
sage: krt = elt.to_tensor_product_of_kirillov_reshetikhin_tableaux(); krt
[[2, 1], [3, -1]]
sage: krt.classical_weight() == elt.classical_weight()
True
TESTS:
We check the classical weights agree in an entire crystal::
sage: RC = RiggedConfigurations(['A',2,1], [[2,1], [1,1]])
sage: for x in RC:
....: y = x.to_tensor_product_of_kirillov_reshetikhin_tableaux()
....: assert x.classical_weight() == y.classical_weight()
"""
F = self.cartan_type().classical().root_system()
if F.ambient_space() is None:
WLR = F.weight_lattice()
else:
WLR = F.ambient_space()
La = WLR.fundamental_weights()
wt = WLR.sum(La[r] * s for r,s in self.parent().dims)
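        # Start from the weight of the tensor factors, the sum of s*Lambda_r over
        # the factors B^{r,s}, then subtract |nu^{(a)}| * alpha_a for each a below.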
alpha = WLR.simple_roots()
rc_index = self.parent()._rc_index
for a, nu in enumerate(self):
wt -= sum(nu) * alpha[rc_index[a]]
return wt
def to_tensor_product_of_kirillov_reshetikhin_tableaux(self, display_steps=False, build_graph=False):
r"""
Perform the bijection from this rigged configuration to a tensor
product of Kirillov-Reshetikhin tableaux given in [RigConBijection]_
for single boxes and with [BijectionLRT]_ and [BijectionDn]_ for
multiple columns and rows.
.. NOTE::
This is only proven to be a bijection in types `A_n^{(1)}`
and `D_n^{(1)}`, as well as `\bigotimes_i B^{r_i,1}` and
`\bigotimes_i B^{1,s_i}` for general affine types.
INPUT:
- ``display_steps`` -- (default: ``False``) boolean which indicates
if we want to print each step in the algorithm
- ``build_graph`` -- (default: ``False``) boolean which indicates
if we want to construct and return a graph of the bijection whose
vertices are rigged configurations obtained at each step and edges
are labeled by either the return value of `\delta` or the
doubling/halving map
OUTPUT:
- The tensor product of KR tableaux element corresponding to this
rigged configuration.
EXAMPLES::
sage: RC = RiggedConfigurations(['A', 4, 1], [[2, 2]])
sage: RC(partition_list=[[2], [2,2], [2], [2]]).to_tensor_product_of_kirillov_reshetikhin_tableaux()
[[3, 3], [5, 5]]
sage: RC = RiggedConfigurations(['D', 4, 1], [[2, 2]])
sage: elt = RC(partition_list=[[2], [2,2], [1], [1]])
sage: tp_krt = elt.to_tensor_product_of_kirillov_reshetikhin_tableaux(); tp_krt
[[2, 3], [3, -2]]
This is invertible by calling
:meth:`~sage.combinat.rigged_configurations.tensor_product_kr_tableaux_element.TensorProductOfKirillovReshetikhinTableauxElement.to_rigged_configuration()`::
sage: ret = tp_krt.to_rigged_configuration(); ret
<BLANKLINE>
0[ ][ ]0
<BLANKLINE>
-2[ ][ ]-2
-2[ ][ ]-2
<BLANKLINE>
0[ ]0
<BLANKLINE>
0[ ]0
<BLANKLINE>
sage: elt == ret
True
To view the steps of the bijection in the output, run with
the ``display_steps=True`` option::
sage: elt.to_tensor_product_of_kirillov_reshetikhin_tableaux(True)
====================
...
====================
<BLANKLINE>
0[ ]0
<BLANKLINE>
-2[ ][ ]-2
0[ ]0
<BLANKLINE>
0[ ]0
<BLANKLINE>
0[ ]0
<BLANKLINE>
--------------------
[[3, 2]]
--------------------
...
[[2, 3], [3, -2]]
We can also construct and display a graph of the bijection
as follows::
sage: ret, G = elt.to_tensor_product_of_kirillov_reshetikhin_tableaux(build_graph=True)
sage: view(G) # not tested
"""
from sage.combinat.rigged_configurations.bijection import RCToKRTBijection
bij = RCToKRTBijection(self)
ret = bij.run(display_steps, build_graph)
if build_graph:
return (ret, bij._graph)
return ret
def to_tensor_product_of_kirillov_reshetikhin_crystals(self, display_steps=False, build_graph=False):
r"""
Return the corresponding tensor product of Kirillov-Reshetikhin
crystals.
This is a composition of the map to a tensor product of KR tableaux,
and then to a tensor product of KR crystals.
INPUT:
- ``display_steps`` -- (default: ``False``) boolean which indicates
if we want to print each step in the algorithm
- ``build_graph`` -- (default: ``False``) boolean which indicates
if we want to construct and return a graph of the bijection whose
vertices are rigged configurations obtained at each step and edges
are labeled by either the return value of `\delta` or the
doubling/halving map
EXAMPLES::
sage: RC = RiggedConfigurations(['D', 4, 1], [[2, 2]])
sage: elt = RC(partition_list=[[2], [2,2], [1], [1]])
sage: krc = elt.to_tensor_product_of_kirillov_reshetikhin_crystals(); krc
[[[2, 3], [3, -2]]]
We can recover the rigged configuration::
sage: ret = RC(krc); ret
<BLANKLINE>
0[ ][ ]0
<BLANKLINE>
-2[ ][ ]-2
-2[ ][ ]-2
<BLANKLINE>
0[ ]0
<BLANKLINE>
0[ ]0
<BLANKLINE>
sage: elt == ret
True
We can also construct and display a graph of the bijection
as follows::
sage: ret, G = elt.to_tensor_product_of_kirillov_reshetikhin_crystals(build_graph=True)
sage: view(G) # not tested
"""
if build_graph:
kr_tab, G = self.to_tensor_product_of_kirillov_reshetikhin_tableaux(display_steps, build_graph)
return (kr_tab.to_tensor_product_of_kirillov_reshetikhin_crystals(), G)
kr_tab = self.to_tensor_product_of_kirillov_reshetikhin_tableaux(display_steps)
return kr_tab.to_tensor_product_of_kirillov_reshetikhin_crystals()
# TODO: Move the morphisms to a lazy attribute of RiggedConfigurations
# once #15463 is done
def left_split(self):
r"""
Return the image of ``self`` under the left column splitting
map `\beta`.
Consider the map `\beta : RC(B^{r,s} \otimes B) \to RC(B^{r,1}
\otimes B^{r,s-1} \otimes B)` for `s > 1` which is a natural classical
crystal injection. On rigged configurations, the map `\beta` does
nothing (except possibly changing the vacancy numbers).
EXAMPLES::
sage: RC = RiggedConfigurations(['C',4,1], [[3,3]])
sage: mg = RC.module_generators[-1]
sage: ascii_art(mg)
0[ ][ ]0 0[ ][ ]0 0[ ][ ]0 0[ ]0
0[ ][ ]0 0[ ][ ]0 0[ ]0
0[ ][ ]0 0[ ]0
sage: ascii_art(mg.left_split())
0[ ][ ]0 0[ ][ ]0 1[ ][ ]0 0[ ]0
0[ ][ ]0 1[ ][ ]0 0[ ]0
1[ ][ ]0 0[ ]0
"""
P = self.parent()
if P.dims[0][1] == 1:
raise ValueError("cannot split a single column")
r,s = P.dims[0]
B = [[r,1], [r,s-1]]
B.extend(P.dims[1:])
from sage.combinat.rigged_configurations.rigged_configurations import RiggedConfigurations
RC = RiggedConfigurations(P._cartan_type, B)
return RC(*[x._clone() for x in self]) # Make a deep copy
def right_split(self):
r"""
Return the image of ``self`` under the right column splitting
map `\beta^*`.
Let `\theta` denote the
:meth:`complement rigging map<complement_rigging>` which reverses
the tensor factors and `\beta` denote the
:meth:`left splitting map<left_split>`, we define the right
splitting map by `\beta^* := \theta \circ \beta \circ \theta`.
EXAMPLES::
sage: RC = RiggedConfigurations(['C',4,1], [[3,3]])
sage: mg = RC.module_generators[-1]
sage: ascii_art(mg)
0[ ][ ]0 0[ ][ ]0 0[ ][ ]0 0[ ]0
0[ ][ ]0 0[ ][ ]0 0[ ]0
0[ ][ ]0 0[ ]0
sage: ascii_art(mg.right_split())
0[ ][ ]0 0[ ][ ]0 1[ ][ ]1 0[ ]0
0[ ][ ]0 1[ ][ ]1 0[ ]0
1[ ][ ]1 0[ ]0
sage: RC = RiggedConfigurations(['D',4,1], [[2,2],[1,2]])
sage: elt = RC(partition_list=[[3,1], [2,2,1], [2,1], [2]])
sage: ascii_art(elt)
-1[ ][ ][ ]-1 0[ ][ ]0 -1[ ][ ]-1 1[ ][ ]1
0[ ]0 0[ ][ ]0 -1[ ]-1
0[ ]0
sage: ascii_art(elt.right_split())
-1[ ][ ][ ]-1 0[ ][ ]0 -1[ ][ ]-1 1[ ][ ]1
1[ ]0 0[ ][ ]0 -1[ ]-1
0[ ]0
We check that the bijection commutes with the right splitting map::
sage: RC = RiggedConfigurations(['A', 3, 1], [[1,1], [2,2]])
sage: all(rc.right_split().to_tensor_product_of_kirillov_reshetikhin_tableaux()
....: == rc.to_tensor_product_of_kirillov_reshetikhin_tableaux().right_split() for rc in RC)
True
"""
return self.complement_rigging(True).left_split().complement_rigging(True)
def left_box(self, return_b=False):
r"""
Return the image of ``self`` under the left box removal map `\delta`.
The map `\delta : RC(B^{r,1} \otimes B) \to RC(B^{r-1,1}
\otimes B)` (if `r = 1`, then we remove the left-most factor) is the
basic map in the bijection `\Phi` between rigged configurations and
tensor products of Kirillov-Reshetikhin tableaux. For more
information, see
:meth:`to_tensor_product_of_kirillov_reshetikhin_tableaux()`.
We can extend `\delta` when the left-most factor is not a single
column by precomposing with a :meth:`left_split()`.
.. NOTE::
Due to the special nature of the bijection for the spinor cases in
types `D_n^{(1)}`, `B_n^{(1)}`, and `A_{2n-1}^{(2)}`, this map is
not defined in these cases.
INPUT:
- ``return_b`` -- (default: ``False``) whether to return the
resulting letter from `\delta`
OUTPUT:
The resulting rigged configuration or if ``return_b`` is ``True``,
then a tuple of the resulting rigged configuration and the letter.
EXAMPLES::
sage: RC = RiggedConfigurations(['C',4,1], [[3,2]])
sage: mg = RC.module_generators[-1]
sage: ascii_art(mg)
0[ ][ ]0 0[ ][ ]0 0[ ][ ]0 0[ ]0
0[ ][ ]0 0[ ][ ]0 0[ ]0
0[ ][ ]0 0[ ]0
sage: ascii_art(mg.left_box())
0[ ]0 0[ ][ ]0 0[ ][ ]0 0[ ]0
0[ ]0 0[ ][ ]0 0[ ]0
sage: x,b = mg.left_box(True)
sage: b
-1
sage: b.parent()
The crystal of letters for type ['C', 4]
"""
# Don't do spinor cases
P = self.parent()
ct = P.cartan_type()
if ct.type() == 'D':
if P.dims[0][0] >= ct.rank() - 2:
raise ValueError("only for non-spinor cases")
elif ct.type() == 'B' or ct.dual().type() == 'B':
if P.dims[0][0] == ct.rank() - 1:
raise ValueError("only for non-spinor cases")
from sage.combinat.rigged_configurations.bijection import RCToKRTBijection
rc = self
if P.dims[0][1] != 1:
rc = self.left_split()
bij = RCToKRTBijection(rc)
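        # Perform a single delta step of the bijection: lower the height of the
        # left-most factor by one and record the letter b that the step returns.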
ht = bij.cur_dims[0][0]
bij.cur_dims[0][0] = bij._next_index(ht)
b = bij.next_state(ht)
if bij.cur_dims[0][0] == 0:
bij.cur_dims.pop(0)
from sage.combinat.rigged_configurations.rigged_configurations import RiggedConfigurations
RC = RiggedConfigurations(ct, bij.cur_dims)
rc = RC(*bij.cur_partitions)
if return_b:
from sage.combinat.crystals.letters import CrystalOfLetters
L = CrystalOfLetters(self.parent()._cartan_type.classical())
return (rc, L(b))
return rc
delta = left_box
def left_column_box(self):
r"""
Return the image of ``self`` under the left column box splitting
map `\gamma`.
Consider the map `\gamma : RC(B^{r,1} \otimes B) \to RC(B^{1,1}
\otimes B^{r-1,1} \otimes B)` for `r > 1`, which is a natural strict
classical crystal injection. On rigged configurations, the map
`\gamma` adds a singular string of length `1` to `\nu^{(a)}`.
We can extend `\gamma` when the left-most factor is not a single
column by precomposing with a :meth:`left_split()`.
EXAMPLES::
sage: RC = RiggedConfigurations(['C',3,1], [[3,1], [2,1]])
sage: mg = RC.module_generators[-1]
sage: ascii_art(mg)
0[ ]0 0[ ][ ]0 0[ ]0
0[ ]0 0[ ]0
sage: ascii_art(mg.left_column_box())
0[ ]0 0[ ][ ]0 0[ ]0
0[ ]0 0[ ]0 0[ ]0
0[ ]0
sage: RC = RiggedConfigurations(['C',3,1], [[2,1], [1,1], [3,1]])
sage: mg = RC.module_generators[7]
sage: ascii_art(mg)
1[ ]0 0[ ][ ]0 0[ ]0
0[ ]0 0[ ]0
sage: ascii_art(mg.left_column_box())
1[ ]1 0[ ][ ]0 0[ ]0
1[ ]0 0[ ]0 0[ ]0
"""
P = self.parent()
r = P.dims[0][0]
if r == 1:
raise ValueError("cannot split a single box")
ct = P.cartan_type()
if ct.type() == 'D':
if P.dims[0][0] >= ct.rank() - 2:
raise ValueError("only for non-spinor cases")
elif ct.type() == 'B' or ct.dual().type() == 'B':
if P.dims[0][0] == ct.rank() - 1:
raise ValueError("only for non-spinor cases")
if P.dims[0][1] > 1:
return self.left_split().left_column_box()
B = [[1,1], [r-1,1]]
B.extend(P.dims[1:])
from sage.combinat.rigged_configurations.rigged_configurations import RiggedConfigurations
RC = RiggedConfigurations(P._cartan_type, B)
parts = [x._clone() for x in self] # Make a deep copy
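        # gamma appends a singular string of length 1 (rigging equal to its
        # vacancy number) to nu^{(a)} for every a < r.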
for nu in parts[:r-1]:
nu._list.append(1)
for a, nu in enumerate(parts[:r-1]):
vac_num = RC._calc_vacancy_number(parts, a, 1)
i = nu._list.index(1)
nu.vacancy_numbers.insert(i, vac_num)
nu.rigging.insert(i, vac_num)
return RC(*parts)
def right_column_box(self):
r"""
Return the image of ``self`` under the right column box splitting
map `\gamma^*`.
Consider the map `\gamma^* : RC(B \otimes B^{r,1}) \to RC(B \otimes
B^{r-1,1} \otimes B^{1,1})` for `r > 1`, which is a natural strict
classical crystal injection. On rigged configurations, the map
        `\gamma^*` adds a string of length `1` with rigging 0 to `\nu^{(a)}`
for all `a < r` to a classically highest weight element and extended
as a classical crystal morphism.
We can extend `\gamma^*` when the right-most factor is not a single
column by precomposing with a :meth:`right_split()`.
EXAMPLES::
sage: RC = RiggedConfigurations(['C',3,1], [[2,1], [1,1], [3,1]])
sage: mg = RC.module_generators[7]
sage: ascii_art(mg)
1[ ]0 0[ ][ ]0 0[ ]0
0[ ]0 0[ ]0
sage: ascii_art(mg.right_column_box())
1[ ]0 0[ ][ ]0 0[ ]0
1[ ]0 0[ ]0 0[ ]0
0[ ]0
"""
P = self.parent()
r = P.dims[-1][0]
if r == 1:
raise ValueError("cannot split a single box")
ct = P.cartan_type()
if ct.type() == 'D':
if P.dims[-1][0] >= ct.rank() - 2:
raise ValueError("only for non-spinor cases")
elif ct.type() == 'B' or ct.dual().type() == 'B':
if P.dims[-1][0] == ct.rank() - 1:
raise ValueError("only for non-spinor cases")
if P.dims[-1][1] > 1:
return self.right_split().right_column_box()
rc, e_string = self.to_highest_weight(P._rc_index)
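        # Work at the classically highest weight element, add the new length-1
        # strings with rigging 0 there, then apply the recorded lowering
        # operators to return to the original element.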
B = P.dims[:-1] + ([r-1,1], [1,1])
from sage.combinat.rigged_configurations.rigged_configurations import RiggedConfigurations
RC = RiggedConfigurations(P._cartan_type, B)
parts = [x._clone() for x in rc] # Make a deep copy
for nu in parts[:r-1]:
nu._list.append(1)
for a, nu in enumerate(parts[:r-1]):
vac_num = RC._calc_vacancy_number(parts, a, -1)
nu.vacancy_numbers.append(vac_num)
nu.rigging.append(0)
return RC(*parts).f_string(reversed(e_string))
def complement_rigging(self, reverse_factors=False):
r"""
Apply the complement rigging morphism `\theta` to ``self``.
Consider a highest weight rigged configuration `(\nu, J)`, the
complement rigging morphism `\theta : RC(L) \to RC(L)` is given by
sending `(\nu, J) \mapsto (\nu, J')`, where `J'` is obtained by
taking the coriggings `x' = p_i^{(a)} - x`, and then extending as
a crystal morphism. (The name comes from taking the complement
partition for the riggings in a `m_i^{(a)} \times p_i^{(a)}` box.)
INPUT:
- ``reverse_factors`` -- (default: ``False``) if ``True``, then this
returns an element in `RC(B')` where `B'` is the tensor factors
of ``self`` in reverse order
EXAMPLES::
sage: RC = RiggedConfigurations(['D',4,1], [[1,1],[2,2]])
sage: mg = RC.module_generators[-1]
sage: ascii_art(mg)
1[ ][ ]1 0[ ][ ]0 0[ ][ ]0 0[ ][ ]0
0[ ][ ]0
sage: ascii_art(mg.complement_rigging())
1[ ][ ]0 0[ ][ ]0 0[ ][ ]0 0[ ][ ]0
0[ ][ ]0
sage: lw = mg.to_lowest_weight([1,2,3,4])[0]
sage: ascii_art(lw)
-1[ ][ ]-1 0[ ][ ]0 0[ ][ ]0 0[ ][ ]0
-1[ ]-1 0[ ][ ]0 0[ ]0 0[ ]0
-1[ ]-1 0[ ]0
0[ ]0
sage: ascii_art(lw.complement_rigging())
-1[ ][ ][ ]-1 0[ ][ ][ ]0 0[ ][ ][ ]0 0[ ][ ][ ]0
-1[ ]-1 0[ ][ ][ ]0
sage: lw.complement_rigging() == mg.complement_rigging().to_lowest_weight([1,2,3,4])[0]
True
sage: mg.complement_rigging(True).parent()
Rigged configurations of type ['D', 4, 1] and factor(s) ((2, 2), (1, 1))
We check that the Lusztig involution (under the modification of also
mapping to the highest weight element) intertwines with the
complement map `\theta` (that reverses the tensor factors)
under the bijection `\Phi`::
sage: RC = RiggedConfigurations(['D', 4, 1], [[2, 2], [2, 1], [1, 2]])
sage: for mg in RC.module_generators: # long time
....: y = mg.to_tensor_product_of_kirillov_reshetikhin_tableaux()
....: hw = y.lusztig_involution().to_highest_weight([1,2,3,4])[0]
....: c = mg.complement_rigging(True)
....: hwc = c.to_tensor_product_of_kirillov_reshetikhin_tableaux()
....: assert hw == hwc
TESTS:
We check that :trac:`18898` is fixed::
sage: RC = RiggedConfigurations(['D',4,1], [[2,1], [2,1], [2,3]])
sage: x = RC(partition_list=[[1], [1,1], [1], [1]], rigging_list=[[0], [2,1], [0], [0]])
sage: ascii_art(x)
0[ ]0 2[ ]2 0[ ]0 0[ ]0
2[ ]1
sage: ascii_art(x.complement_rigging())
0[ ]0 2[ ]1 0[ ]0 0[ ]0
2[ ]0
"""
P = self.parent()
if reverse_factors:
from sage.combinat.rigged_configurations.rigged_configurations import RiggedConfigurations
P = RiggedConfigurations(P._cartan_type, reversed(P.dims))
mg, e_str = self.to_highest_weight(P._rc_index)
nu = []
rig = []
for a,p in enumerate(mg):
nu.append(list(p))
vac_nums = p.vacancy_numbers
riggings = [vac - p.rigging[i] for i,vac in enumerate(vac_nums)]
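            # Re-sort the complemented riggings within each block of equal row
            # lengths so that they remain weakly decreasing.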
block = 0
for j,i in enumerate(p):
if p[block] != i:
riggings[block:j] = sorted(riggings[block:j], reverse=True)
block = j
riggings[block:] = sorted(riggings[block:], reverse=True)
rig.append(riggings)
rc = P(partition_list=nu, rigging_list=rig)
return rc.f_string(reversed(e_str))
class KRRCSimplyLacedElement(KRRiggedConfigurationElement):
r"""
`U_q^{\prime}(\mathfrak{g})` rigged configurations in simply-laced types.
TESTS::
sage: RC = RiggedConfigurations(['A', 3, 1], [[3, 2], [2,1], [1,1]])
sage: elt = RC(partition_list=[[1], [1], []]); elt
<BLANKLINE>
0[ ]0
<BLANKLINE>
0[ ]0
<BLANKLINE>
(/)
<BLANKLINE>
sage: TestSuite(elt).run()
"""
@cached_method
def cocharge(self):
r"""
Compute the cocharge statistic of ``self``.
Computes the cocharge statistic [CrysStructSchilling06]_ on this
rigged configuration `(\nu, J)`. The cocharge statistic is defined as:
.. MATH::
cc(\nu, J) = \frac{1}{2} \sum_{a, b \in I_0}
\sum_{j,k > 0} \left( \alpha_a \mid \alpha_b \right)
\min(j, k) m_j^{(a)} m_k^{(b)}
+ \sum_{a \in I} \sum_{i > 0} \left\lvert J^{(a, i)} \right\rvert.
EXAMPLES::
sage: RC = RiggedConfigurations(['A', 3, 1], [[3, 2], [2,1], [1,1]])
sage: RC(partition_list=[[1], [1], []]).cocharge()
1
"""
cc = 0
rigging_sum = 0
for a, p in enumerate(self):
for pos, i in enumerate(p._list):
# Add the rigging
rigging_sum += p.rigging[pos]
# Add the L matrix contribution
for dim in self.parent().dims:
if dim[0] == a + 1:
cc += min(dim[1], i)
# Subtract the vacancy number
cc -= p.vacancy_numbers[pos]
return cc // 2 + rigging_sum
cc = cocharge
@cached_method
def charge(self):
r"""
Compute the charge statistic of ``self``.
Let `B` denote a set of rigged configurations. The *charge* `c` of
a rigged configuration `b` is computed as
.. MATH::
c(b) = \max(cc(b) \mid b \in B) - cc(b).
EXAMPLES::
sage: RC = RiggedConfigurations(['A', 3, 1], [[3, 2], [2,1], [1,1]])
sage: RC(partition_list=[[],[],[]]).charge()
2
sage: RC(partition_list=[[1], [1], []]).charge()
1
"""
B = self.parent()
if not hasattr(B, "_max_charge"):
B._max_charge = max(b.cocharge() for b in B.module_generators)
return B._max_charge - self.cocharge()
class KRRCNonSimplyLacedElement(KRRiggedConfigurationElement, RCNonSimplyLacedElement):
r"""
`U_q^{\prime}(\mathfrak{g})` rigged configurations in non-simply-laced
types.
TESTS::
sage: RC = RiggedConfigurations(['C',2,1], [[1,2],[1,1],[2,1]])
sage: elt = RC(partition_list=[[3],[2]]); elt
<BLANKLINE>
0[ ][ ][ ]0
<BLANKLINE>
0[ ][ ]0
sage: TestSuite(elt).run()
"""
def e(self, a):
r"""
Return the action of `e_a` on ``self``.
This works by lifting into the virtual configuration, then applying
.. MATH::
e^v_a = \prod_{j \in \iota(a)} \hat{e}_j^{\gamma_j}
and pulling back.
EXAMPLES::
sage: RC = RiggedConfigurations(['A',6,2], [[1,1]]*7)
sage: elt = RC(partition_list=[[1]*5,[2,1,1],[3,2]])
sage: elt.e(3)
<BLANKLINE>
0[ ]0
0[ ]0
0[ ]0
0[ ]0
0[ ]0
<BLANKLINE>
0[ ][ ]0
1[ ]1
1[ ]1
<BLANKLINE>
1[ ][ ]1
1[ ]0
<BLANKLINE>
"""
if a == self.parent()._cartan_type.special_node():
try:
ret = self.to_tensor_product_of_kirillov_reshetikhin_tableaux().e(a)
if ret is None:
return None
return ret.to_rigged_configuration()
except (NotImplementedError, TypeError):
# We haven't implemented the bijection yet, so try by lifting
# to the simply-laced case
return RCNonSimplyLacedElement.e(self, a)
if not self.epsilon(a):
return None
return RCNonSimplyLacedElement.e(self, a)
def f(self, a):
r"""
Return the action of `f_a` on ``self``.
This works by lifting into the virtual configuration, then applying
.. MATH::
f^v_a = \prod_{j \in \iota(a)} \hat{f}_j^{\gamma_j}
and pulling back.
EXAMPLES::
sage: RC = RiggedConfigurations(['A',6,2], [[1,1]]*7)
sage: elt = RC(partition_list=[[1]*5,[2,1,1],[2,1]], rigging_list=[[0]*5,[0,1,1],[1,0]])
sage: elt.f(3)
<BLANKLINE>
0[ ]0
0[ ]0
0[ ]0
0[ ]0
0[ ]0
<BLANKLINE>
1[ ][ ]1
1[ ]1
1[ ]1
<BLANKLINE>
-1[ ][ ][ ]-1
0[ ][ ]0
<BLANKLINE>
"""
if a == self.parent()._cartan_type.special_node():
try:
ret = self.to_tensor_product_of_kirillov_reshetikhin_tableaux().f(a)
if ret is None:
return None
return ret.to_rigged_configuration()
except (NotImplementedError, TypeError):
# We haven't implemented the bijection yet, so try by lifting
# to the simply-laced case
return RCNonSimplyLacedElement.f(self, a)
if not self.phi(a):
return None
return RCNonSimplyLacedElement.f(self, a)
@cached_method
def cocharge(self):
r"""
Compute the cocharge statistic.
Computes the cocharge statistic [OSS03]_ on this
rigged configuration `(\nu, J)` by computing the cocharge as a virtual
rigged configuration `(\hat{\nu}, \hat{J})` and then using the
identity `cc(\hat{\nu}, \hat{J}) = \gamma_0 cc(\nu, J)`.
EXAMPLES::
sage: RC = RiggedConfigurations(['C', 3, 1], [[2,1], [1,1]])
sage: RC(partition_list=[[1,1],[2,1],[1,1]]).cocharge()
1
"""
#return self.to_virtual_configuration().cocharge() / self.parent()._folded_ct.gamma[0]
vct = self.parent()._folded_ct
cc = ZZ.zero()
rigging_sum = ZZ.zero()
sigma = vct.folding_orbit()
gamma = vct.scaling_factors()
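        # The factor len(sigma[a+1]) * gamma[a+1] // gamma[0] rescales each node's
        # contribution so that cc(hat(nu), hat(J)) = gamma_0 * cc(nu, J) holds.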
for a, p in enumerate(self):
t_check = len(sigma[a + 1]) * gamma[a+1] // gamma[0]
for pos, i in enumerate(p._list):
# Add the rigging
rigging_sum += t_check * p.rigging[pos]
# Add the L matrix contribution
for dim in self.parent().dims:
if dim[0] == a + 1:
cc += t_check * min(dim[1], i)
# Subtract the vacancy number
cc -= t_check * p.vacancy_numbers[pos]
return cc // 2 + rigging_sum
cc = cocharge
class KRRCTypeA2DualElement(KRRCNonSimplyLacedElement):
r"""
`U_q^{\prime}(\mathfrak{g})` rigged configurations in type
`A_{2n}^{(2)\dagger}`.
"""
def epsilon(self, a):
r"""
Return the value of `\varepsilon_a` of ``self``.
Here we need to modify the usual definition by
`\varepsilon_n^{\prime} := 2 \varepsilon_n`.
EXAMPLES::
sage: RC = RiggedConfigurations(CartanType(['A',4,2]).dual(), [[1,1], [2,2]])
sage: def epsilon(x, i):
....: x = x.e(i)
....: eps = 0
....: while x is not None:
....: x = x.e(i)
....: eps = eps + 1
....: return eps
sage: all(epsilon(rc, 2) == rc.epsilon(2) for rc in RC)
True
"""
if a == self.parent()._cartan_type.special_node():
return self.to_tensor_product_of_kirillov_reshetikhin_tableaux().epsilon(a)
a = self.parent()._rc_index_inverse[a]
if not self[a]:
epsilon = 0
else:
epsilon = -min(0, min(self[a].rigging))
n = len(self.parent()._rc_index)
if a == n-1: # -1 for indexing
epsilon *= 2
return Integer(epsilon)
def phi(self, a):
r"""
Return the value of `\varphi_a` of ``self``.
Here we need to modify the usual definition by
`\varphi_n^{\prime} := 2 \varphi_n`.
EXAMPLES::
sage: RC = RiggedConfigurations(CartanType(['A',4,2]).dual(), [[1,1], [2,2]])
sage: def phi(x, i):
....: x = x.f(i)
....: ph = 0
....: while x is not None:
....: x = x.f(i)
....: ph = ph + 1
....: return ph
sage: all(phi(rc, 2) == rc.phi(2) for rc in RC)
True
"""
if a == self.parent()._cartan_type.special_node():
return self.to_tensor_product_of_kirillov_reshetikhin_tableaux().phi(a)
a = self.parent()._rc_index_inverse[a]
p_inf = self.parent()._calc_vacancy_number(self, a, float("inf"))
if not self[a]:
phi = p_inf
else:
phi = p_inf - min(0, min(self[a].rigging))
n = len(self.parent()._rc_index)
if a == n-1: # -1 for indexing
phi *= 2
return Integer(phi)
@cached_method
def cocharge(self):
r"""
Compute the cocharge statistic.
Computes the cocharge statistic [RigConBijection]_ on this
rigged configuration `(\nu, J)`. The cocharge statistic is
computed as:
.. MATH::
cc(\nu, J) = \frac{1}{2} \sum_{a \in I_0} \sum_{i > 0}
t_a^{\vee} m_i^{(a)} \left( \sum_{j > 0} \min(i, j) L_j^{(a)}
- p_i^{(a)} \right) + \sum_{a \in I} t_a^{\vee} \sum_{i > 0}
\left\lvert J^{(a, i)} \right\rvert.
EXAMPLES::
sage: RC = RiggedConfigurations(CartanType(['A',4,2]).dual(), [[1,1],[2,2]])
sage: sc = RC.cartan_type().as_folding().scaling_factors()
sage: all(mg.cocharge() * sc[0] == mg.to_virtual_configuration().cocharge()
....: for mg in RC.module_generators)
True
"""
# return self.to_virtual_configuration().cocharge() / self.parent()._folded_ct.gamma[0]
cc = ZZ.zero()
rigging_sum = ZZ.zero()
# vct = self.parent()._folded_ct
# sigma = vct.folding_orbit()
# gammatilde = list(vct.scaling_factors())
# gammatilde[-1] = 2
for a, p in enumerate(self):
t_check = 1 # == len(sigma[a+1]) * gammatilde[a+1] / gammatilde[0]
for pos, i in enumerate(p._list):
# Add the rigging
rigging_sum += t_check * p.rigging[pos]
# Add the L matrix contribution
for dim in self.parent().dims:
if dim[0] == a + 1:
cc += t_check * min(dim[1], i)
# Subtract the vacancy number
cc -= t_check * p.vacancy_numbers[pos]
return cc / ZZ(2) + rigging_sum
cc = cocharge
| 35.436515 | 165 | 0.503829 |
af638ebcf2be5669d3542a6629636f93e961f797 | 3,143 | py | Python | transformer.py | fangyihao/teamnet | 4f906b80f17626b0b2aedf9b6f495dbd0eb47dd6 | [
"Apache-2.0"
] | 1 | 2022-03-25T03:07:28.000Z | 2022-03-25T03:07:28.000Z | transformer.py | fangyihao/teamnet | 4f906b80f17626b0b2aedf9b6f495dbd0eb47dd6 | [
"Apache-2.0"
] | 1 | 2022-01-05T06:08:00.000Z | 2022-01-05T06:08:29.000Z | transformer.py | fangyihao/teamnet | 4f906b80f17626b0b2aedf9b6f495dbd0eb47dd6 | [
"Apache-2.0"
] | 1 | 2021-07-15T07:25:08.000Z | 2021-07-15T07:25:08.000Z | '''
Created on Jun 21, 2018
@author: fangy5
'''
# Imports we need.
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os
import collections
from tensor2tensor import models
from tensor2tensor import problems
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import t2t_model
from tensor2tensor.utils import registry
from tensor2tensor.utils import metrics
# Enable TF Eager execution
tfe = tf.contrib.eager
tfe.enable_eager_execution()
# Other setup
Modes = tf.estimator.ModeKeys
# Setup some directories
data_dir = os.path.expanduser("~/t2t/data")
tmp_dir = os.path.expanduser("~/t2t/tmp")
train_dir = os.path.expanduser("~/t2t/train")
checkpoint_dir = os.path.expanduser("~/t2t/checkpoints")
tf.gfile.MakeDirs(data_dir)
tf.gfile.MakeDirs(tmp_dir)
tf.gfile.MakeDirs(train_dir)
tf.gfile.MakeDirs(checkpoint_dir)
gs_data_dir = "gs://tensor2tensor-data"
gs_ckpt_dir = "gs://tensor2tensor-checkpoints/"
print(problems.available())
# Fetch the problem
ende_problem = problems.problem("translate_ende_wmt32k")
# Copy the vocab file locally so we can encode inputs and decode model outputs
# All vocabs are stored on GCS
vocab_name = "vocab.ende.32768"
vocab_file = os.path.join(gs_data_dir, vocab_name)
#!gsutil cp {vocab_file} {data_dir}
# Get the encoders from the problem
encoders = ende_problem.feature_encoders(data_dir)
# Setup helper functions for encoding and decoding
def encode(input_str, output_str=None):
"""Input str to features dict, ready for inference"""
inputs = encoders["inputs"].encode(input_str) + [1] # add EOS id
batch_inputs = tf.reshape(inputs, [1, -1, 1]) # Make it 3D.
return {"inputs": batch_inputs}
def decode(integers):
"""List of ints to str"""
integers = list(np.squeeze(integers))
if 1 in integers:
integers = integers[:integers.index(1)]
return encoders["inputs"].decode(np.squeeze(integers))
# There are many models available in Tensor2Tensor
print(registry.list_models())
# Create hparams and the model
model_name = "transformer"
hparams_set = "transformer_base"
hparams = trainer_lib.create_hparams(hparams_set, data_dir=data_dir, problem_name="translate_ende_wmt32k")
# NOTE: Only create the model once when restoring from a checkpoint; it's a
# Layer and so subsequent instantiations will have different variable scopes
# that will not match the checkpoint.
translate_model = registry.model(model_name)(hparams, Modes.EVAL)
# Copy the pretrained checkpoint locally
ckpt_name = "transformer_ende_test"
gs_ckpt = os.path.join(gs_ckpt_dir, ckpt_name)
#!gsutil -q cp -R {gs_ckpt} {checkpoint_dir}
ckpt_path = tf.train.latest_checkpoint(os.path.join(checkpoint_dir, ckpt_name))
# Restore and translate!
def translate(inputs):
encoded_inputs = encode(inputs)
with tfe.restore_variables_on_create(ckpt_path):
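        # Under eager execution variables are created lazily, so restore them from
        # the pretrained checkpoint the first time the model runs.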
model_output = translate_model.infer(encoded_inputs)["outputs"]
return decode(model_output)
inputs = "The animal didn't cross the street because it was too tired"
outputs = translate(inputs)
print("Inputs: %s" % inputs)
print("Outputs: %s" % outputs)
| 30.221154 | 106 | 0.773147 |
614f11e58d17ac1f2198ebd1a2050bc7905de66e | 741 | py | Python | setup.py | atbradley/eaccpf-indexer | da50970a0ffbc9b0eb892814ef07071ed5e843b2 | [
"Apache-2.0"
] | null | null | null | setup.py | atbradley/eaccpf-indexer | da50970a0ffbc9b0eb892814ef07071ed5e843b2 | [
"Apache-2.0"
] | null | null | null | setup.py | atbradley/eaccpf-indexer | da50970a0ffbc9b0eb892814ef07071ed5e843b2 | [
"Apache-2.0"
] | null | null | null | """
This file is subject to the terms and conditions defined in the
LICENSE file, which is part of this source code package.
"""
from setuptools import setup, find_packages
# read metadata
with open('README.md', 'r') as f:
description = f.read()
version = '1.5.1'
# package requirements
# pip.req was removed from pip's public API, so read requirements.txt directly
with open('requirements.txt') as f:
    requirements = [line.strip() for line in f
                    if line.strip() and not line.startswith('#')]
setup(name='Indexer',
description=description,
author='Davis Marques <dmarques@unimelb.edu.au> eScholarship Research Center, University of Melbourne',
url='http://www.esrc.unimelb.edu.au',
version=version,
packages=find_packages(),
install_requires=requirements,
)
| 28.5 | 109 | 0.731444 |
70793451c614c39edf0baa82c9faac06537e5b68 | 10,062 | py | Python | global_helpers/panther_oss_helpers.py | BatteryCandy/panther-analysis | a67ca63581fe328d3f2c7493f0cdb889acb3181f | [
"Apache-2.0"
] | 1 | 2020-10-21T08:14:49.000Z | 2020-10-21T08:14:49.000Z | global_helpers/panther_oss_helpers.py | georgeSkoumas/panther-analysis | 30b21c270504bf7c84f99207c9c6c2f6110843ae | [
"Apache-2.0"
] | null | null | null | global_helpers/panther_oss_helpers.py | georgeSkoumas/panther-analysis | 30b21c270504bf7c84f99207c9c6c2f6110843ae | [
"Apache-2.0"
] | null | null | null | """Utility functions provided to policies and rules during execution."""
from ipaddress import ip_network
import time
from typing import Any, Dict, Union, Sequence, Set
import boto3
_RESOURCE_TABLE = None # boto3.Table resource, lazily constructed
class BadLookup(Exception):
"""Error returned when a resource lookup fails."""
class PantherBadInput(Exception):
"""Error returned when a Panther helper function is provided bad input."""
def get_s3_arn_by_name(name: str) -> str:
"""This function is used to construct an s3 bucket ARN from its name."""
if name == '':
raise PantherBadInput('s3 name cannot be blank')
return 'arn:aws:s3:::' + name
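# Illustrative example for get_s3_arn_by_name (the bucket name is hypothetical):
#   get_s3_arn_by_name('my-logs-bucket') -> 'arn:aws:s3:::my-logs-bucket'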
def s3_lookup_by_name(name: str) -> Dict[str, Any]:
"""This function is used to get an S3 bucket resource from just its name."""
return resource_lookup(get_s3_arn_by_name(name))
def resource_table() -> boto3.resource:
"""Lazily build resource table"""
# pylint: disable=global-statement
global _RESOURCE_TABLE
if not _RESOURCE_TABLE:
# pylint: disable=no-member
_RESOURCE_TABLE = boto3.resource('dynamodb').Table('panther-resources')
return _RESOURCE_TABLE
def resource_lookup(resource_id: str) -> Dict[str, Any]:
"""This function is used to get a resource from the resources-api based on its resourceID."""
# Validate input so we can provide meaningful error messages to users
if resource_id == '':
raise PantherBadInput('resourceId cannot be blank')
# Get the item from dynamo
response = resource_table().get_item(Key={'id': resource_id})
# Check if dynamo failed
status_code = response['ResponseMetadata']['HTTPStatusCode']
if status_code != 200:
raise BadLookup('dynamodb - ' + str(status_code) + ' HTTPStatusCode')
# Check if the item was found
if 'Item' not in response:
raise BadLookup(resource_id + ' not found')
# Return just the attributes of the item
return response['Item']['attributes']
# Expects a string in cidr notation (e.g. '10.0.0.0/24') indicating the ip range being checked
# Returns True if any ip in the range is marked as DMZ space.
DMZ_NETWORKS = [
ip_network('10.1.0.0/24'),
ip_network('100.1.0.0/24'),
]
def is_dmz_cidr(ip_range):
"""This function determines whether a given IP range is within the defined DMZ IP range."""
return any(
ip_network(ip_range).overlaps(dmz_network)
for dmz_network in DMZ_NETWORKS)
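# Illustrative examples (the CIDRs are hypothetical) given DMZ_NETWORKS above:
#   is_dmz_cidr('10.1.0.128/25')  -> True   (overlaps 10.1.0.0/24)
#   is_dmz_cidr('192.168.0.0/24') -> False  (overlaps no DMZ network)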
DMZ_TAG_KEY = 'environment'
DMZ_TAG_VALUE = 'dmz'
# Defaults to False to assume something is not a DMZ if it is not tagged
def is_dmz_tags(resource):
"""This function determines whether a given resource is tagged as exisitng in a DMZ."""
if resource['Tags'] is None:
return False
return resource['Tags'].get(DMZ_TAG_KEY) == DMZ_TAG_VALUE
# Helper functions for accessing Dynamo key-value store.
#
# Keys can be any string specified by rules and policies,
# values are integer counters and/or string sets.
#
# Use kv_table() if you want to interact with the table directly.
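#
# Minimal illustrative sketch (not part of this module) of how a detection rule
# might use these helpers; the key name and threshold below are made up:
#
#   def rule(event):
#       return increment_counter('failed_logins:' + event['user']) > 5
#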
_KV_TABLE = None
_COUNT_COL = 'intCount'
_STRING_SET_COL = 'stringSet'
def kv_table() -> boto3.resource:
"""Lazily build key-value table resource"""
# pylint: disable=global-statement
global _KV_TABLE
if not _KV_TABLE:
# pylint: disable=no-member
_KV_TABLE = boto3.resource('dynamodb').Table('panther-kv-store')
return _KV_TABLE
def get_counter(key: str) -> int:
"""Get a counter's current value (defaulting to 0 if key does not exist)."""
response = kv_table().get_item(
Key={'key': key},
ProjectionExpression=_COUNT_COL,
)
return response.get('Item', {}).get(_COUNT_COL, 0)
def increment_counter(key: str, val: int = 1) -> int:
"""Increment a counter in the table.
Args:
key: The name of the counter (need not exist yet)
val: How much to add to the counter
Returns:
The new value of the count
"""
response = kv_table().update_item(
Key={'key': key},
ReturnValues='UPDATED_NEW',
UpdateExpression='ADD #col :incr',
ExpressionAttributeNames={'#col': _COUNT_COL},
ExpressionAttributeValues={':incr': val},
)
# Numeric values are returned as decimal.Decimal
return response['Attributes'][_COUNT_COL].to_integral_value()
def reset_counter(key: str) -> None:
"""Reset a counter to 0."""
kv_table().put_item(Item={'key': key, _COUNT_COL: 0})
def set_key_expiration(key: str, epoch_seconds: int) -> None:
"""Configure the key to automatically expire at the given time.
DynamoDB typically deletes expired items within 48 hours of expiration.
Args:
key: The name of the counter
epoch_seconds: When you want the counter to expire (set to 0 to disable)
"""
kv_table().update_item(Key={'key': key},
UpdateExpression='SET expiresAt = :time',
ExpressionAttributeValues={':time': epoch_seconds})
def get_string_set(key: str) -> Set[str]:
"""Get a string set's current value (defaulting to empty set if key does not exit)."""
response = kv_table().get_item(
Key={'key': key},
ProjectionExpression=_STRING_SET_COL,
)
return response.get('Item', {}).get(_STRING_SET_COL, set())
def put_string_set(key: str, val: Sequence[str]) -> None:
"""Overwrite a string set under the given key.
This is faster than (reset_string_set + add_string_set) if you know exactly what the contents
of the set should be.
Args:
key: The name of the string set
val: A list/set/tuple of strings to store
"""
if len(val) == 0:
# Can't put an empty string set - remove it instead
reset_string_set(key)
else:
kv_table().put_item(Item={'key': key, _STRING_SET_COL: set(val)})
def add_to_string_set(key: str, val: Union[str, Sequence[str]]) -> Set[str]:
"""Add one or more strings to a set.
Args:
key: The name of the string set
val: Either a single string or a list/tuple/set of strings to add
Returns:
The new value of the string set
"""
if isinstance(val, str):
item_value = {val}
else:
item_value = set(val)
if len(item_value) == 0:
# We can't add empty sets, just return the existing value instead
return get_string_set(key)
response = kv_table().update_item(
Key={'key': key},
ReturnValues='UPDATED_NEW',
UpdateExpression='ADD #col :ss',
ExpressionAttributeNames={'#col': _STRING_SET_COL},
ExpressionAttributeValues={':ss': item_value},
)
return response['Attributes'][_STRING_SET_COL]
def remove_from_string_set(key: str, val: Union[str,
Sequence[str]]) -> Set[str]:
"""Remove one or more strings from a set.
Args:
key: The name of the string set
val: Either a single string or a list/tuple/set of strings to remove
Returns:
The new value of the string set
"""
if isinstance(val, str):
item_value = {val}
else:
item_value = set(val)
if len(item_value) == 0:
# We can't remove empty sets, just return the existing value instead
return get_string_set(key)
response = kv_table().update_item(
Key={'key': key},
ReturnValues='UPDATED_NEW',
UpdateExpression='DELETE #col :ss',
ExpressionAttributeNames={'#col': _STRING_SET_COL},
ExpressionAttributeValues={':ss': item_value},
)
return response['Attributes'][_STRING_SET_COL]
def reset_string_set(key: str) -> None:
"""Reset a string set to empty."""
kv_table().update_item(
Key={'key': key},
UpdateExpression='REMOVE #col',
ExpressionAttributeNames={'#col': _STRING_SET_COL},
)
def _test_kv_store():
"""Integration tests which validate the functions which interact with the key-value store.
Deploy Panther and then simply run "python3 panther.py" to test.
"""
assert increment_counter('panther', 1) == 1
assert increment_counter('labs', 3) == 3
assert increment_counter('panther', -2) == -1
assert increment_counter('panther', 0) == -1
assert increment_counter('panther', 11) == 10
assert get_counter('panther') == 10
assert get_counter('labs') == 3
assert get_counter('nonexistent') == 0
reset_counter('panther')
reset_counter('labs')
assert get_counter('panther') == 0
assert get_counter('labs') == 0
set_key_expiration('panther', int(time.time()))
# Add elements in a list, tuple, set, or as singleton strings
# The same key can be used to store int counts and string sets
assert add_to_string_set('panther', ['a', 'b']) == {'a', 'b'}
assert add_to_string_set('panther', ['b', 'a']) == {'a', 'b'}
assert add_to_string_set('panther', 'c') == {'a', 'b', 'c'}
assert add_to_string_set('panther', set()) == {'a', 'b', 'c'}
assert add_to_string_set('panther', {'b', 'c', 'd'}) == {'a', 'b', 'c', 'd'}
assert add_to_string_set('panther', ('d', 'e')) == {'a', 'b', 'c', 'd', 'e'}
# Empty strings are allowed
assert add_to_string_set('panther', '') == {'a', 'b', 'c', 'd', 'e', ''}
assert get_string_set('labs') == set()
assert get_string_set('panther') == {'a', 'b', 'c', 'd', 'e', ''}
assert remove_from_string_set('panther', ['b', 'c', 'd']) == {'a', 'e', ''}
assert remove_from_string_set('panther', '') == {'a', 'e'}
assert remove_from_string_set('panther', '') == {'a', 'e'}
# Overwrite contents completely
put_string_set('panther', ['go', 'python'])
assert get_string_set('panther') == {'go', 'python'}
put_string_set('labs', [])
assert get_string_set('labs') == set()
reset_string_set('panther')
reset_string_set('nonexistent') # no error
assert get_string_set('panther') == set()
if __name__ == '__main__':
_test_kv_store()
| 32.775244 | 97 | 0.645697 |
0e97d88bdf5dcebf95dd51db44ee373b1bb34cf0 | 4,016 | gyp | Python | third_party/domain_registry_provider/src/testing/gtest.gyp | x0rzkov/incubator-pagespeed-mod | d192f0e92bc6201474a9295f50aad625cde68889 | [
"Apache-2.0"
] | null | null | null | third_party/domain_registry_provider/src/testing/gtest.gyp | x0rzkov/incubator-pagespeed-mod | d192f0e92bc6201474a9295f50aad625cde68889 | [
"Apache-2.0"
] | null | null | null | third_party/domain_registry_provider/src/testing/gtest.gyp | x0rzkov/incubator-pagespeed-mod | d192f0e92bc6201474a9295f50aad625cde68889 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'gtest',
'type': 'static_library',
'sources': [
'gtest/include/gtest/gtest-death-test.h',
'gtest/include/gtest/gtest-message.h',
'gtest/include/gtest/gtest-param-test.h',
'gtest/include/gtest/gtest-printers.h',
'gtest/include/gtest/gtest-spi.h',
'gtest/include/gtest/gtest-test-part.h',
'gtest/include/gtest/gtest-typed-test.h',
'gtest/include/gtest/gtest.h',
'gtest/include/gtest/gtest_pred_impl.h',
'gtest/include/gtest/gtest_prod.h',
'gtest/include/gtest/internal/gtest-death-test-internal.h',
'gtest/include/gtest/internal/gtest-filepath.h',
'gtest/include/gtest/internal/gtest-internal.h',
'gtest/include/gtest/internal/gtest-linked_ptr.h',
'gtest/include/gtest/internal/gtest-param-util-generated.h',
'gtest/include/gtest/internal/gtest-param-util.h',
'gtest/include/gtest/internal/gtest-port.h',
'gtest/include/gtest/internal/gtest-string.h',
'gtest/include/gtest/internal/gtest-tuple.h',
'gtest/include/gtest/internal/gtest-type-util.h',
'gtest/src/gtest-all.cc',
'gtest/src/gtest-death-test.cc',
'gtest/src/gtest-filepath.cc',
'gtest/src/gtest-internal-inl.h',
'gtest/src/gtest-port.cc',
'gtest/src/gtest-printers.cc',
'gtest/src/gtest-test-part.cc',
'gtest/src/gtest-typed-test.cc',
'gtest/src/gtest.cc',
],
'sources!': [
'gtest/src/gtest-all.cc', # Not needed by our build.
],
'include_dirs': [
'gtest',
'gtest/include',
],
'conditions': [
['OS == "mac"', {
'sources': [
'gtest_mac.h',
'gtest_mac.mm',
'platform_test_mac.mm'
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
],
},
}],
['OS != "win"', {
'defines': [
# gtest isn't able to figure out when RTTI is disabled for gcc
# versions older than 4.3.2, and assumes it's enabled. Our Mac
# and Linux builds disable RTTI, and cannot guarantee that the
# compiler will be 4.3.2. or newer. The Mac, for example, uses
# 4.2.1 as that is the latest available on that platform. gtest
# must be instructed that RTTI is disabled here, and for any
# direct dependents that might include gtest headers.
'GTEST_HAS_RTTI=0',
],
'direct_dependent_settings': {
'defines': [
'GTEST_HAS_RTTI=0',
],
},
}],
],
'direct_dependent_settings': {
'defines': [
'UNIT_TEST',
],
'include_dirs': [
'gtest/include', # So that gtest headers can find themselves.
],
'target_conditions': [
['_type=="executable"', {
'test': 1,
'conditions': [
['OS=="mac"', {
'run_as': {
'action????': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}'],
},
}],
['OS=="win"', {
'run_as': {
'action????': ['$(TargetPath)', '--gtest_print_time'],
},
}],
],
}],
],
'msvs_disabled_warnings': [4800],
},
},
{
'target_name': 'gtest_main',
'type': 'static_library',
'dependencies': [
'gtest',
],
'sources': [
'gtest/src/gtest_main.cc',
],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
| 32.387097 | 76 | 0.522659 |
0fc515d8c2f1f66fb8b3599949e56caf4a009bd9 | 2,368 | py | Python | egs/librispeech/ASR/local/compute_fbank_musan.py | danpovey/icefall | 8469f9ae0a2d7686f04e558fba8ddfb5505109fc | [
"Apache-2.0"
] | 2 | 2021-09-02T02:32:11.000Z | 2021-11-01T07:14:15.000Z | egs/librispeech/ASR/local/compute_fbank_musan.py | danpovey/icefall | 8469f9ae0a2d7686f04e558fba8ddfb5505109fc | [
"Apache-2.0"
] | null | null | null | egs/librispeech/ASR/local/compute_fbank_musan.py | danpovey/icefall | 8469f9ae0a2d7686f04e558fba8ddfb5505109fc | [
"Apache-2.0"
] | 2 | 2021-09-02T14:17:20.000Z | 2022-02-28T07:18:34.000Z | #!/usr/bin/env python3
"""
This file computes fbank features of the musan dataset.
It looks for manifests in the directory data/manifests.
The generated fbank features are saved in data/fbank.
"""
import logging
import os
from pathlib import Path
import torch
from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer, combine
from lhotse.recipes.utils import read_manifests_if_cached
from icefall.utils import get_executor
# Torch's multithreaded behavior needs to be disabled or it wastes a lot of CPU and
# slows things down. Do this outside of main() in case it needs to take effect
# even when we are not invoking the main (e.g. when spawning subprocesses).
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
def compute_fbank_musan():
src_dir = Path("data/manifests")
output_dir = Path("data/fbank")
num_jobs = min(15, os.cpu_count())
num_mel_bins = 80
dataset_parts = (
"music",
"speech",
"noise",
)
manifests = read_manifests_if_cached(
dataset_parts=dataset_parts, output_dir=src_dir
)
assert manifests is not None
musan_cuts_path = output_dir / "cuts_musan.json.gz"
if musan_cuts_path.is_file():
logging.info(f"{musan_cuts_path} already exists - skipping")
return
logging.info("Extracting features for Musan")
extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins))
with get_executor() as ex: # Initialize the executor only once.
# create chunks of Musan with duration 5 - 10 seconds
musan_cuts = (
CutSet.from_manifests(
recordings=combine(
part["recordings"] for part in manifests.values()
)
)
.cut_into_windows(10.0)
.filter(lambda c: c.duration > 5)
.compute_and_store_features(
extractor=extractor,
storage_path=f"{output_dir}/feats_musan",
num_jobs=num_jobs if ex is None else 80,
executor=ex,
storage_type=LilcomHdf5Writer,
)
)
musan_cuts.to_json(musan_cuts_path)
if __name__ == "__main__":
formatter = (
"%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
)
logging.basicConfig(format=formatter, level=logging.INFO)
compute_fbank_musan()
| 29.234568 | 83 | 0.658361 |
e8e6f285ecb9a6a9c506755f1330d843829f1354 | 23,590 | py | Python | official/vision/beta/projects/panoptic_maskrcnn/modeling/layers/panoptic_deeplab_merge.py | Lufeifeina/models | d7d260d4c690e5163070e21d75df372ab559ea23 | [
"Apache-2.0"
] | 1 | 2020-09-14T10:46:07.000Z | 2020-09-14T10:46:07.000Z | official/vision/beta/projects/panoptic_maskrcnn/modeling/layers/panoptic_deeplab_merge.py | Lufeifeina/models | d7d260d4c690e5163070e21d75df372ab559ea23 | [
"Apache-2.0"
] | null | null | null | official/vision/beta/projects/panoptic_maskrcnn/modeling/layers/panoptic_deeplab_merge.py | Lufeifeina/models | d7d260d4c690e5163070e21d75df372ab559ea23 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains functions to post-process Panoptic-DeepLab results.
Note that the postprocessing class and the supporting functions are branched
from:
https://github.com/google-research/deeplab2/blob/main/model/post_processor/panoptic_deeplab.py
with minor changes.
"""
import functools
from typing import List, Tuple, Dict, Text
import tensorflow as tf
from official.vision.beta.projects.panoptic_maskrcnn.ops import mask_ops
def _add_zero_padding(input_tensor: tf.Tensor, kernel_size: int,
rank: int) -> tf.Tensor:
"""Adds zero-padding to the input_tensor."""
pad_total = kernel_size - 1
pad_begin = pad_total // 2
pad_end = pad_total - pad_begin
if rank == 3:
return tf.pad(
input_tensor,
paddings=[[pad_begin, pad_end], [pad_begin, pad_end], [0, 0]])
else:
return tf.pad(
input_tensor,
paddings=[[0, 0], [pad_begin, pad_end], [pad_begin, pad_end], [0, 0]])
def _get_semantic_predictions(semantic_logits: tf.Tensor) -> tf.Tensor:
"""Computes the semantic classes from the predictions.
Args:
semantic_logits: A tf.tensor of shape [batch, height, width, classes].
Returns:
A tf.Tensor containing the semantic class prediction of shape
[batch, height, width].
"""
return tf.argmax(semantic_logits, axis=-1, output_type=tf.int32)
def _get_instance_centers_from_heatmap(
center_heatmap: tf.Tensor,
center_threshold: float,
nms_kernel_size: int,
keep_k_centers: int) -> Tuple[tf.Tensor, tf.Tensor]:
"""Computes a list of instance centers.
Args:
center_heatmap: A tf.Tensor of shape [height, width, 1].
center_threshold: A float setting the threshold for the center heatmap.
nms_kernel_size: An integer specifying the nms kernel size.
keep_k_centers: An integer specifying the number of centers to keep (K).
Non-positive values will keep all centers.
Returns:
A tuple of
- tf.Tensor of shape [N, 2] containing N center coordinates (after
non-maximum suppression) in (y, x) order.
- tf.Tensor of shape [height, width] containing the center heatmap after
non-maximum suppression.
"""
# Threshold center map.
center_heatmap = tf.where(
tf.greater(center_heatmap, center_threshold), center_heatmap, 0.0)
# Non-maximum suppression.
padded_map = _add_zero_padding(center_heatmap, nms_kernel_size, rank=3)
pooled_center_heatmap = tf.keras.backend.pool2d(
tf.expand_dims(padded_map, 0),
pool_size=(nms_kernel_size, nms_kernel_size),
strides=(1, 1),
padding='valid',
pool_mode='max')
center_heatmap = tf.where(
tf.equal(pooled_center_heatmap, center_heatmap), center_heatmap, 0.0)
center_heatmap = tf.squeeze(center_heatmap, axis=[0, 3])
# `centers` is of shape (N, 2) with (y, x) order of the second dimension.
centers = tf.where(tf.greater(center_heatmap, 0.0))
if keep_k_centers > 0 and tf.shape(centers)[0] > keep_k_centers:
topk_scores, _ = tf.math.top_k(
tf.reshape(center_heatmap, [-1]), keep_k_centers, sorted=False)
centers = tf.where(tf.greater(center_heatmap, topk_scores[-1]))
return centers, center_heatmap
def _find_closest_center_per_pixel(centers: tf.Tensor,
center_offsets: tf.Tensor) -> tf.Tensor:
"""Assigns all pixels to their closest center.
Args:
centers: A tf.Tensor of shape [N, 2] containing N centers with coordinate
order (y, x).
center_offsets: A tf.Tensor of shape [height, width, 2].
Returns:
A tf.Tensor of shape [height, width] containing the index of the closest
center, per pixel.
"""
height = tf.shape(center_offsets)[0]
width = tf.shape(center_offsets)[1]
x_coord, y_coord = tf.meshgrid(tf.range(width), tf.range(height))
coord = tf.stack([y_coord, x_coord], axis=-1)
center_per_pixel = tf.cast(coord, tf.float32) + center_offsets
# centers: [N, 2] -> [N, 1, 2].
# center_per_pixel: [H, W, 2] -> [1, H*W, 2].
centers = tf.cast(tf.expand_dims(centers, 1), tf.float32)
center_per_pixel = tf.reshape(center_per_pixel, [height*width, 2])
center_per_pixel = tf.expand_dims(center_per_pixel, 0)
# distances: [N, H*W].
distances = tf.norm(centers - center_per_pixel, axis=-1)
return tf.reshape(tf.argmin(distances, axis=0), [height, width])
def _get_instances_from_heatmap_and_offset(
semantic_segmentation: tf.Tensor, center_heatmap: tf.Tensor,
center_offsets: tf.Tensor, center_threshold: float,
thing_class_ids: tf.Tensor, nms_kernel_size: int,
keep_k_centers: int) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Computes the instance assignment per pixel.
Args:
semantic_segmentation: A tf.Tensor containing the semantic labels of shape
[height, width].
center_heatmap: A tf.Tensor of shape [height, width, 1].
center_offsets: A tf.Tensor of shape [height, width, 2].
center_threshold: A float setting the threshold for the center heatmap.
thing_class_ids: A tf.Tensor of shape [N] containing N thing indices.
nms_kernel_size: An integer specifying the nms kernel size.
keep_k_centers: An integer specifying the number of centers to keep.
Negative values will keep all centers.
Returns:
A tuple of:
- tf.Tensor containing the instance segmentation (filtered with the `thing`
segmentation from the semantic segmentation output) with shape
[height, width].
- tf.Tensor containing the processed centermap with shape [height, width].
- tf.Tensor containing instance scores (where higher "score" is a reasonable
signal of a higher confidence detection.) Will be of shape [height, width]
with the score for a pixel being the score of the instance it belongs to.
The scores will be zero for pixels in background/"stuff" regions.
"""
thing_segmentation = tf.zeros_like(semantic_segmentation)
for thing_id in thing_class_ids:
thing_segmentation = tf.where(tf.equal(semantic_segmentation, thing_id),
1,
thing_segmentation)
centers, processed_center_heatmap = _get_instance_centers_from_heatmap(
center_heatmap, center_threshold, nms_kernel_size, keep_k_centers)
if tf.shape(centers)[0] == 0:
return (tf.zeros_like(semantic_segmentation), processed_center_heatmap,
tf.zeros_like(processed_center_heatmap))
instance_center_index = _find_closest_center_per_pixel(
centers, center_offsets)
# Instance IDs should start with 1. So we use the index into the centers, but
# shifted by 1.
instance_segmentation = tf.cast(instance_center_index, tf.int32) + 1
# The value of the heatmap at an instance's center is used as the score
# for that instance.
instance_scores = tf.gather_nd(processed_center_heatmap, centers)
# This will map the instance scores back to the image space: where each pixel
# has a value equal to the score of its instance.
flat_center_index = tf.reshape(instance_center_index, [-1])
instance_score_map = tf.gather(instance_scores, flat_center_index)
instance_score_map = tf.reshape(instance_score_map,
tf.shape(instance_segmentation))
instance_score_map *= tf.cast(thing_segmentation, tf.float32)
return (thing_segmentation * instance_segmentation, processed_center_heatmap,
instance_score_map)
@tf.function
def _get_panoptic_predictions(
semantic_logits: tf.Tensor, center_heatmap: tf.Tensor,
center_offsets: tf.Tensor, center_threshold: float,
thing_class_ids: tf.Tensor, label_divisor: int, stuff_area_limit: int,
void_label: int, nms_kernel_size: int, keep_k_centers: int
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""Computes the semantic class and instance ID per pixel.
Args:
semantic_logits: A tf.Tensor of shape [batch, height, width, classes].
center_heatmap: A tf.Tensor of shape [batch, height, width, 1].
center_offsets: A tf.Tensor of shape [batch, height, width, 2].
center_threshold: A float setting the threshold for the center heatmap.
thing_class_ids: A tf.Tensor of shape [N] containing N thing indices.
label_divisor: An integer specifying the label divisor of the dataset.
stuff_area_limit: An integer specifying the number of pixels that stuff
regions need to have at least. The stuff region will be included in the
panoptic prediction, only if its area is larger than the limit; otherwise,
it will be re-assigned as void_label.
void_label: An integer specifying the void label.
nms_kernel_size: An integer specifying the nms kernel size.
keep_k_centers: An integer specifying the number of centers to keep.
Negative values will keep all centers.
Returns:
A tuple of:
- the panoptic prediction as tf.Tensor with shape [batch, height, width].
- the centermap prediction as tf.Tensor with shape [batch, height, width].
- the instance score maps as tf.Tensor with shape [batch, height, width].
- the instance prediction as tf.Tensor with shape [batch, height, width].
"""
semantic_prediction = _get_semantic_predictions(semantic_logits)
batch_size = tf.shape(semantic_logits)[0]
instance_map_lists = tf.TensorArray(
tf.int32, size=batch_size, dynamic_size=False)
center_map_lists = tf.TensorArray(
tf.float32, size=batch_size, dynamic_size=False)
instance_score_map_lists = tf.TensorArray(
tf.float32, size=batch_size, dynamic_size=False)
for i in tf.range(batch_size):
(instance_map, center_map,
instance_score_map) = _get_instances_from_heatmap_and_offset(
semantic_prediction[i, ...], center_heatmap[i, ...],
center_offsets[i, ...], center_threshold, thing_class_ids,
nms_kernel_size, keep_k_centers)
instance_map_lists = instance_map_lists.write(i, instance_map)
center_map_lists = center_map_lists.write(i, center_map)
instance_score_map_lists = instance_score_map_lists.write(
i, instance_score_map)
# This does not work with unknown shapes.
instance_maps = instance_map_lists.stack()
center_maps = center_map_lists.stack()
instance_score_maps = instance_score_map_lists.stack()
panoptic_prediction = _merge_semantic_and_instance_maps(
semantic_prediction, instance_maps, thing_class_ids, label_divisor,
stuff_area_limit, void_label)
return (panoptic_prediction, center_maps, instance_score_maps, instance_maps)
@tf.function
def _merge_semantic_and_instance_maps(
semantic_prediction: tf.Tensor,
instance_maps: tf.Tensor,
thing_class_ids: tf.Tensor,
label_divisor: int,
stuff_area_limit: int,
void_label: int) -> tf.Tensor:
"""Merges semantic and instance maps to obtain panoptic segmentation.
This function merges the semantic segmentation and class-agnostic
instance segmentation to form the panoptic segmentation. In particular,
the class label of each instance mask is inferred from the majority
votes from the corresponding pixels in the semantic segmentation. This
operation is first proposed in the DeeperLab paper and adopted by the
Panoptic-DeepLab.
- DeeperLab: Single-Shot Image Parser, T-J Yang, et al. arXiv:1902.05093.
- Panoptic-DeepLab, B. Cheng, et al. In CVPR, 2020.
Note that this function only supports batch = 1 for simplicity. Additionally,
this function has a slightly different implementation from the provided
TensorFlow implementation `merge_ops` but with a similar performance. This
function is mainly used as a backup solution when you could not successfully
compile the provided TensorFlow implementation. To reproduce our results,
please use the provided TensorFlow implementation (i.e., not use this
function, but the `merge_ops.merge_semantic_and_instance_maps`).
Args:
semantic_prediction: A tf.Tensor of shape [batch, height, width].
instance_maps: A tf.Tensor of shape [batch, height, width].
thing_class_ids: A tf.Tensor of shape [N] containing N thing indices.
label_divisor: An integer specifying the label divisor of the dataset.
stuff_area_limit: An integer specifying the number of pixels that stuff
regions need to have at least. The stuff region will be included in the
panoptic prediction, only if its area is larger than the limit; otherwise,
it will be re-assigned as void_label.
void_label: An integer specifying the void label.
Returns:
panoptic_prediction: A tf.Tensor with shape [batch, height, width].
"""
prediction_shape = semantic_prediction.get_shape().as_list()
# This implementation only supports batch size of 1. Since model construction
# might lose batch size information (and leave it to None), override it here.
prediction_shape[0] = 1
semantic_prediction = tf.ensure_shape(semantic_prediction, prediction_shape)
instance_maps = tf.ensure_shape(instance_maps, prediction_shape)
# Default panoptic_prediction to have semantic label = void_label.
panoptic_prediction = tf.ones_like(
semantic_prediction) * void_label * label_divisor
# Start to paste predicted `thing` regions to panoptic_prediction.
# Infer `thing` segmentation regions from semantic prediction.
semantic_thing_segmentation = tf.zeros_like(semantic_prediction,
dtype=tf.bool)
for thing_class in thing_class_ids:
semantic_thing_segmentation = tf.math.logical_or(
semantic_thing_segmentation,
semantic_prediction == thing_class)
# Keep track of how many instances for each semantic label.
num_instance_per_semantic_label = tf.TensorArray(
tf.int32, size=0, dynamic_size=True, clear_after_read=False)
instance_ids, _ = tf.unique(tf.reshape(instance_maps, [-1]))
for instance_id in instance_ids:
# Instance ID 0 is reserved for crowd region.
if instance_id == 0:
continue
thing_mask = tf.math.logical_and(instance_maps == instance_id,
semantic_thing_segmentation)
if tf.reduce_sum(tf.cast(thing_mask, tf.int32)) == 0:
continue
semantic_bin_counts = tf.math.bincount(
tf.boolean_mask(semantic_prediction, thing_mask))
semantic_majority = tf.cast(
tf.math.argmax(semantic_bin_counts), tf.int32)
while num_instance_per_semantic_label.size() <= semantic_majority:
num_instance_per_semantic_label = num_instance_per_semantic_label.write(
num_instance_per_semantic_label.size(), 0)
new_instance_id = (
num_instance_per_semantic_label.read(semantic_majority) + 1)
num_instance_per_semantic_label = num_instance_per_semantic_label.write(
semantic_majority, new_instance_id)
panoptic_prediction = tf.where(
thing_mask,
tf.ones_like(panoptic_prediction) * semantic_majority * label_divisor
+ new_instance_id,
panoptic_prediction)
# Done with `num_instance_per_semantic_label` tensor array.
num_instance_per_semantic_label.close()
# Start to paste predicted `stuff` regions to panoptic prediction.
instance_stuff_regions = instance_maps == 0
semantic_ids, _ = tf.unique(tf.reshape(semantic_prediction, [-1]))
for semantic_id in semantic_ids:
if tf.reduce_sum(tf.cast(thing_class_ids == semantic_id, tf.int32)) > 0:
continue
# Check stuff area.
stuff_mask = tf.math.logical_and(semantic_prediction == semantic_id,
instance_stuff_regions)
stuff_area = tf.reduce_sum(tf.cast(stuff_mask, tf.int32))
if stuff_area >= stuff_area_limit:
panoptic_prediction = tf.where(
stuff_mask,
tf.ones_like(panoptic_prediction) * semantic_id * label_divisor,
panoptic_prediction)
return panoptic_prediction
class PostProcessor(tf.keras.layers.Layer):
"""This class contains code of a Panoptic-Deeplab post-processor."""
def __init__(
self,
output_size: List[int],
center_score_threshold: float,
thing_class_ids: List[int],
label_divisor: int,
stuff_area_limit: int,
ignore_label: int,
nms_kernel: int,
keep_k_centers: int,
rescale_predictions: bool,
**kwargs):
"""Initializes a Panoptic-Deeplab post-processor.
Args:
output_size: A `List` of integers that represent the height and width of
the output mask.
center_score_threshold: A float setting the threshold for the center
heatmap.
thing_class_ids: An integer list shape [N] containing N thing indices.
label_divisor: An integer specifying the label divisor of the dataset.
stuff_area_limit: An integer specifying the number of pixels that stuff
regions need to have at least. The stuff region will be included in the
panoptic prediction, only if its area is larger than the limit;
otherwise, it will be re-assigned as void_label.
ignore_label: An integer specifying the void label.
nms_kernel: An integer specifying the nms kernel size.
keep_k_centers: An integer specifying the number of centers to keep.
Negative values will keep all centers.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, image_info is used to rescale predictions.
**kwargs: additional kwargs arguments.
"""
super(PostProcessor, self).__init__(**kwargs)
self._config_dict = {
'output_size': output_size,
'center_score_threshold': center_score_threshold,
'thing_class_ids': thing_class_ids,
'label_divisor': label_divisor,
'stuff_area_limit': stuff_area_limit,
'ignore_label': ignore_label,
'nms_kernel': nms_kernel,
'keep_k_centers': keep_k_centers,
'rescale_predictions': rescale_predictions
}
self._post_processor = functools.partial(
_get_panoptic_predictions,
center_threshold=center_score_threshold,
thing_class_ids=tf.convert_to_tensor(thing_class_ids),
label_divisor=label_divisor,
stuff_area_limit=stuff_area_limit,
void_label=ignore_label,
nms_kernel_size=nms_kernel,
keep_k_centers=keep_k_centers)
def _resize_and_pad_masks(self, mask, image_info):
"""Resizes masks to match the original image shape and pads to`output_size`.
Args:
mask: a padded mask tensor.
image_info: a tensor that holds information about original and
preprocessed images.
Returns:
resized and padded masks: tf.Tensor.
"""
rescale_size = tf.cast(
tf.math.ceil(image_info[1, :] / image_info[2, :]), tf.int32)
image_shape = tf.cast(image_info[0, :], tf.int32)
offsets = tf.cast(image_info[3, :], tf.int32)
mask = tf.image.resize(
mask,
rescale_size,
method='bilinear')
mask = tf.image.crop_to_bounding_box(
mask,
offsets[0], offsets[1],
image_shape[0],
image_shape[1])
mask = tf.image.pad_to_bounding_box(
mask, 0, 0,
self._config_dict['output_size'][0],
self._config_dict['output_size'][1])
return mask
def _resize_and_pad_offset_mask(self, mask, image_info):
"""Rescales and resizes offset masks and pads to`output_size`.
Args:
mask: a padded offset mask tensor.
image_info: a tensor that holds information about original and
preprocessed images.
Returns:
rescaled, resized and padded masks: tf.Tensor.
"""
rescale_size = tf.cast(
tf.math.ceil(image_info[1, :] / image_info[2, :]), tf.int32)
image_shape = tf.cast(image_info[0, :], tf.int32)
offsets = tf.cast(image_info[3, :], tf.int32)
mask = mask_ops.resize_and_rescale_offsets(
tf.expand_dims(mask, axis=0),
rescale_size)[0]
mask = tf.image.crop_to_bounding_box(
mask,
offsets[0], offsets[1],
image_shape[0],
image_shape[1])
mask = tf.image.pad_to_bounding_box(
mask, 0, 0,
self._config_dict['output_size'][0],
self._config_dict['output_size'][1])
return mask
def call(
self,
result_dict: Dict[Text, tf.Tensor],
image_info: tf.Tensor) -> Dict[Text, tf.Tensor]:
"""Performs the post-processing given model predicted results.
Args:
result_dict: A dictionary of tf.Tensor containing model results. The dict
has to contain
- segmentation_outputs
- instance_centers_heatmap
- instance_centers_offset
image_info: A tf.Tensor of image infos.
Returns:
The post-processed dict of tf.Tensor, containing the following keys:
- panoptic_outputs
- category_mask
- instance_mask
- instance_centers
- instance_score
"""
if self._config_dict['rescale_predictions']:
segmentation_outputs = tf.map_fn(
fn=lambda x: self._resize_and_pad_masks(x[0], x[1]),
elems=(result_dict['segmentation_outputs'], image_info),
fn_output_signature=tf.float32,
parallel_iterations=32)
instance_centers_heatmap = tf.map_fn(
fn=lambda x: self._resize_and_pad_masks(x[0], x[1]),
elems=(result_dict['instance_centers_heatmap'], image_info),
fn_output_signature=tf.float32,
parallel_iterations=32)
instance_centers_offset = tf.map_fn(
fn=lambda x: self._resize_and_pad_offset_mask(x[0], x[1]),
elems=(result_dict['instance_centers_offset'], image_info),
fn_output_signature=tf.float32,
parallel_iterations=32)
else:
segmentation_outputs = tf.image.resize(
result_dict['segmentation_outputs'],
size=self._config_dict['output_size'],
method='bilinear')
instance_centers_heatmap = tf.image.resize(
result_dict['instance_centers_heatmap'],
size=self._config_dict['output_size'],
method='bilinear')
instance_centers_offset = mask_ops.resize_and_rescale_offsets(
result_dict['instance_centers_offset'],
target_size=self._config_dict['output_size'])
processed_dict = {}
(processed_dict['panoptic_outputs'],
processed_dict['instance_centers'],
processed_dict['instance_scores'],
_) = self._post_processor(
tf.nn.softmax(segmentation_outputs, axis=-1),
instance_centers_heatmap,
instance_centers_offset)
label_divisor = self._config_dict['label_divisor']
processed_dict['category_mask'] = (
processed_dict['panoptic_outputs'] // label_divisor)
processed_dict['instance_mask'] = (
processed_dict['panoptic_outputs'] % label_divisor)
processed_dict.update({
'segmentation_outputs': result_dict['segmentation_outputs']})
return processed_dict
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
| 41.458699 | 94 | 0.711149 |
bcd4cfba8e6ecf1dcbef319a2110a1a3639dee20 | 4,518 | py | Python | terminal.py | shivanshcoder/PyPromptCLI | 854ba97043638d84eb85eb88e8a3c3d0920346d0 | [
"MIT"
] | null | null | null | terminal.py | shivanshcoder/PyPromptCLI | 854ba97043638d84eb85eb88e8a3c3d0920346d0 | [
"MIT"
] | null | null | null | terminal.py | shivanshcoder/PyPromptCLI | 854ba97043638d84eb85eb88e8a3c3d0920346d0 | [
"MIT"
] | null | null | null | from prompt_toolkit.shortcuts import prompt
from prompt_toolkit.completion import WordCompleter, NestedCompleter
class BaseTerminal:
command_list = [
]
def __init__(self):
completer_dict = {}
#for commands in self.command_list:
# if not isinstance(commands, BaseCommand):
# print("ERROR")
# else:
# completer_dict.update(commands.command_choices())
def parse_input(self,string):
"""
        Parses the input string into the required dictionary for further processing.
Args:
string (String): the string containing the command
Raises:
Exception: Raised when empty string is provided
Returns:
dict: dictionary containing parsed string with corresponding keys
"""
import shlex
if len(string) == 0:
raise Exception("No string provided")
split_str = shlex.split(string)
string_dict = {
'command': split_str.pop(0),
'flags':[],
'options': {},
'arg':[]
}
index = 1
while len(split_str):
if split_str[0].startswith('-'):
keyword = split_str.pop(0)
if len(split_str):
                    if split_str[0].startswith('-'):
                        # the next token is also an option or flag,
                        # so the last keyword was a flag
                        string_dict['flags'].append(keyword)
                    else:
                        string_dict['options'][keyword] = split_str.pop(0)
                else:
                    # the keyword was the last token, so treat it as a flag
                    string_dict['flags'].append(keyword)
else:
string_dict['arg'] = split_str
break
index+=1
return string_dict
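    # Illustrative example (the command and its tokens are made up): given the
    # parser above, parse_input('deploy -v --env prod app1 app2') returns
    #   {'command': 'deploy', 'flags': ['-v'],
    #    'options': {'--env': 'prod'}, 'arg': ['app1', 'app2']}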
def loop(self):
mycompleter = WordCompleter(["Hello", "exit"], ignore_case=True)
while True:
data = prompt(">", completer=mycompleter)
if data == "exit":
break
print(self.parse_input(data))
class BaseCommand:
name = ""
"""
options = {
# The option name
'-option_name':{
#whether the option is compulsory
'required':True/False,
            # whether the option's value must be one of the listed values
            'strict': True/False,
            # default value for the option if no value is assigned to it
            'default': 'value1',
            # list of the suggested or permissible values
'values':['value1', 'value2']
}
}
"""
options = {}
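    # Minimal sketch of a hypothetical subclass (not part of this module),
    # showing how `name` and `options` are expected to be filled in:
    #
    #   class CopyCommand(BaseCommand):
    #       name = 'copy'
    #       options = {
    #           '-dest': {'required': True, 'strict': False,
    #                     'default': '.', 'values': []},
    #       }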
def __init__(self, command, options, arg):
"""Creates and holds a basic command
Args:
command (String): name of the command
            options (dict): dictionary of the parsed options
            arg (list): list of the arguments for the command
"""
self.command = command
self.options_list = options
        if self.name == '':
            self.name = type(self).__name__
        self.flags = []
        for key in options:
            if options[key] is not None:
                setattr(self, key, options[key])
            else:
                # options given without a value are treated as flags
                self.flags.append(key)
self.arguements = arg
#TODO make such method later on
@classmethod
    def command_from_string(cls, string):
pass
def validity(self):
"""
        Checks the validity of the command arguments given to the class instance
"""
if self.name == "":
# TODO raise some appropriate error
raise ValueError("No name given to the command class")
        for opt_keys in self.options:
            required = self.options[opt_keys].get('required', False)
            default = self.options[opt_keys].get('default')
            # check whether an option is compulsory or not
            if required:
                if opt_keys not in self.options_list:
                    # no such option was provided
                    raise Exception("{} option was not provided".format(opt_keys))
                # otherwise the option was provided; its value (or `default`)
                # is available on the instance
def command(self):
"""
Execute the command in this function
"""
pass
def execute(self, *args, **kwargs):
"""
Function called when the command is called for its function
"""
pass
| 24.031915 | 83 | 0.517043 |
dfb7db97afd9d79fd8de8bf4cefa3ddf48a9dff3 | 1,815 | py | Python | tests/test_boilerplate.py | kronecker08/qxf2-page-object-model | 03383afffec73f4d3038c0e909462e7886227ecb | [
"MIT"
] | 207 | 2017-01-05T17:16:49.000Z | 2022-03-24T13:50:41.000Z | tests/test_boilerplate.py | kronecker08/qxf2-page-object-model | 03383afffec73f4d3038c0e909462e7886227ecb | [
"MIT"
] | 178 | 2018-01-08T22:12:47.000Z | 2022-03-03T14:32:43.000Z | tests/test_boilerplate.py | kronecker08/qxf2-page-object-model | 03383afffec73f4d3038c0e909462e7886227ecb | [
"MIT"
] | 162 | 2017-02-15T09:28:31.000Z | 2022-02-19T14:50:17.000Z | """
This test file will help you get started in writing a new test using our framework
"""
import os,sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from page_objects.PageFactory import PageFactory
from utils.Option_Parser import Option_Parser
import pytest
@pytest.mark.GUI
def test_boilerplate(test_obj):
"Run the test"
try:
        #Initialize flags for tests summary
expected_pass = 0
actual_pass = -1
        #This is the test object; change it to the desired page registered in the page factory
test_obj = PageFactory.get_page_object("main page")
#Print out the result
test_obj.write_test_summary()
expected_pass = test_obj.result_counter
actual_pass = test_obj.pass_counter
except Exception as e:
print("Exception when trying to run test: %s"%__file__)
print("Python says:%s"%str(e))
assert expected_pass == actual_pass, "Test failed: %s"%__file__
#---START OF SCRIPT
if __name__=='__main__':
print("Start of %s"%__file__)
#Creating an instance of the class
options_obj = Option_Parser()
options = options_obj.get_options()
#Run the test only if the options provided are valid
if options_obj.check_options(options):
test_obj = PageFactory.get_page_object("Zero",base_url=options.url)
#Setup and register a driver
test_obj.register_driver(options.remote_flag,options.os_name,options.os_version,options.browser,options.browser_version,options.remote_project_name,options.remote_build_name)
test_boilerplate(test_obj)
        #teardown
test_obj.wait(3)
test_obj.teardown()
else:
        print('ERROR: Received incorrect command line input arguments')
        print(options_obj.print_usage()) | 31.842105 | 182 | 0.709642 |
7a329d0c5fdf5ab05052681aa310e15b9e8d6d1b | 155 | py | Python | 1064.py | gabzin/uri | 177bdf3f87bacfd924bd031a973b8db877379fe5 | [
"MIT"
] | 3 | 2021-09-21T18:50:20.000Z | 2021-12-14T13:07:31.000Z | 1064.py | gabzin/uri | 177bdf3f87bacfd924bd031a973b8db877379fe5 | [
"MIT"
] | null | null | null | 1064.py | gabzin/uri | 177bdf3f87bacfd924bd031a973b8db877379fe5 | [
"MIT"
] | null | null | null | pos=0
med=0.00
for i in range(6):
x=float(input())
if x>0:
pos+=1
med+=x
print(f"{pos} valores positivos")
print("%.1f"%(med/pos))
| 15.5 | 33 | 0.535484 |
43b83da2f5dfbe0e34ed220e1482735b8bae7b4d | 52,850 | py | Python | netbox/netbox/views/generic.py | royreznik/netbox | 7ec6b4ebb7478c7f1e52d3653b3f6f44a19a3fee | [
"Apache-2.0"
] | null | null | null | netbox/netbox/views/generic.py | royreznik/netbox | 7ec6b4ebb7478c7f1e52d3653b3f6f44a19a3fee | [
"Apache-2.0"
] | null | null | null | netbox/netbox/views/generic.py | royreznik/netbox | 7ec6b4ebb7478c7f1e52d3653b3f6f44a19a3fee | [
"Apache-2.0"
] | null | null | null | import logging
import re
from copy import deepcopy
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist, ValidationError
from django.db import transaction, IntegrityError
from django.db.models import ManyToManyField, ProtectedError
from django.forms import Form, ModelMultipleChoiceField, MultipleHiddenInput, Textarea
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.html import escape
from django.utils.http import is_safe_url
from django.utils.safestring import mark_safe
from django.views.generic import View
from django_tables2.export import TableExport
from extras.models import ExportTemplate
from extras.signals import clear_webhooks
from utilities.error_handlers import handle_protectederror
from utilities.exceptions import AbortTransaction, PermissionsViolation
from utilities.forms import (
BootstrapMixin, BulkRenameForm, ConfirmationForm, CSVDataField, CSVFileField, ImportForm, restrict_form_fields,
)
from utilities.permissions import get_permission_for_model
from utilities.tables import paginate_table
from utilities.utils import normalize_querydict, prepare_cloned_fields
from utilities.views import GetReturnURLMixin, ObjectPermissionRequiredMixin
class ObjectView(ObjectPermissionRequiredMixin, View):
"""
Retrieve a single object for display.
queryset: The base queryset for retrieving the object
template_name: Name of the template to use
"""
queryset = None
template_name = None
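    # Illustrative sketch only (the model and template names are hypothetical):
    # a concrete view typically just points these attributes at a model, e.g.
    #
    #   class SiteView(ObjectView):
    #       queryset = Site.objects.all()
    #       template_name = 'dcim/site.html'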
def get_required_permission(self):
return get_permission_for_model(self.queryset.model, 'view')
def get_template_name(self):
"""
Return self.template_name if set. Otherwise, resolve the template path by model app_label and name.
"""
if self.template_name is not None:
return self.template_name
model_opts = self.queryset.model._meta
return f'{model_opts.app_label}/{model_opts.model_name}.html'
def get_extra_context(self, request, instance):
"""
Return any additional context data for the template.
request: The current request
instance: The object being viewed
"""
return {}
def get(self, request, *args, **kwargs):
"""
Generic GET handler for accessing an object by PK or slug
"""
instance = get_object_or_404(self.queryset, **kwargs)
return render(request, self.get_template_name(), {
'object': instance,
**self.get_extra_context(request, instance),
})
class ObjectListView(ObjectPermissionRequiredMixin, View):
"""
List a series of objects.
queryset: The queryset of objects to display. Note: Prefetching related objects is not necessary, as the
table will prefetch objects as needed depending on the columns being displayed.
filter: A django-filter FilterSet that is applied to the queryset
filter_form: The form used to render filter options
table: The django-tables2 Table used to render the objects list
template_name: The name of the template
"""
queryset = None
filterset = None
filterset_form = None
table = None
template_name = 'generic/object_list.html'
action_buttons = ('add', 'import', 'export')
def get_required_permission(self):
return get_permission_for_model(self.queryset.model, 'view')
def export_yaml(self):
"""
Export the queryset of objects as concatenated YAML documents.
"""
yaml_data = [obj.to_yaml() for obj in self.queryset]
return '---\n'.join(yaml_data)
def export_table(self, table, columns=None):
"""
Export all table data in CSV format.
:param table: The Table instance to export
:param columns: A list of specific columns to include. If not specified, all columns will be exported.
"""
exclude_columns = {'pk'}
if columns:
all_columns = [col_name for col_name, _ in table.selected_columns + table.available_columns]
exclude_columns.update({
col for col in all_columns if col not in columns
})
exporter = TableExport(
export_format=TableExport.CSV,
table=table,
exclude_columns=exclude_columns
)
return exporter.response(
filename=f'netbox_{self.queryset.model._meta.verbose_name_plural}.csv'
)
def get(self, request):
model = self.queryset.model
content_type = ContentType.objects.get_for_model(model)
if self.filterset:
self.queryset = self.filterset(request.GET, self.queryset).qs
# Compile a dictionary indicating which permissions are available to the current user for this model
permissions = {}
for action in ('add', 'change', 'delete', 'view'):
perm_name = get_permission_for_model(model, action)
permissions[action] = request.user.has_perm(perm_name)
# Export template/YAML rendering
if 'export' in request.GET and request.GET['export'] != 'table':
# An export template has been specified
if request.GET['export']:
et = get_object_or_404(ExportTemplate, content_type=content_type, name=request.GET['export'])
try:
return et.render_to_response(self.queryset)
except Exception as e:
messages.error(
request,
"There was an error rendering the selected export template ({}): {}".format(
et.name, e
)
)
# Check for YAML export support
elif hasattr(model, 'to_yaml'):
response = HttpResponse(self.export_yaml(), content_type='text/yaml')
filename = 'netbox_{}.yaml'.format(self.queryset.model._meta.verbose_name_plural)
response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
return response
# Construct the objects table
table = self.table(self.queryset, user=request.user)
if 'pk' in table.base_columns and (permissions['change'] or permissions['delete']):
table.columns.show('pk')
# Handle table-based exports (current view or static CSV-based)
if request.GET.get('export') == 'table':
columns = [name for name, _ in table.selected_columns]
return self.export_table(table, columns)
elif 'export' in request.GET:
return self.export_table(table)
# Paginate the objects table
paginate_table(table, request)
context = {
'content_type': content_type,
'table': table,
'permissions': permissions,
'action_buttons': self.action_buttons,
'filter_form': self.filterset_form(request.GET, label_suffix='') if self.filterset_form else None,
}
context.update(self.extra_context())
return render(request, self.template_name, context)
def extra_context(self):
return {}
class ObjectEditView(GetReturnURLMixin, ObjectPermissionRequiredMixin, View):
"""
Create or edit a single object.
queryset: The base queryset for the object being modified
model_form: The form used to create or edit the object
template_name: The name of the template
"""
queryset = None
model_form = None
template_name = 'generic/object_edit.html'
def get_required_permission(self):
# self._permission_action is set by dispatch() to either "add" or "change" depending on whether
# we are modifying an existing object or creating a new one.
return get_permission_for_model(self.queryset.model, self._permission_action)
def get_object(self, kwargs):
# Look up an existing object by slug or PK, if provided.
if 'slug' in kwargs:
obj = get_object_or_404(self.queryset, slug=kwargs['slug'])
elif 'pk' in kwargs:
obj = get_object_or_404(self.queryset, pk=kwargs['pk'])
# Otherwise, return a new instance.
else:
return self.queryset.model()
# Take a snapshot of change-logged models
if hasattr(obj, 'snapshot'):
obj.snapshot()
return obj
def alter_obj(self, obj, request, url_args, url_kwargs):
# Allow views to add extra info to an object before it is processed. For example, a parent object can be defined
# given some parameter from the request URL.
return obj
def dispatch(self, request, *args, **kwargs):
# Determine required permission based on whether we are editing an existing object
self._permission_action = 'change' if kwargs else 'add'
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
obj = self.alter_obj(self.get_object(kwargs), request, args, kwargs)
initial_data = normalize_querydict(request.GET)
form = self.model_form(instance=obj, initial=initial_data)
restrict_form_fields(form, request.user)
return render(request, self.template_name, {
'obj': obj,
'obj_type': self.queryset.model._meta.verbose_name,
'form': form,
'return_url': self.get_return_url(request, obj),
})
def post(self, request, *args, **kwargs):
logger = logging.getLogger('netbox.views.ObjectEditView')
obj = self.alter_obj(self.get_object(kwargs), request, args, kwargs)
form = self.model_form(
data=request.POST,
files=request.FILES,
instance=obj
)
restrict_form_fields(form, request.user)
if form.is_valid():
logger.debug("Form validation was successful")
try:
with transaction.atomic():
object_created = form.instance.pk is None
obj = form.save()
# Check that the new object conforms with any assigned object-level permissions
if not self.queryset.filter(pk=obj.pk).first():
raise PermissionsViolation()
msg = '{} {}'.format(
'Created' if object_created else 'Modified',
self.queryset.model._meta.verbose_name
)
logger.info(f"{msg} {obj} (PK: {obj.pk})")
if hasattr(obj, 'get_absolute_url'):
msg = '{} <a href="{}">{}</a>'.format(msg, obj.get_absolute_url(), escape(obj))
else:
msg = '{} {}'.format(msg, escape(obj))
messages.success(request, mark_safe(msg))
if '_addanother' in request.POST:
redirect_url = request.path
return_url = request.GET.get('return_url')
if return_url is not None and is_safe_url(url=return_url, allowed_hosts=request.get_host()):
redirect_url = f'{redirect_url}?return_url={return_url}'
# If the object has clone_fields, pre-populate a new instance of the form
if hasattr(obj, 'clone_fields'):
redirect_url += f"{'&' if return_url else '?'}{prepare_cloned_fields(obj)}"
return redirect(redirect_url)
return_url = self.get_return_url(request, obj)
return redirect(return_url)
except PermissionsViolation:
msg = "Object save failed due to object-level permissions violation"
logger.debug(msg)
form.add_error(None, msg)
clear_webhooks.send(sender=self)
else:
logger.debug("Form validation failed")
return render(request, self.template_name, {
'obj': obj,
'obj_type': self.queryset.model._meta.verbose_name,
'form': form,
'return_url': self.get_return_url(request, obj),
})
class ObjectDeleteView(GetReturnURLMixin, ObjectPermissionRequiredMixin, View):
"""
Delete a single object.
queryset: The base queryset for the object being deleted
template_name: The name of the template
"""
queryset = None
template_name = 'generic/object_delete.html'
def get_required_permission(self):
return get_permission_for_model(self.queryset.model, 'delete')
def get_object(self, kwargs):
# Look up object by slug if one has been provided. Otherwise, use PK.
if 'slug' in kwargs:
obj = get_object_or_404(self.queryset, slug=kwargs['slug'])
else:
obj = get_object_or_404(self.queryset, pk=kwargs['pk'])
# Take a snapshot of change-logged models
if hasattr(obj, 'snapshot'):
obj.snapshot()
return obj
def get(self, request, **kwargs):
obj = self.get_object(kwargs)
form = ConfirmationForm(initial=request.GET)
return render(request, self.template_name, {
'obj': obj,
'form': form,
'obj_type': self.queryset.model._meta.verbose_name,
'return_url': self.get_return_url(request, obj),
})
def post(self, request, **kwargs):
logger = logging.getLogger('netbox.views.ObjectDeleteView')
obj = self.get_object(kwargs)
form = ConfirmationForm(request.POST)
if form.is_valid():
logger.debug("Form validation was successful")
try:
obj.delete()
except ProtectedError as e:
logger.info("Caught ProtectedError while attempting to delete object")
handle_protectederror([obj], request, e)
return redirect(obj.get_absolute_url())
msg = 'Deleted {} {}'.format(self.queryset.model._meta.verbose_name, obj)
logger.info(msg)
messages.success(request, msg)
return_url = form.cleaned_data.get('return_url')
if return_url is not None and is_safe_url(url=return_url, allowed_hosts=request.get_host()):
return redirect(return_url)
else:
return redirect(self.get_return_url(request, obj))
else:
logger.debug("Form validation failed")
return render(request, self.template_name, {
'obj': obj,
'form': form,
'obj_type': self.queryset.model._meta.verbose_name,
'return_url': self.get_return_url(request, obj),
})
class BulkCreateView(GetReturnURLMixin, ObjectPermissionRequiredMixin, View):
"""
Create new objects in bulk.
queryset: Base queryset for the objects being created
form: Form class which provides the `pattern` field
model_form: The ModelForm used to create individual objects
pattern_target: Name of the field to be evaluated as a pattern (if any)
template_name: The name of the template
"""
queryset = None
form = None
model_form = None
pattern_target = ''
template_name = None
def get_required_permission(self):
return get_permission_for_model(self.queryset.model, 'add')
def get(self, request):
# Set initial values for visible form fields from query args
initial = {}
for field in getattr(self.model_form._meta, 'fields', []):
if request.GET.get(field):
initial[field] = request.GET[field]
form = self.form()
model_form = self.model_form(initial=initial)
return render(request, self.template_name, {
'obj_type': self.model_form._meta.model._meta.verbose_name,
'form': form,
'model_form': model_form,
'return_url': self.get_return_url(request),
})
def post(self, request):
logger = logging.getLogger('netbox.views.BulkCreateView')
model = self.queryset.model
form = self.form(request.POST)
model_form = self.model_form(request.POST)
if form.is_valid():
logger.debug("Form validation was successful")
pattern = form.cleaned_data['pattern']
new_objs = []
try:
with transaction.atomic():
                    # Create objects from the expanded pattern. Abort the transaction on the first validation error.
for value in pattern:
# Reinstantiate the model form each time to avoid overwriting the same instance. Use a mutable
# copy of the POST QueryDict so that we can update the target field value.
model_form = self.model_form(request.POST.copy())
model_form.data[self.pattern_target] = value
# Validate each new object independently.
if model_form.is_valid():
obj = model_form.save()
logger.debug(f"Created {obj} (PK: {obj.pk})")
new_objs.append(obj)
else:
# Copy any errors on the pattern target field to the pattern form.
errors = model_form.errors.as_data()
if errors.get(self.pattern_target):
form.add_error('pattern', errors[self.pattern_target])
# Raise an IntegrityError to break the for loop and abort the transaction.
raise IntegrityError()
# Enforce object-level permissions
if self.queryset.filter(pk__in=[obj.pk for obj in new_objs]).count() != len(new_objs):
raise PermissionsViolation
# If we make it to this point, validation has succeeded on all new objects.
msg = "Added {} {}".format(len(new_objs), model._meta.verbose_name_plural)
logger.info(msg)
messages.success(request, msg)
if '_addanother' in request.POST:
return redirect(request.path)
return redirect(self.get_return_url(request))
except IntegrityError:
pass
except PermissionsViolation:
msg = "Object creation failed due to object-level permissions violation"
logger.debug(msg)
form.add_error(None, msg)
else:
logger.debug("Form validation failed")
return render(request, self.template_name, {
'form': form,
'model_form': model_form,
'obj_type': model._meta.verbose_name,
'return_url': self.get_return_url(request),
})
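# Usage sketch (illustrative): a bulk-create view expands a pattern field into
# many objects. `Widget`, `WidgetBulkCreateForm` and `WidgetForm` are hypothetical
# names used only for this sketch.
#
# class WidgetBulkCreateView(BulkCreateView):
#     queryset = Widget.objects.all()
#     form = WidgetBulkCreateForm        # must provide the `pattern` field
#     model_form = WidgetForm            # validates/saves each individual object
#     pattern_target = 'name'            # field receiving each expanded value
#     template_name = 'myapp/widget_bulk_add.html'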
class ObjectImportView(GetReturnURLMixin, ObjectPermissionRequiredMixin, View):
"""
Import a single object (YAML or JSON format).
queryset: Base queryset for the objects being created
model_form: The ModelForm used to create individual objects
related_object_forms: A dictionary mapping of forms to be used for the creation of related (child) objects
template_name: The name of the template
"""
queryset = None
model_form = None
related_object_forms = dict()
template_name = 'generic/object_import.html'
def get_required_permission(self):
return get_permission_for_model(self.queryset.model, 'add')
def get(self, request):
form = ImportForm()
return render(request, self.template_name, {
'form': form,
'obj_type': self.queryset.model._meta.verbose_name,
'return_url': self.get_return_url(request),
})
def post(self, request):
logger = logging.getLogger('netbox.views.ObjectImportView')
form = ImportForm(request.POST)
if form.is_valid():
logger.debug("Import form validation was successful")
# Initialize model form
data = form.cleaned_data['data']
model_form = self.model_form(data)
restrict_form_fields(model_form, request.user)
# Assign default values for any fields which were not specified. We have to do this manually because passing
# 'initial=' to the form on initialization merely sets default values for the widgets. Since widgets are not
# used for YAML/JSON import, we first bind the imported data normally, then update the form's data with the
# applicable field defaults as needed prior to form validation.
for field_name, field in model_form.fields.items():
if field_name not in data and hasattr(field, 'initial'):
model_form.data[field_name] = field.initial
if model_form.is_valid():
try:
with transaction.atomic():
# Save the primary object
obj = model_form.save()
# Enforce object-level permissions
if not self.queryset.filter(pk=obj.pk).first():
raise PermissionsViolation()
logger.debug(f"Created {obj} (PK: {obj.pk})")
# Iterate through the related object forms (if any), validating and saving each instance.
for field_name, related_object_form in self.related_object_forms.items():
                            logger.debug(f"Processing form for related objects: {related_object_form}")
related_obj_pks = []
for i, rel_obj_data in enumerate(data.get(field_name, list())):
f = related_object_form(obj, rel_obj_data)
for subfield_name, field in f.fields.items():
if subfield_name not in rel_obj_data and hasattr(field, 'initial'):
f.data[subfield_name] = field.initial
if f.is_valid():
related_obj = f.save()
related_obj_pks.append(related_obj.pk)
else:
# Replicate errors on the related object form to the primary form for display
for subfield_name, errors in f.errors.items():
for err in errors:
err_msg = "{}[{}] {}: {}".format(field_name, i, subfield_name, err)
model_form.add_error(None, err_msg)
raise AbortTransaction()
# Enforce object-level permissions on related objects
model = related_object_form.Meta.model
if model.objects.filter(pk__in=related_obj_pks).count() != len(related_obj_pks):
raise ObjectDoesNotExist
except AbortTransaction:
clear_webhooks.send(sender=self)
except PermissionsViolation:
msg = "Object creation failed due to object-level permissions violation"
logger.debug(msg)
form.add_error(None, msg)
clear_webhooks.send(sender=self)
if not model_form.errors:
logger.info(f"Import object {obj} (PK: {obj.pk})")
messages.success(request, mark_safe('Imported object: <a href="{}">{}</a>'.format(
obj.get_absolute_url(), obj
)))
if '_addanother' in request.POST:
return redirect(request.get_full_path())
return_url = form.cleaned_data.get('return_url')
if return_url is not None and is_safe_url(url=return_url, allowed_hosts=request.get_host()):
return redirect(return_url)
else:
return redirect(self.get_return_url(request, obj))
else:
logger.debug("Model form validation failed")
# Replicate model form errors for display
for field, errors in model_form.errors.items():
for err in errors:
if field == '__all__':
form.add_error(None, err)
else:
form.add_error(None, "{}: {}".format(field, err))
else:
logger.debug("Import form validation failed")
return render(request, self.template_name, {
'form': form,
'obj_type': self.queryset.model._meta.verbose_name,
'return_url': self.get_return_url(request),
})
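# Usage sketch (illustrative): importing one object plus its related children from
# YAML/JSON. All names below are hypothetical assumptions for the sketch.
#
# class WidgetImportView(ObjectImportView):
#     queryset = Widget.objects.all()
#     model_form = WidgetForm
#     related_object_forms = {
#         'ports': WidgetPortForm,   # key matches a list field in the import data
#     }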
class BulkImportView(GetReturnURLMixin, ObjectPermissionRequiredMixin, View):
"""
Import objects in bulk (CSV format).
queryset: Base queryset for the model
model_form: The form used to create each imported object
table: The django-tables2 Table used to render the list of imported objects
template_name: The name of the template
widget_attrs: A dict of attributes to apply to the import widget (e.g. to require a session key)
"""
queryset = None
model_form = None
table = None
template_name = 'generic/object_bulk_import.html'
widget_attrs = {}
def _import_form(self, *args, **kwargs):
class ImportForm(BootstrapMixin, Form):
csv = CSVDataField(
from_form=self.model_form,
widget=Textarea(attrs=self.widget_attrs)
)
csv_file = CSVFileField(
label="CSV file",
from_form=self.model_form,
required=False
)
def clean(self):
csv_rows = self.cleaned_data['csv'][1] if 'csv' in self.cleaned_data else None
csv_file = self.files.get('csv_file')
# Check that the user has not submitted both text data and a file
if csv_rows and csv_file:
raise ValidationError(
"Cannot process CSV text and file attachment simultaneously. Please choose only one import "
"method."
)
return ImportForm(*args, **kwargs)
def _save_obj(self, obj_form, request):
"""
Provide a hook to modify the object immediately before saving it (e.g. to encrypt secret data).
"""
return obj_form.save()
def get_required_permission(self):
return get_permission_for_model(self.queryset.model, 'add')
def get(self, request):
return render(request, self.template_name, {
'form': self._import_form(),
'fields': self.model_form().fields,
'obj_type': self.model_form._meta.model._meta.verbose_name,
'return_url': self.get_return_url(request),
})
def post(self, request):
logger = logging.getLogger('netbox.views.BulkImportView')
new_objs = []
form = self._import_form(request.POST, request.FILES)
if form.is_valid():
logger.debug("Form validation was successful")
try:
# Iterate through CSV data and bind each row to a new model form instance.
with transaction.atomic():
if request.FILES:
headers, records = form.cleaned_data['csv_file']
else:
headers, records = form.cleaned_data['csv']
for row, data in enumerate(records, start=1):
obj_form = self.model_form(data, headers=headers)
restrict_form_fields(obj_form, request.user)
if obj_form.is_valid():
obj = self._save_obj(obj_form, request)
new_objs.append(obj)
else:
for field, err in obj_form.errors.items():
form.add_error('csv', "Row {} {}: {}".format(row, field, err[0]))
raise ValidationError("")
# Enforce object-level permissions
if self.queryset.filter(pk__in=[obj.pk for obj in new_objs]).count() != len(new_objs):
raise PermissionsViolation
# Compile a table containing the imported objects
obj_table = self.table(new_objs)
if new_objs:
msg = 'Imported {} {}'.format(len(new_objs), new_objs[0]._meta.verbose_name_plural)
logger.info(msg)
messages.success(request, msg)
return render(request, "import_success.html", {
'table': obj_table,
'return_url': self.get_return_url(request),
})
except ValidationError:
clear_webhooks.send(sender=self)
except PermissionsViolation:
msg = "Object import failed due to object-level permissions violation"
logger.debug(msg)
form.add_error(None, msg)
clear_webhooks.send(sender=self)
else:
logger.debug("Form validation failed")
return render(request, self.template_name, {
'form': form,
'fields': self.model_form().fields,
'obj_type': self.model_form._meta.model._meta.verbose_name,
'return_url': self.get_return_url(request),
})
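# Usage sketch (illustrative): CSV import of many objects at once. The form, table
# and model names are hypothetical.
#
# class WidgetBulkImportView(BulkImportView):
#     queryset = Widget.objects.all()
#     model_form = WidgetCSVForm   # describes the expected CSV columns
#     table = WidgetTable          # renders the imported objects on success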
class BulkEditView(GetReturnURLMixin, ObjectPermissionRequiredMixin, View):
"""
Edit objects in bulk.
queryset: Custom queryset to use when retrieving objects (e.g. to select related objects)
    filterset: FilterSet to apply when editing by QuerySet
    table: The table used to display the objects being edited
form: The form class used to edit objects in bulk
template_name: The name of the template
"""
queryset = None
filterset = None
table = None
form = None
template_name = 'generic/object_bulk_edit.html'
def get_required_permission(self):
return get_permission_for_model(self.queryset.model, 'change')
def get(self, request):
return redirect(self.get_return_url(request))
def post(self, request, **kwargs):
logger = logging.getLogger('netbox.views.BulkEditView')
model = self.queryset.model
# If we are editing *all* objects in the queryset, replace the PK list with all matched objects.
if request.POST.get('_all') and self.filterset is not None:
pk_list = self.filterset(request.GET, self.queryset.values_list('pk', flat=True)).qs
else:
pk_list = request.POST.getlist('pk')
if '_apply' in request.POST:
form = self.form(model, request.POST)
restrict_form_fields(form, request.user)
if form.is_valid():
logger.debug("Form validation was successful")
custom_fields = form.custom_fields if hasattr(form, 'custom_fields') else []
standard_fields = [
field for field in form.fields if field not in custom_fields + ['pk']
]
nullified_fields = request.POST.getlist('_nullify')
try:
with transaction.atomic():
updated_objects = []
for obj in self.queryset.filter(pk__in=form.cleaned_data['pk']):
# Take a snapshot of change-logged models
if hasattr(obj, 'snapshot'):
obj.snapshot()
# Update standard fields. If a field is listed in _nullify, delete its value.
for name in standard_fields:
try:
model_field = model._meta.get_field(name)
except FieldDoesNotExist:
# This form field is used to modify a field rather than set its value directly
model_field = None
# Handle nullification
if name in form.nullable_fields and name in nullified_fields:
if isinstance(model_field, ManyToManyField):
getattr(obj, name).set([])
else:
setattr(obj, name, None if model_field.null else '')
# ManyToManyFields
elif isinstance(model_field, ManyToManyField):
if form.cleaned_data[name]:
getattr(obj, name).set(form.cleaned_data[name])
# Normal fields
elif name in form.changed_data:
setattr(obj, name, form.cleaned_data[name])
# Update custom fields
for name in custom_fields:
if name in form.nullable_fields and name in nullified_fields:
obj.custom_field_data[name] = None
elif name in form.changed_data:
obj.custom_field_data[name] = form.cleaned_data[name]
obj.full_clean()
obj.save()
updated_objects.append(obj)
logger.debug(f"Saved {obj} (PK: {obj.pk})")
# Add/remove tags
if form.cleaned_data.get('add_tags', None):
obj.tags.add(*form.cleaned_data['add_tags'])
if form.cleaned_data.get('remove_tags', None):
obj.tags.remove(*form.cleaned_data['remove_tags'])
# Enforce object-level permissions
if self.queryset.filter(pk__in=[obj.pk for obj in updated_objects]).count() != len(updated_objects):
raise PermissionsViolation
if updated_objects:
msg = 'Updated {} {}'.format(len(updated_objects), model._meta.verbose_name_plural)
logger.info(msg)
messages.success(self.request, msg)
return redirect(self.get_return_url(request))
except ValidationError as e:
messages.error(self.request, "{} failed validation: {}".format(obj, ", ".join(e.messages)))
clear_webhooks.send(sender=self)
except PermissionsViolation:
msg = "Object update failed due to object-level permissions violation"
logger.debug(msg)
form.add_error(None, msg)
clear_webhooks.send(sender=self)
else:
logger.debug("Form validation failed")
else:
# Include the PK list as initial data for the form
initial_data = {'pk': pk_list}
# Check for other contextual data needed for the form. We avoid passing all of request.GET because the
# filter values will conflict with the bulk edit form fields.
# TODO: Find a better way to accomplish this
if 'device' in request.GET:
initial_data['device'] = request.GET.get('device')
elif 'device_type' in request.GET:
initial_data['device_type'] = request.GET.get('device_type')
form = self.form(model, initial=initial_data)
restrict_form_fields(form, request.user)
# Retrieve objects being edited
table = self.table(self.queryset.filter(pk__in=pk_list), orderable=False)
if not table.rows:
messages.warning(request, "No {} were selected.".format(model._meta.verbose_name_plural))
return redirect(self.get_return_url(request))
return render(request, self.template_name, {
'form': form,
'table': table,
'obj_type_plural': model._meta.verbose_name_plural,
'return_url': self.get_return_url(request),
})
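# Usage sketch (illustrative): bulk editing of selected objects. All names are
# hypothetical placeholders.
#
# class WidgetBulkEditView(BulkEditView):
#     queryset = Widget.objects.prefetch_related('tags')
#     filterset = WidgetFilterSet   # applied when "_all" matching objects are edited
#     table = WidgetTable
#     form = WidgetBulkEditForm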
class BulkRenameView(GetReturnURLMixin, ObjectPermissionRequiredMixin, View):
"""
An extendable view for renaming objects in bulk.
"""
queryset = None
template_name = 'generic/object_bulk_rename.html'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Create a new Form class from BulkRenameForm
class _Form(BulkRenameForm):
pk = ModelMultipleChoiceField(
queryset=self.queryset,
widget=MultipleHiddenInput()
)
self.form = _Form
def get_required_permission(self):
return get_permission_for_model(self.queryset.model, 'change')
def post(self, request):
logger = logging.getLogger('netbox.views.BulkRenameView')
if '_preview' in request.POST or '_apply' in request.POST:
form = self.form(request.POST, initial={'pk': request.POST.getlist('pk')})
selected_objects = self.queryset.filter(pk__in=form.initial['pk'])
if form.is_valid():
try:
with transaction.atomic():
renamed_pks = []
for obj in selected_objects:
# Take a snapshot of change-logged models
if hasattr(obj, 'snapshot'):
obj.snapshot()
find = form.cleaned_data['find']
replace = form.cleaned_data['replace']
if form.cleaned_data['use_regex']:
try:
obj.new_name = re.sub(find, replace, obj.name)
# Catch regex group reference errors
except re.error:
obj.new_name = obj.name
else:
obj.new_name = obj.name.replace(find, replace)
renamed_pks.append(obj.pk)
if '_apply' in request.POST:
for obj in selected_objects:
obj.name = obj.new_name
obj.save()
# Enforce constrained permissions
if self.queryset.filter(pk__in=renamed_pks).count() != len(selected_objects):
raise PermissionsViolation
messages.success(request, "Renamed {} {}".format(
len(selected_objects),
self.queryset.model._meta.verbose_name_plural
))
return redirect(self.get_return_url(request))
except PermissionsViolation:
msg = "Object update failed due to object-level permissions violation"
logger.debug(msg)
form.add_error(None, msg)
clear_webhooks.send(sender=self)
else:
form = self.form(initial={'pk': request.POST.getlist('pk')})
selected_objects = self.queryset.filter(pk__in=form.initial['pk'])
return render(request, self.template_name, {
'form': form,
'obj_type_plural': self.queryset.model._meta.verbose_name_plural,
'selected_objects': selected_objects,
'return_url': self.get_return_url(request),
})
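# Usage sketch (illustrative): find/replace renaming over a queryset; only the
# queryset needs to be supplied. `Widget` is a hypothetical model.
#
# class WidgetBulkRenameView(BulkRenameView):
#     queryset = Widget.objects.all()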
class BulkDeleteView(GetReturnURLMixin, ObjectPermissionRequiredMixin, View):
"""
Delete objects in bulk.
queryset: Custom queryset to use when retrieving objects (e.g. to select related objects)
    filterset: FilterSet to apply when deleting by QuerySet
    table: The table used to display the objects being deleted
form: The form class used to delete objects in bulk
template_name: The name of the template
"""
queryset = None
filterset = None
table = None
form = None
template_name = 'generic/object_bulk_delete.html'
def get_required_permission(self):
return get_permission_for_model(self.queryset.model, 'delete')
def get(self, request):
return redirect(self.get_return_url(request))
def post(self, request, **kwargs):
logger = logging.getLogger('netbox.views.BulkDeleteView')
model = self.queryset.model
# Are we deleting *all* objects in the queryset or just a selected subset?
if request.POST.get('_all'):
qs = model.objects.all()
if self.filterset is not None:
qs = self.filterset(request.GET, qs).qs
pk_list = qs.only('pk').values_list('pk', flat=True)
else:
pk_list = [int(pk) for pk in request.POST.getlist('pk')]
form_cls = self.get_form()
if '_confirm' in request.POST:
form = form_cls(request.POST)
if form.is_valid():
logger.debug("Form validation was successful")
# Delete objects
queryset = self.queryset.filter(pk__in=pk_list)
deleted_count = queryset.count()
try:
for obj in queryset:
# Take a snapshot of change-logged models
if hasattr(obj, 'snapshot'):
obj.snapshot()
obj.delete()
except ProtectedError as e:
logger.info("Caught ProtectedError while attempting to delete objects")
handle_protectederror(queryset, request, e)
return redirect(self.get_return_url(request))
msg = f"Deleted {deleted_count} {model._meta.verbose_name_plural}"
logger.info(msg)
messages.success(request, msg)
return redirect(self.get_return_url(request))
else:
logger.debug("Form validation failed")
else:
form = form_cls(initial={
'pk': pk_list,
'return_url': self.get_return_url(request),
})
# Retrieve objects being deleted
table = self.table(self.queryset.filter(pk__in=pk_list), orderable=False)
if not table.rows:
messages.warning(request, "No {} were selected for deletion.".format(model._meta.verbose_name_plural))
return redirect(self.get_return_url(request))
return render(request, self.template_name, {
'form': form,
'obj_type_plural': model._meta.verbose_name_plural,
'table': table,
'return_url': self.get_return_url(request),
})
def get_form(self):
"""
Provide a standard bulk delete form if none has been specified for the view
"""
class BulkDeleteForm(ConfirmationForm):
pk = ModelMultipleChoiceField(queryset=self.queryset, widget=MultipleHiddenInput)
if self.form:
return self.form
return BulkDeleteForm
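# Usage sketch (illustrative): bulk deletion with confirmation. All names are
# hypothetical placeholders; a custom `form` is optional (see get_form() above).
#
# class WidgetBulkDeleteView(BulkDeleteView):
#     queryset = Widget.objects.all()
#     filterset = WidgetFilterSet
#     table = WidgetTable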
#
# Device/VirtualMachine components
#
# TODO: Replace with BulkCreateView
class ComponentCreateView(GetReturnURLMixin, ObjectPermissionRequiredMixin, View):
"""
Add one or more components (e.g. interfaces, console ports, etc.) to a Device or VirtualMachine.
"""
queryset = None
form = None
model_form = None
template_name = 'generic/object_edit.html'
def get_required_permission(self):
return get_permission_for_model(self.queryset.model, 'add')
def get(self, request):
form = self.form(initial=request.GET)
return render(request, self.template_name, {
'obj_type': self.queryset.model._meta.verbose_name,
'form': form,
'return_url': self.get_return_url(request),
})
def post(self, request):
logger = logging.getLogger('netbox.views.ComponentCreateView')
form = self.form(request.POST, initial=request.GET)
self.validate_form(request, form)
if form.is_valid() and not form.errors:
if '_addanother' in request.POST:
return redirect(request.get_full_path())
else:
return redirect(self.get_return_url(request))
return render(request, self.template_name, {
'obj_type': self.queryset.model._meta.verbose_name,
'form': form,
'return_url': self.get_return_url(request),
})
def validate_form(self, request, form):
"""
        Validate form values and set errors on the form object as they are detected. If
        no errors are found, create the new components, signal a success message and return the created objects.
"""
logger = logging.getLogger('netbox.views.ComponentCreateView')
if form.is_valid():
new_components = []
data = deepcopy(request.POST)
names = form.cleaned_data['name_pattern']
labels = form.cleaned_data.get('label_pattern')
for i, name in enumerate(names):
label = labels[i] if labels else None
# Initialize the individual component form
data['name'] = name
data['label'] = label
if hasattr(form, 'get_iterative_data'):
data.update(form.get_iterative_data(i))
component_form = self.model_form(data)
if component_form.is_valid():
new_components.append(component_form)
else:
for field, errors in component_form.errors.as_data().items():
# Assign errors on the child form's name/label field to name_pattern/label_pattern on the parent form
if field == 'name':
field = 'name_pattern'
elif field == 'label':
field = 'label_pattern'
for e in errors:
form.add_error(field, '{}: {}'.format(name, ', '.join(e)))
if not form.errors:
try:
with transaction.atomic():
# Create the new components
new_objs = []
for component_form in new_components:
obj = component_form.save()
new_objs.append(obj)
# Enforce object-level permissions
if self.queryset.filter(pk__in=[obj.pk for obj in new_objs]).count() != len(new_objs):
raise PermissionsViolation
messages.success(request, "Added {} {}".format(
len(new_components), self.queryset.model._meta.verbose_name_plural
))
# Return the newly created objects so overridden post methods can use the data as needed.
return new_objs
except PermissionsViolation:
msg = "Component creation failed due to object-level permissions violation"
logger.debug(msg)
form.add_error(None, msg)
clear_webhooks.send(sender=self)
return None
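# Usage sketch (illustrative): creating one or more components on a single parent
# object from name/label patterns. All names below are hypothetical.
#
# class WidgetPortCreateView(ComponentCreateView):
#     queryset = WidgetPort.objects.all()
#     form = WidgetPortCreateForm   # provides name_pattern / label_pattern
#     model_form = WidgetPortForm   # validates and saves each component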
class BulkComponentCreateView(GetReturnURLMixin, ObjectPermissionRequiredMixin, View):
"""
Add one or more components (e.g. interfaces, console ports, etc.) to a set of Devices or VirtualMachines.
"""
parent_model = None
parent_field = None
form = None
queryset = None
model_form = None
filterset = None
table = None
template_name = 'generic/object_bulk_add_component.html'
def get_required_permission(self):
return f'dcim.add_{self.queryset.model._meta.model_name}'
def post(self, request):
logger = logging.getLogger('netbox.views.BulkComponentCreateView')
parent_model_name = self.parent_model._meta.verbose_name_plural
model_name = self.queryset.model._meta.verbose_name_plural
# Are we editing *all* objects in the queryset or just a selected subset?
if request.POST.get('_all') and self.filterset is not None:
pk_list = [obj.pk for obj in self.filterset(request.GET, self.parent_model.objects.only('pk')).qs]
else:
pk_list = [int(pk) for pk in request.POST.getlist('pk')]
selected_objects = self.parent_model.objects.filter(pk__in=pk_list)
if not selected_objects:
messages.warning(request, "No {} were selected.".format(self.parent_model._meta.verbose_name_plural))
return redirect(self.get_return_url(request))
table = self.table(selected_objects)
if '_create' in request.POST:
form = self.form(request.POST)
if form.is_valid():
logger.debug("Form validation was successful")
new_components = []
data = deepcopy(form.cleaned_data)
try:
with transaction.atomic():
for obj in data['pk']:
names = data['name_pattern']
labels = data['label_pattern'] if 'label_pattern' in data else None
for i, name in enumerate(names):
label = labels[i] if labels else None
component_data = {
self.parent_field: obj.pk,
'name': name,
'label': label
}
component_data.update(data)
component_form = self.model_form(component_data)
if component_form.is_valid():
instance = component_form.save()
logger.debug(f"Created {instance} on {instance.parent_object}")
new_components.append(instance)
else:
for field, errors in component_form.errors.as_data().items():
for e in errors:
form.add_error(field, '{} {}: {}'.format(obj, name, ', '.join(e)))
# Enforce object-level permissions
if self.queryset.filter(pk__in=[obj.pk for obj in new_components]).count() != len(new_components):
raise PermissionsViolation
except IntegrityError:
clear_webhooks.send(sender=self)
except PermissionsViolation:
msg = "Component creation failed due to object-level permissions violation"
logger.debug(msg)
form.add_error(None, msg)
clear_webhooks.send(sender=self)
if not form.errors:
msg = "Added {} {} to {} {}.".format(
len(new_components),
model_name,
len(form.cleaned_data['pk']),
parent_model_name
)
logger.info(msg)
messages.success(request, msg)
return redirect(self.get_return_url(request))
else:
logger.debug("Form validation failed")
else:
form = self.form(initial={'pk': pk_list})
return render(request, self.template_name, {
'form': form,
'parent_model_name': parent_model_name,
'model_name': model_name,
'table': table,
'return_url': self.get_return_url(request),
})
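# Usage sketch (illustrative): adding components to many parent objects at once.
# Every name below is a hypothetical placeholder.
#
# class WidgetBulkAddPortView(BulkComponentCreateView):
#     parent_model = Widget
#     parent_field = 'widget'              # FK name on the component model
#     form = WidgetPortBulkCreateForm
#     queryset = WidgetPort.objects.all()
#     model_form = WidgetPortForm
#     filterset = WidgetFilterSet
#     table = WidgetTable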
| 40.779321 | 125 | 0.56789 |
8a0a3521ea041a0795dc48b874f4cfc33084e76a | 1,220 | py | Python | starter/core/logging.py | ManfredBalvet/bomberjam-bot | 1929c7dfa888791e96f79b42329d4a809dfd85cd | [
"MIT"
] | null | null | null | starter/core/logging.py | ManfredBalvet/bomberjam-bot | 1929c7dfa888791e96f79b42329d4a809dfd85cd | [
"MIT"
] | null | null | null | starter/core/logging.py | ManfredBalvet/bomberjam-bot | 1929c7dfa888791e96f79b42329d4a809dfd85cd | [
"MIT"
] | 3 | 2021-03-08T01:12:29.000Z | 2021-12-28T04:42:53.000Z | import logging
from datetime import datetime
from pathlib import Path
LOGGING_CONFIGURED = False
def configure_file_logging(file_id):
"""
Configures the logger to log to a file.
:param file_id: An id to append to the file name. Useful when you run the same code but you want identifiable log files
:return: None
"""
global LOGGING_CONFIGURED
Path("./logs").mkdir(exist_ok=True)
logging.basicConfig(filename=__get_logging_file_name__(file_id), level=logging.DEBUG)
LOGGING_CONFIGURED = True
def log(content):
"""
Logs the content to file. You must call configure_file_logging before using log.
:param content: Anything that can be represented as a string
:return: None
"""
global LOGGING_CONFIGURED
if LOGGING_CONFIGURED:
logging.debug(content)
def __get_logging_file_name__(file_id):
"""
Composes a logging file name. It contains a timestamp followed by a file id.
Example: 20210306113741-MyBot-2.log
:param file_id: An id to append to the file name. Useful when you run the same code but you want identifiable log files
:return: str
"""
return f"logs/{datetime.now().strftime('%Y%m%d%H%M%S')}-{file_id}.log"
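# Usage sketch (illustrative; the bot name is an arbitrary example):
#
# configure_file_logging("MyBot-2")   # creates ./logs/<timestamp>-MyBot-2.log
# log("game state received")          # appended to that file at DEBUG level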
| 28.372093 | 123 | 0.715574 |
21cdf29ebd7ee3396a66f9398a61d30b700572ba | 360 | py | Python | federatedscope/cv/trainer/trainer.py | alibaba/FederatedScope | fcf6d237624769ea094cfd68803901622f14fc23 | [
"Apache-2.0"
] | 9 | 2022-03-24T07:59:37.000Z | 2022-03-31T06:47:52.000Z | federatedscope/cv/trainer/trainer.py | alibaba/FederatedScope | fcf6d237624769ea094cfd68803901622f14fc23 | [
"Apache-2.0"
] | 1 | 2022-03-28T13:52:17.000Z | 2022-03-28T13:52:17.000Z | federatedscope/cv/trainer/trainer.py | alibaba/FederatedScope | fcf6d237624769ea094cfd68803901622f14fc23 | [
"Apache-2.0"
] | null | null | null | from federatedscope.register import register_trainer
from federatedscope.core.trainers.trainer import GeneralTorchTrainer
class CVTrainer(GeneralTorchTrainer):
pass
def call_cv_trainer(trainer_type):
if trainer_type == 'cvtrainer':
trainer_builder = CVTrainer
return trainer_builder
register_trainer('cvtrainer', call_cv_trainer)
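# Usage sketch (illustrative): the builder registered above is looked up by name
# and simply returns the trainer class.
#
# trainer_cls = call_cv_trainer('cvtrainer')   # -> CVTrainer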
| 22.5 | 68 | 0.791667 |
63d44ee900a75ce221997e514e769dd1adb64e11 | 31,490 | py | Python | nevergrad/functions/control/core.py | microprediction/nevergrad | 5e4c00d74e84dfb0283ab3d35dd85fde0bb49c29 | [
"MIT"
] | null | null | null | nevergrad/functions/control/core.py | microprediction/nevergrad | 5e4c00d74e84dfb0283ab3d35dd85fde0bb49c29 | [
"MIT"
] | null | null | null | nevergrad/functions/control/core.py | microprediction/nevergrad | 5e4c00d74e84dfb0283ab3d35dd85fde0bb49c29 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Trained policies were extracted from https://github.com/modestyachts/ARS
# under their own license. See ARS_LICENSE file in this file's directory
import numpy as np
import gym
import nevergrad.common.typing as tp
from nevergrad.parametrization import parameter as p
from .. import base
from .mujoco import GenericMujocoEnv
class BaseFunction(base.ExperimentFunction):
"""This (abstract) class is a generic wrapper of OpenAI Gym env for policy evaluation.
    Attributes need to be overridden accordingly to obtain a concrete working function.
    Attributes
    ------------
env_name: str
Gym OpenAI environment name
policy_dim: tuple
Shape of the policy
state_mean: list
Average state values of multiple independent runs.
Current implementations use values from https://github.com/modestyachts/ARS
state_std: list
Standard deviation of state values of multiple independent runs.
Current implementations use values from https://github.com/modestyachts/ARS
Parameters
-----------
num_rollouts: int
number of independent runs.
activation: str (default: 'tanh')
        activation function, only applicable when len(intermediate_layer_dim) > 0
deterministic_sim: bool (default: True)
        If True, the same initial states (fixed seed) are used for all evaluations, making the simulation deterministic
noise_level: float (default: 0.)
Level (standard deviation) of noise applied to final action
intermediate_layer_dim: tuple or None
        Shape of the intermediate layers, if any; else None
layer_rescaling_coef: tuple or None
Scaling coefficient of output layers
states_normalization: bool (default: True)
Normalize states with `state_mean` and `state_std`
random_state: int or None
random state for reproducibility in Gym environment.
"""
env_name: str
state_mean: tp.Any
state_std: tp.Any
policy_dim: tp.Tuple[int, ...]
def __init__(
self,
num_rollouts: int,
activation: str = "tanh",
intermediate_layer_dim: tp.Optional[tuple] = None,
deterministic_sim: bool = True,
noise_level: float = 0.0,
states_normalization: bool = True,
layer_rescaling_coef: tp.Optional[tuple] = None,
random_state: tp.Optional[int] = None,
) -> None:
if intermediate_layer_dim is not None:
self.policy_dim = (self.policy_dim[0],) + intermediate_layer_dim + (self.policy_dim[1],) # type: ignore
list_parametrizations = [
            p.Array(shape=(a, b)).set_name(f"layer_{a}_{b}")
for a, b in zip(self.policy_dim[:-1], self.policy_dim[1:])
]
parametrization: p.Tuple = p.Tuple(*list_parametrizations).set_name(self.env_name)
super().__init__(self._simulate, parametrization)
self.num_rollouts = num_rollouts
self.random_state = random_state
self.activation = activation
self.states_normalization = states_normalization
self.noise_level = noise_level
self.deterministic_sim = deterministic_sim
self.layer_rescaling_coef = layer_rescaling_coef
if layer_rescaling_coef is None:
self.layer_rescaling_coef = np.ones(len(self.policy_dim) - 1) # type: ignore
self.add_descriptors(
num_rollouts=num_rollouts,
intermediate_layer_dim=intermediate_layer_dim,
activation=activation,
states_normalization=states_normalization,
noise_level=self.noise_level,
deterministic_sim=deterministic_sim,
)
if self.noise_level > 0.0 or not deterministic_sim:
self.parametrization.descriptors.deterministic_function = False
self._descriptors.pop("random_state", None) # remove it from automatically added descriptors
def _simulate(self, x: tp.Tuple) -> float:
try:
env = GenericMujocoEnv(
env_name=self.env_name,
state_mean=self.state_mean if self.states_normalization else None,
state_std=self.state_std if self.states_normalization else None,
num_rollouts=self.num_rollouts,
activation=self.activation,
layer_rescaling_coef=self.layer_rescaling_coef,
noise_level=self.noise_level,
random_state=self.parametrization.random_state,
)
except gym.error.DependencyNotInstalled as e:
raise base.UnsupportedExperiment("Missing mujoco_py") from e
env.env.seed(
self.random_state if self.deterministic_sim else self.parametrization.random_state.randint(10000)
)
loss = env(x)
# base.update_leaderboard(f'{self.env_name},{self.parametrization.dimension}', loss, x, verbose=True)
return loss
def evaluation_function(self, *recommendations: p.Parameter) -> float:
assert len(recommendations) == 1, "Should not be a pareto set for a monoobjective function"
x = recommendations[0].value
# pylint: disable=not-callable
loss = self.function(x)
assert isinstance(loss, float)
base.update_leaderboard(f"{self.env_name},{self.parametrization.dimension}", loss, x, verbose=True)
return loss
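# Usage sketch (illustrative): a concrete subclass such as Swimmer (defined below)
# is built with a rollout budget and exposes `parametrization` for an optimizer.
# The optimizer shown here (ng.optimizers.NGOpt) is an assumption for the sketch,
# not something this module depends on.
#
# import nevergrad as ng
# func = Swimmer(num_rollouts=1, random_state=42)
# optimizer = ng.optimizers.NGOpt(parametrization=func.parametrization, budget=100)
# recommendation = optimizer.minimize(func.function)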
# pylint: disable=line-too-long
# for black (since lists are way too long...)
# fmt: off
class Ant(BaseFunction):
env_name = 'Ant-v2'
policy_dim: tp.Tuple[int, ...] = (111, 8)
# Values from https://github.com/modestyachts/ARS/tree/master/trained_policies/Ant-v1
state_mean = [0.556454034343523, 0.9186531687987207, -0.003597273626800536, -0.062027209820968335,
-0.22668151226915723, -0.16602131317263671, 0.7987064451368338, -0.08837934771162403,
-0.5953573569489272, 0.31458301497655355, -0.5859666391196517, 0.06840799715850679,
0.6996188089127978, 2.957808943475944, 0.13145167715976466, 0.000408239775951425,
0.0006251405790721873, -0.01324649360933024, -0.010626814219628156, -0.0009340884177352034,
0.011235220933040631, -0.0037160116570636036, -0.018631124226896525, -0.00019431878659138357,
-0.023557209761883236, 0.013354894059913155, 0.012594611303292104, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-0.0002492521170156384, 5.2942688175970466e-06, 4.758743764016147e-06, 0.00023284707214249071,
0.000575762572920161, 0.008342223628535816, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -3.805817666962799e-05,
2.944689402684725e-05, 4.622284386412991e-05, 2.0463934941826443e-05, -2.982578719158166e-07,
0.0003232789482430665, 0.01190863730406915, -0.028847450862012118, -
0.0016049128587483268, -0.011546822405861629, -0.005185736733314698, 0.04395870932465012, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.00010306612506050966, 4.182155321673672e-05, -3.6599965153626765e-05,
3.7417588383893424e-05, -1.9852387226708935e-05, 0.0004990991660521935, 0.11191092578395258,
0.007451270486499179, -0.043879628923914234, 0.029746731314372638, 0.004893585846331603,
0.1289749574211565, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00011265043476062721, -3.2651266270538316e-05,
4.192724068962384e-05, -3.599135906255506e-05, -5.0593649720061946e-05, 0.0005518224383528348,
-0.06107825100454972, 0.06997376760122764, -0.0012712356710709012, -0.04546462254285545,
-0.01656209815809629, 0.08931254442546284, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -5.558307634083136e-05,
-6.29738077240061e-05, -6.133269876741076e-05, -4.7529805786119285e-05, 2.6168802203125756e-05,
0.0003865204399435512, -0.08619662966579106, -0.07392835616333841, 0.06349519253475051,
0.04758718422237922, 0.014684068888490893, 0.10487554379090397]
state_std = [0.09823861308889045, 0.17141367557864626, 0.0923065066441211, 0.09857913658212542, 0.23067414103638456,
0.31929625189221916, 0.2789541347319086, 0.4252996376350357, 0.17273612261821686, 0.3024306630085608,
0.15440628974854603, 0.42709280554821294, 0.21228009754109947, 1.627223228431405, 0.8187247777350416,
0.9552450884883069, 0.9842769566724734, 1.1630255246361634, 0.9579164107073792, 2.1506606935604275,
3.602875428572795, 5.628269094686002, 0.9920290672268143, 2.1223003669071345, 1.0003529831706024,
5.57262446791663, 2.12884125072627, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 0.04912611953512608,
0.04889164520640186, 0.010828667552457222, 0.07233078284166466, 0.07267150019759298,
0.0909353971096799, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 0.015251132328387708,
0.01519859897632306, 0.014332713037703749, 0.016533211312666475, 0.01655178210918886,
0.017826147794067864, 0.19608990202601936, 0.20036636045336445, 0.1867258748179935,
0.19829593162153397, 0.19688339050234013, 0.20454779790272495, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08,
1e-08, 0.018432894986970972, 0.018497451738447097, 0.01722172494351193, 0.020457389878712977,
0.020483738957125595, 0.022133816429764575, 0.3350906849993091, 0.33250359129341234,
0.33900096746510755, 0.3443797803052648, 0.3439163943012149, 0.33448413391786447, 1e-08, 1e-08, 1e-08,
1e-08, 1e-08, 1e-08, 0.019536561966571657, 0.0193568339032099, 0.0178721598998528, 0.0215286944065503,
0.021618066835186175, 0.023317064797031488, 0.28256515904161045, 0.2805053208089945,
0.2749447000464192, 0.2834385598495182, 0.2828637149337391, 0.2845542953486725, 1e-08, 1e-08, 1e-08,
1e-08, 1e-08, 1e-08, 0.01632940767643436, 0.01639546342576569, 0.015535938645943628,
0.018094969304990823, 0.01806480253410683, 0.019505571384448334, 0.3051507639463494,
0.30417415230423156, 0.29980481621099997, 0.30983874726354155, 0.3104093749664208, 0.30598187568861357]
class Swimmer(BaseFunction):
env_name = 'Swimmer-v2'
policy_dim: tp.Tuple[int, ...] = (8, 2)
# These values are from https://github.com/modestyachts/ARS/tree/master/trained_policies/Swimmer-v1
state_mean = 0
state_std = 1
class HalfCheetah(BaseFunction):
env_name = 'HalfCheetah-v2'
policy_dim: tp.Tuple[int, ...] = (17, 6)
# These values are from https://github.com/modestyachts/ARS/tree/master/trained_policies/HalfCheetah-v1
state_mean = [-0.09292822734440935, 0.07602245420984527, 0.0899374674155617,
0.02011249469254209, 0.0798156434227294, -0.08428448356578108,
-0.0215013060089565, -0.03109106115312925, 4.822879258044724,
-0.053464747098009684, -0.0318163014347514, -0.03715201186578856,
-0.1867529885329672, 0.06631679668009596, -0.09290028455365171,
0.15043390964540423, 0.2042574041974179]
state_std = [0.06852462787695646, 0.3584372681186917, 0.3787484779100599,
0.36028136793720056, 0.40588221665043894, 0.44399708334861426,
0.3060632009780872, 0.3746540859283749, 2.249878345532693,
0.7502914419398155, 1.9942769393981352, 8.823299886314189,
7.667188604733051, 9.357981552790314, 10.539094922171378,
8.2635861157824, 9.381873068915496]
class Hopper(BaseFunction):
env_name = 'Hopper-v2'
policy_dim: tp.Tuple[int, ...] = (11, 3)
# These values are from https://github.com/modestyachts/ARS/tree/master/trained_policies/Hopper-v1
state_mean = [1.41599384098897, -0.05478601506999949, -0.2552221576338897,
-0.2540472105290307, 0.2752508450342385, 2.608895291030614,
-0.0085351966531823, 0.006837504545317028, -0.07123674129372454,
-0.0504483911501519, -0.45569643721456643]
state_std = [0.19805723063173689, 0.07824487681531123, 0.17120271410673388,
0.32000514149385056, 0.6240188361452976, 0.8281416142079099,
1.5191581374558094, 1.1737837205443291, 1.8776124850625862,
3.634827607782184, 5.716475201040653]
class Walker2d(BaseFunction):
env_name = 'Walker2d-v2'
policy_dim: tp.Tuple[int, ...] = (17, 6)
# These values are from https://github.com/modestyachts/ARS/tree/master/trained_policies/Walker2d-v1/gait5_reward_11200.npz
state_mean = [0.8488498617542702, -0.6611547735815038, -1.2396970579853086,
-0.8950078841472574, -0.24708567001479317, -0.026436682326840807,
-2.0152198942892237, 0.1772209506456087, 7.124879504132625,
-0.08177482100506603, -0.0973090698139016, -0.2565691144289943,
0.007630772271257735, 0.6811054136606614, -0.02307099876146226,
-0.196427427788658, -1.1504456811780719]
state_std = [0.05610221999432808, 0.11317036984128691, 0.3121976594462603,
0.31169452648959445, 0.6468349769198745, 0.05473553450998447,
0.2433434422998723, 0.9155127813052794, 3.2146809848738442,
0.5296362410124568, 2.2232944517739037, 2.992691886327696,
4.95843329244578, 8.56053510942742, 3.039674909483538,
4.235747025296144, 8.980030325898877]
class Humanoid(BaseFunction):
env_name = 'Humanoid-v2'
policy_dim: tp.Tuple[int, ...] = (376, 17)
# These values are from https://github.com/modestyachts/ARS/tree/master/trained_policies/Humanoid-v1/policy_reward_11600
state_mean = [1.223512941151342, 0.9285620267217891, -0.04600612288662621, -0.24887096958019758,
-0.22325955719472168, -0.17629463383430644, 0.47841679596165526, -0.18890539170346107,
-0.19861972206496825, 0.13014262053110004, -0.03651792561052908, -0.7429555858796192,
-0.04746818133811182, 0.011198448910765477, -0.2910494124293159, -1.5267713093717408,
-1.0860041310082882, -1.244709380985192, 0.7792656365408227, 1.2563074982027704, 1.077594999103332,
-1.5002654383639942, 5.332955853399292, 0.030647237247258364, -0.024606249596252006,
-0.3399735071215924, 0.3497503796975279, -0.21264331665159478, -0.046535856125467005,
0.027206740034526798, -0.01843168788695134, -0.06392540632051621, -0.013627498342509871,
-0.13801869738264383, -0.17097839147116675, -0.25660044197293674, -0.04987117645475261,
-0.11400057167681625, -0.10262052970843882, -0.055106111711606016, -0.07942557939501395,
0.03894579493830569, 0.08768440576334802, 0.060153409243360094, -0.10358948903618044, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.6864405737063366, 1.7076508830342796, 0.09763867145634159,
0.0177693503584045, 0.19197507541602837, -0.13022395106115325, -0.3395871721740677,
0.2562544681872592, 3.5792264913034333, 8.322078939354402, 0.06310964955706921, 0.07021720642108964,
0.018755612200744218, -0.00024661224176565904, -0.02273113952129911, 0.003641050523315394,
0.13493216255476234, -0.02224559995972755, 0.3309184911098229, 2.0357520395249753,
0.039372239564168805, 0.05767216431731622, 0.06351784734223695, -0.002171507224015927,
-0.0022645721008887386, 0.002453001129138291, 0.3577885947420132, -0.03661370708511777,
0.0011729236960336075, 5.852787113641054, 0.3218533810252938, 0.23757173763500847,
0.10905681377403795, 0.008554557009504018, -0.002592520618933238, -0.12606972016255838,
0.03745658906944364, -0.6138709131240883, -0.8633463756366405, 4.525556257740668, 0.7663807298580719,
0.7507047423815559, 0.13615329633035106, -0.04288862308679571, -0.1620679984315845,
-0.20773659001748804, -0.29522562094396376, -0.4226395778601069, -1.3271240326474132,
2.63249442247751, 0.7723128871941001, 0.8545894755744188, 0.1546490059888952, -0.05508466315544032,
-0.25608217015596735, -0.15029310219501313, -0.40902946045662475, -0.23415939268459104,
-1.128662676072067, 1.767145867645593, 0.20876401940390307, 0.3496278074348039, 0.17926903865754054,
0.006094541232859462, 0.14443296438594702, 0.0007344993293173703, 0.7734068461356229,
0.05155437153664577, -0.814065770161627, 4.525556257740668, 0.49616999663471184, 0.5529967194894083,
0.0979037368810912, 0.003485975410377934, 0.10490800735541388, 0.0501923166476425, 0.3258194930620621,
0.08316548725040934, -1.0567633678115018, 2.63249442247751, 0.4996283348106809, 0.48641724714175905,
0.04794369090820097, -0.0058817462375533765, -0.0038912269269175967, 0.10011275977805349,
0.019911103116523225, 0.20842188994890332, -0.8158057732039949, 1.767145867645593,
0.26094507244463144, 0.30392082021440503, 0.13265630113878285, -0.05431343766698027,
0.11776893466040421, 0.0899084310891727, -0.34511356582586145, -0.25235943483624973,
0.5768083784781206, 1.594059841558777, 0.2699901240424498, 0.2859992911038798, 0.13466612926092844,
-0.05567769331909149, 0.1041584857476096, 0.10643703984120968, -0.2717156620480613,
-0.25857766325871656, 0.4908634951002241, 1.198343130581343, 0.4159715826831813, 0.26370098868805497,
0.16162316986601624, -0.0018967799393832562, -0.00784557474517397, -0.1862393523515524,
0.015258213240316195, 0.48040886397289984, 0.6367434103635446, 1.594059841558777, 0.6289351408862088,
0.14525028999224587, 0.5126330919829216, 0.00625267449663808, 0.0015535620305504336,
-0.24735990025481183, -0.0039015204723075194, 0.760661207912407, 0.39003480548206326,
1.198343130581343, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.009984320860184852, -0.3726808236676091,
0.24820933826970693, 5.502147872070437, 0.0880912529108143, 0.016142302334884586, -0.2180804235944024,
-0.10501848181941069, -0.05474741133691679, 5.327275570497641, -0.06344911018678119,
-0.0018479691003698354, 0.1384063661121756, 0.3758516263819816, -0.22938686291078486,
5.236642459391339, -0.08320873140803955, 0.003093592788745551, 0.23325350435603381, -0.3918345829164,
0.2942527417014775, 5.206484767706763, 0.10870508038885734, 0.0981200303221886, -0.9521365950481183,
0.36293547344041416, -0.3384414723392472, 5.910080127581048, 0.26773018061211007, -0.5945929428933572,
-0.9521365950481183, 0.36293547344041416, -0.3384414723392472, 5.910080127581048, 0.26773018061211007,
-0.5945929428933572, 0.3587232211841977, -0.4104791162839761, 1.5986149190475882, 5.204750377221477,
-0.26852433811945536, 0.19813165370374033, -0.3062371808686244, 0.004238222556072027,
-2.2209658318223635, 5.742622248085341, 0.6722949974624708, -0.500485113914267, -0.3062371808686244,
0.004238222556072027, -2.2209658318223635, 5.742622248085341, 0.6722949974624708, -0.500485113914267,
0.26012708391359357, 0.08467556295331613, -0.21391009904140584, 5.3210057409790155,
0.03506536347635228, -0.001275684097233967, 0.042059501206005254, -0.23778440296675274,
-0.1226403571516149, 5.348137526673431, -0.043272898300492156, 0.06022962996263737,
0.2937088892684222, -0.1977541092634194, -0.28966619577855046, 5.298852322727817, 0.23447012164868147,
-0.025647992690023803, 0.3599616142316438, 0.2572452733614258, -0.10288112745228332, 5.19810241527611,
0.20728458029872535, -0.10683192755398696, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.29556968580778875,
-8.260898269152916, 5.587670071623525, 8.924550388476465, 7.307887810310615, -5.797082988253691,
21.76326786049523, 0.5281607939082064, 3.0078721315484658, -8.25644817359464, 12.92236085784975,
-3.07346302346624, -0.15972201114587897, 3.8488401400073924, 2.9013015503137547, -3.0030744277640964,
-4.125822702844248, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0005411157531993445, 0.00018425900752185883,
-7.056479603492841e-05, 0.0004850329710863729, 0.0013067914565368577, -0.0003632249271021157,
-1.499782615828459e-05, 1.5396507342996525e-05, -8.412078417064285e-07, 0.00010005310427990925,
0.000202125250115722, -0.00023887949816504833, 2.358707586858282, -2.038571404847552,
-0.4050387837681951, 4.231544859434408, -2.40103925025192, 33.864309172326074, -0.5579333098944332,
0.34076320959977724, -0.08857964550304018, -0.9922056623081122, -1.3998733333878197,
1.0435800890962725, -2.0834375475277884, 1.2667537566732037, -0.30076659164163855, -2.936598660670857,
-4.701568329244183, 0.5482891196808485, -10.245820936980875, 7.420004642248826, 0.4255468058894504,
17.167740585723312, 16.175268727949796, 181.48592716060185, 0.11325750561423298, -0.06582083133392769,
0.0526578564471078, 0.2371962961790206, 0.3042510288306412, -0.12644331987411908, 2.523881380476937,
-1.547423424434062, 0.33845447817037794, 3.7044012690867003, 5.77270922236946, -1.4549487373900374,
8.414220368253863, -4.740154811733693, 0.37906093062842394, -6.603153276131259, -14.672811393696326,
152.19174733969905, -9.948554129284157e-07, -3.1729733223247383e-07, 1.6278277777934387e-07,
-3.6080215121179876e-05, -4.327504944517283e-05, 0.00011092777930590747, 0.0005553154635918675,
-0.00021156350367889417, 7.217738550388757e-05, -0.0007368680078812343, -0.001578912969766702,
0.0007730670670347712, -7.128808629768361e-07, 4.5728191857655706e-07, 2.2865347644914397e-07,
9.111110122544389e-06, 1.160719855911783e-05, 9.381773874831127e-06, 5.569740868840155e-06,
-1.4891833711621733e-06, 4.342784980667928e-06, 6.777033412499121e-05, 8.971821928675218e-05,
3.889718928290386e-05]
state_std = [0.027220943204269023, 0.03371739859577961, 0.08672716681743708, 0.05806815850513448,
0.10883032708169692, 0.4016638402629323, 0.08844073386589447, 0.2110629853334749, 0.24960364959583276,
0.3587053640151962, 0.2923748496012834, 0.6022790345863119, 0.10635529785354092, 0.27413209706901964,
0.5359593301373943, 1.1750147170843526, 0.3561241697832425, 0.23480406053326436, 0.14437043059667623,
0.27474052240250313, 0.3243886340303913, 0.18167117971436647, 1.3600379607064053, 0.3218201992448027,
0.5296170780949837, 2.772994615925601, 2.164550492666567, 4.4057685665946, 8.587107721379661,
2.8676340745769813, 4.88090236180869, 6.052503126570467, 7.503054069971461, 8.305551440794792,
16.229904432174102, 4.5671057449517205, 6.114614529333562, 11.845036547829535, 25.913615598243307,
6.252996778447118, 4.662819752229859, 2.8957190183908565, 6.052463628549816, 6.384019209205117,
5.0252937265931035, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08,
0.14238959435961462, 0.15193351258039872, 0.02688268828918757, 0.016475850422186383,
0.11543445291824593, 0.0823683956228624, 0.2518946921093844, 0.1719574248033898, 0.17329794081014366,
1.000431359973871e-08, 0.018478908043846144, 0.02158172877456581, 0.005750979917882275,
0.003355592768176608, 0.00950928580095848, 0.009213885716264863, 0.04213918034993743,
0.05463338285150308, 0.05641535066785075, 1.0000747565482142e-08, 0.005257009841282607,
0.013779048588095219, 0.013687172475653606, 0.006885761634218394, 0.009213393296520895,
0.003311783377220073, 0.11257854322792597, 0.11138235337945995, 0.16028902749863805,
1.0004120117325544e-08, 0.0626332661938698, 0.06765339292738642, 0.047407472159472344,
0.025488317118020798, 0.035085467198771765, 0.032714226833568054, 0.15957042246295786,
0.14690742873683094, 0.17841778151136162, 1.0003444225085218e-08, 0.11855546076295732,
0.1601995018375117, 0.05705779799801954, 0.03282487431040264, 0.10613039235022993, 0.04756340733017935,
0.19614743790486447, 0.11434942867009171, 0.13718925851422167, 1.0002730173229638e-08,
0.19787882550074856, 0.18946485032798877, 0.09441604754591658, 0.03393819538720024,
0.12257527328131092, 0.05519359584132488, 0.19627223909340552, 0.0748500807412641, 0.15313598337114612,
1.0001191898980395e-08, 0.033710679183069324, 0.07266497927654184, 0.08869154115095376,
0.02801793963860596, 0.05256426516615806, 0.03856252413251216, 0.2516601273928587, 0.16221167659334101,
0.07653053984565042, 1.0003444225085218e-08, 0.27148907547270107, 0.21859413334486488,
0.06325191955230788, 0.02302418243440717, 0.08862609459915445, 0.08045492694676731,
0.25942521478053826, 0.16343372818748067, 0.3276168047959743, 1.0002730173229638e-08,
0.3741935150587287, 0.37066538654036724, 0.02743098103313232, 0.01985411168739766, 0.0869625119569415,
0.0701596813703393, 0.1636434670512358, 0.08415384433332133, 0.3997864847957144,
1.0001191898980395e-08, 0.05864711018558324, 0.03873655463517765, 0.02074513521037753,
0.014516459355615415, 0.032864206630095566, 0.028424329271307287, 0.09841092395038074,
0.062363602397650215, 0.05955480591755076, 1.0001024762056902e-08, 0.05010976811305272,
0.0371667135860929, 0.022916757861489532, 0.021714149304502807, 0.042049527956671544,
0.026636578083260077, 0.10537753915021739, 0.05734094560989781, 0.045451706430412284,
1.0001132995384249e-08, 0.024747012172012858, 0.044248606007555084, 0.031031649204226817,
0.023018344234887928, 0.02826943388733819, 0.011944649045757363, 0.07241029584495748,
0.05091277302407085, 0.05390791668152994, 1.0001024762056902e-08, 0.08845251074562006,
0.04669266079787208, 0.07326745630888133, 0.07993263635812788, 0.043248320224860116,
0.046195495431723146, 0.1277474869177115, 0.06732868237334777, 0.060832036356245775,
1.0001132995384249e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 3.9109477625812987,
2.726824503866877, 3.0255580693328183, 1.6749492698497939, 1.4795655652623088, 0.568143823199565,
1.8472520684575449, 2.1900181369366662, 6.182629688404042, 1.447357032272057, 0.5200673335962976,
0.5733433997780786, 3.709921882183585, 3.5247172723139735, 6.242286558556476, 1.402497552638402,
0.47761843119554187, 0.6076037954486013, 5.026204300107199, 5.510097585745013, 3.8589660463280833,
1.568759119818162, 0.5917527332763451, 1.029123122193732, 4.2874345506067675, 9.124784725671551,
5.203728864594666, 5.510637312923947, 2.9473125952312884, 1.427647390892429, 4.2874345506067675,
9.124784725671551, 5.203728864594666, 5.510637312923947, 2.9473125952312884, 1.427647390892429,
7.374427871946933, 9.244789166256488, 4.310956745613239, 1.7092202217527879, 1.1301077085172486,
1.3007111382890009, 8.813130907184895, 13.431406143255899, 3.814435203519042, 7.597007883908303,
5.119591868447307, 3.868104648610097, 8.813130907184895, 13.431406143255899, 3.814435203519042,
7.597007883908303, 5.119591868447307, 3.868104648610097, 3.6034477180617595, 2.5595084056197304,
4.7728021487075205, 1.7315994634713594, 2.159881144941995, 1.229858099522896, 3.6150217079294142,
2.7974447506138613, 4.811699918909136, 1.895348859567646, 2.182604565616805, 1.3472341883825647,
3.4523845665883925, 4.336426110196122, 4.04002517042012, 2.902216962903632, 1.8809257426448065,
1.3918353572611668, 4.063872791376027, 5.196521788130874, 3.84390228786295, 2.7695155263972278,
1.8656837839324878, 2.401450349582291, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 39.31823891856991,
30.278412023383265, 35.40810910501004, 37.979941531373115, 36.78424202471111, 104.80727284053687,
58.22875905949396, 36.0392502862179, 36.61813110510474, 104.8525398280304, 77.23646063277525,
7.73032038936829, 9.568131917771131, 7.980462646552434, 8.858260913945225, 8.862554883315907,
8.45701715868272, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 1e-08, 0.18830165214399114, 0.1342474923753114,
0.05608616493243241, 0.3031034157531549, 0.42340271911121846, 0.23239945675276474,
0.036639848667331484, 0.03854555438139759, 0.021014154359306533, 0.36699977573403253,
0.31037005974137877, 0.5009260515131433, 10.623571227644051, 9.684407819966891, 2.8137093873627883,
36.145319299009536, 19.48448754425835, 150.7907242219333, 11.261628724818983, 7.10279219157998,
2.426720375288641, 20.394312177459685, 27.983819034394788, 23.497420472071315, 22.907935979558523,
14.003856465530275, 3.9708956438441114, 32.23167397585521, 51.218607076172326, 6.116705780736451,
37.32494107277529, 86.34020135034994, 13.888911368783146, 110.89589625498978, 62.09513943796971,
508.12193622186663, 4.752152694418997, 2.5225532126056023, 2.1366178081751994, 9.131638795299004,
12.759569014410413, 5.344681451091026, 25.044004502345594, 15.4964832317486, 4.123332097154291,
37.00187110793075, 56.77840690678802, 23.617605924413496, 57.00451427503392, 59.54812478177995,
13.271760340369124, 125.76601693382943, 79.10729615187654, 593.56498013792, 0.009841201295063823,
0.016483578826677756, 0.008428110524717015, 0.08510005348913796, 0.0765137611941976, 0.146946643258817,
0.19104081083076538, 0.13768028999889403, 0.05997267118679376, 0.358176890915227, 0.47261113152738704,
0.3491160050383241, 0.009979212934286682, 0.011861627518704817, 0.009040977965359007,
0.05760460551559038, 0.05535268123974551, 0.05381626379965322, 0.026251746223865467,
0.03118102610736434, 0.023451607610137155, 0.11839197501185912, 0.12161710627794166,
0.11164783758519019]
| 77.753086 | 127 | 0.693935 |
e1b961eff7506a3e7a5227f322f675ae731751cc | 141 | py | Python | autoload/leaderf/python/leaderf/__init__.py | sjwsl/LeaderF | d92d428379f56e65acf01c381a56031d71e50b58 | [
"Apache-2.0"
] | null | null | null | autoload/leaderf/python/leaderf/__init__.py | sjwsl/LeaderF | d92d428379f56e65acf01c381a56031d71e50b58 | [
"Apache-2.0"
] | null | null | null | autoload/leaderf/python/leaderf/__init__.py | sjwsl/LeaderF | d92d428379f56e65acf01c381a56031d71e50b58 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf-8')
| 15.666667 | 35 | 0.617021 |
e760788c5454f29f22e45b7013a679683b95c86e | 2,303 | py | Python | libqtile/scripts/shell.py | 1kyu/qtile | da93518b2ab924e803552decdb364d64de46088d | [
"MIT"
] | 2 | 2021-07-21T20:07:12.000Z | 2021-08-12T18:09:49.000Z | libqtile/scripts/shell.py | 1kyu/qtile | da93518b2ab924e803552decdb364d64de46088d | [
"MIT"
] | 1 | 2022-02-27T12:17:27.000Z | 2022-02-27T12:17:27.000Z | libqtile/scripts/shell.py | 1kyu/qtile | da93518b2ab924e803552decdb364d64de46088d | [
"MIT"
] | 1 | 2020-04-27T22:20:11.000Z | 2020-04-27T22:20:11.000Z | # Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile import ipc, sh
from libqtile.command import interface
def qshell(args) -> None:
if args.socket is None:
socket = ipc.find_sockfile()
else:
socket = args.socket
client = ipc.Client(socket, is_json=args.is_json)
cmd_object = interface.IPCCommandInterface(client)
qsh = sh.QSh(cmd_object)
if args.command is not None:
qsh.process_line(args.command)
else:
qsh.loop()
def add_subcommand(subparsers, parents):
parser = subparsers.add_parser(
"shell",
parents=parents,
help="shell-like interface to qtile"
)
parser.add_argument(
"-s", "--socket",
action="store", type=str,
default=None,
help='Use specified socket to connect to qtile.'
)
parser.add_argument(
"-c", "--command",
action="store", type=str,
default=None,
help='Run the specified qshell command and exit.'
)
parser.add_argument(
"-j", "--json",
action="store_true",
default=False,
dest="is_json",
help='Use json in order to communicate with qtile server.'
)
parser.set_defaults(func=qshell)
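# Hedged usage sketch (not part of the original module): assuming the parent
# argparse program is exposed as the usual `qtile` entry point, this
# subcommand could be invoked roughly like so:
#
#     qtile shell                        # interactive qsh loop
#     qtile shell -c "<qsh command>"     # run one command, then exit
#     qtile shell -j -s /path/to/socket  # JSON IPC over an explicit socket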
| 35.430769 | 79 | 0.691272 |
41d7d39286c7260d088d75d29089ebe56ea56f17 | 1,224 | py | Python | test/unittests/use_cases/test_add_tag.py | MBlistein/spaced-repetition | c10281d43e928f8d1799076190f962f8e49a405b | [
"MIT"
] | null | null | null | test/unittests/use_cases/test_add_tag.py | MBlistein/spaced-repetition | c10281d43e928f8d1799076190f962f8e49a405b | [
"MIT"
] | null | null | null | test/unittests/use_cases/test_add_tag.py | MBlistein/spaced-repetition | c10281d43e928f8d1799076190f962f8e49a405b | [
"MIT"
] | null | null | null |
import unittest
from unittest.mock import Mock, patch
from spaced_repetition.domain.tag import TagCreator
from spaced_repetition.use_cases.add_tag import TagAdder
# pylint: disable=protected-access
class TestTagAdder(unittest.TestCase):
def setUp(self) -> None:
self.tag = TagCreator.create('new_tag')
@patch.object(TagAdder, '_assert_is_unique')
def test_add_tag(self, mock_assert_unique):
t_a = TagAdder(db_gateway=Mock(), presenter=Mock())
t_a.add_tag(name=self.tag.name)
mock_assert_unique.assert_called_once_with(tag=self.tag)
t_a.repo.create_tag.assert_called_once_with(tag=self.tag) # noqa
def test_assert_is_unique_raises(self):
t_a = TagAdder(db_gateway=Mock(), presenter=Mock())
t_a.repo.tag_exists.return_value = True
with self.assertRaises(ValueError) as context:
t_a._assert_is_unique(tag=self.tag)
self.assertEqual("Tag with name 'new_tag' already exists!",
str(context.exception))
def test_assert_is_unique(self):
t_a = TagAdder(db_gateway=Mock(), presenter=Mock())
t_a.repo.tag_exists.return_value = False
t_a._assert_is_unique(tag=self.tag)
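# Optional addition (not in the original file): a standard unittest entry
# point so the module can also be run directly with `python test_add_tag.py`.
if __name__ == '__main__':
    unittest.main()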
| 32.210526 | 73 | 0.702614 |
9b523e3806b021eb4257b9cc577c69791caea2aa | 3,115 | py | Python | pytorch_3T27T/utils/backer.py | HechengJin0/pytorch_3T27T | 17b723ff15d10b90acced128091553b1b3073c3a | [
"BSD-3-Clause"
] | null | null | null | pytorch_3T27T/utils/backer.py | HechengJin0/pytorch_3T27T | 17b723ff15d10b90acced128091553b1b3073c3a | [
"BSD-3-Clause"
] | null | null | null | pytorch_3T27T/utils/backer.py | HechengJin0/pytorch_3T27T | 17b723ff15d10b90acced128091553b1b3073c3a | [
"BSD-3-Clause"
] | 1 | 2021-11-03T16:20:15.000Z | 2021-11-03T16:20:15.000Z | #!/usr/bin/env python
# coding=utf-8
# Manages Paths for Saving Models and Logs
from pathlib import Path
from os.path import join as pjoin
import re
import datetime
import pytorch_3T27T as pkg
root = Path(pkg.__path__[0]).parent.absolute()
ETC_DIR = "etc"
LOG_DIR = "logs"
CHECKPOINT_DIR = "ckpts"
RUN_DIR = "runs"
__all__ = [
'get_trial_path', 'get_etc_path', 'get_log_path', 'get_trainer_paths',
'get_trial_dict', 'get_timestamp'
]
def ensure_exists(p: Path) -> Path:
"""
Helper to ensure a directory exists.
"""
p = Path(p)
p.mkdir(parents=True, exist_ok=True)
return p
def get_trial_path(config: dict) -> Path:
"""
Construct a path based on the name of a configuration file,
e.g. 'trials/A01-E01-S0001'
"""
p = pjoin(root, config["save_dir"], config["trial_info"]["ID"])
return ensure_exists(p)
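# Illustrative sketch only: the concrete values below are assumptions, but the
# config keys mirror the ones this module reads.
#
#     cfg = {"save_dir": "trials",
#            "trial_info": {"ID": "A01-E01-S0001",
#                           "timestamp": get_timestamp()}}
#     get_trial_path(cfg)  # -> <package root>/trials/A01-E01-S0001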
def trial_timestamp_path(config: dict) -> Path:
"""
Construct a path based on the name of a configuration file and append a
timestamp, e.g. 'trials/A01-E01-S0001/20211231235959UTC'
"""
timestamp = config["trial_info"]["timestamp"]
p = pjoin(get_trial_path(config), timestamp)
return ensure_exists(p)
def get_etc_path(config: dict) -> Path:
"""
    Returns the config dir, e.g. 'trials/A01-E01-S0001/20211231235959UTC/etc'
"""
p = pjoin(trial_timestamp_path(config), ETC_DIR)
return ensure_exists(p)
def get_log_path(config: dict) -> Path:
"""
    Returns the log dir, e.g. 'trials/A01-E01-S0001/20211231235959UTC/logs'
"""
p = pjoin(trial_timestamp_path(config), LOG_DIR)
return ensure_exists(p)
def get_trainer_paths(config: dict) -> Path:
"""
Returns the paths to save checkpoints and tensorboard runs, e.g.
trials/A01-E01-S0001/20211231235959UTC/ckpts
trials/A01-E01-S0001/20211231235959UTC/runs
"""
trial_timestamp = trial_timestamp_path(config)
return (
ensure_exists(pjoin(trial_timestamp, CHECKPOINT_DIR)),
ensure_exists(pjoin(trial_timestamp, RUN_DIR)),
)
def get_trial_dict(filename: str) -> dict:
"""
Get trial information from file name
Returns
-------
trial : dict
Trial information: ID, Aim, Experiment, Setup
"""
# Parse filename to get this particular experimental run info
trial = {}
nameRegex = re.compile(r'((A\d\d)-(E\d\d)-(S\d\d\d\d))')
trial_ID, aim_ID, exp_ID, setup_ID = nameRegex.search(filename).groups()
trial['ID'] = trial_ID
aimRegex = re.compile(f'({aim_ID})_([\\w\\-]+)')
trial['Aim'] = aimRegex.search(filename).groups()[0]
expRegex = re.compile(f'({exp_ID})_([\\w\\-]+)')
trial['Experiment'] = expRegex.search(filename).groups()[0]
setupRegex = re.compile(f'({setup_ID})_([\\w\\-]+)')
trial['Setup'] = setupRegex.search(filename).groups()[0]
trial['timestamp'] = get_timestamp()
return trial
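# Hedged example (the filename below is hypothetical): a path such as
#
#     "A01_some-aim/E01_some-exp/S0001_some-setup/A01-E01-S0001.json"
#
# satisfies all four regexes above; as written, groups()[0] returns the short
# IDs, so the result would look roughly like
#
#     {'ID': 'A01-E01-S0001', 'Aim': 'A01', 'Experiment': 'E01',
#      'Setup': 'S0001', 'timestamp': '20211231235959UTC'}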
def get_timestamp() -> str:
"""
    Get this experimental run's timestamp, e.g. '20211231235959UTC'
"""
timestamp = datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S")
return timestamp + 'UTC'
| 25.743802 | 76 | 0.656501 |
f2f2e1c7d2837d2b054b83e74f2132e6088b6e7a | 2,135 | py | Python | dlu/limitusers/templatetags/limitusers.py | yashrane015/django-limit-users | d7db7bbec3579964efa33b8facfc6d8a3930ead0 | [
"Apache-2.0"
] | 2 | 2015-06-07T21:33:12.000Z | 2022-02-16T02:24:16.000Z | dlu/limitusers/templatetags/limitusers.py | yashrane015/django-limit-users | d7db7bbec3579964efa33b8facfc6d8a3930ead0 | [
"Apache-2.0"
] | null | null | null | dlu/limitusers/templatetags/limitusers.py | yashrane015/django-limit-users | d7db7bbec3579964efa33b8facfc6d8a3930ead0 | [
"Apache-2.0"
] | 1 | 2020-10-01T02:45:33.000Z | 2020-10-01T02:45:33.000Z | from django import template
from django.conf import settings
from dlu.limitusers.models import User, DisabledUser
register = template.Library()
class RegsAvailableNode(template.Node):
def __init__(self, available, var_name = 'registrations_available'):
self.available = available
self.var_name = var_name
def render(self, context):
context[self.var_name] = self.available
return ''
def regs_available(parser, token):
filters = {}
if getattr(settings, 'LIMIT_USERS_IGNORE_ADMIN', True):
filters['is_staff'] = False
filters['is_superuser'] = False
remaining = getattr(settings, 'MAX_USER_REGISTRATIONS') - User.objects.filter(**filters).count()
if remaining < 0:
remaining = 0
return RegsAvailableNode(remaining)
register.tag('regs_available', regs_available)
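# Hedged template sketch (illustrative, not taken from the original project):
# the node drops its value into the template context, so a template would
# typically call the tag first and then read the variable it sets, e.g.
#
#     {% load limitusers %}
#     {% regs_available %}
#     {{ registrations_available }} registration slot(s) remaining.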
class RegsAllowedNode(template.Node):
def __init__(self, allowed, var_name = 'registrations_allowed'):
self.allowed = allowed
self.var_name = var_name
def render(self, context):
context[self.var_name] = self.allowed
return ''
def regs_allowed(parser, token):
filters = {}
if getattr(settings, 'LIMIT_USERS_IGNORE_ADMIN', True):
filters['is_staff'] = False
filters['is_superuser'] = False
allowed = getattr(settings, 'MAX_USER_REGISTRATIONS')
return RegsAllowedNode(allowed)
register.tag('regs_allowed', regs_allowed)
@register.simple_tag
def regs_disabled():
return DisabledUser.objects.all().count()
class ActiveUsersNode(template.Node):
def __init__(self, active, var_name = 'active_users'):
self.active = active
self.var_name = var_name
def render(self, context):
context[self.var_name] = self.active
return ''
def active_users(parser, token):
filters = {'is_active':True}
if getattr(settings, 'LIMIT_USERS_IGNORE_ADMIN', True):
filters['is_staff'] = False
filters['is_superuser'] = False
active = User.objects.filter(**filters).count()
return ActiveUsersNode(active)
register.tag('active_users', active_users)
| 30.5 | 100 | 0.688525 |
31e56f79b3e8e9734cb8252fc9669660540a3d95 | 7,877 | py | Python | tests/test_monomial.py | E-G-C/algorithms | 5f1a7863818b016a62ff03a45980f7ec8a970a07 | [
"MIT"
] | 2 | 2022-03-05T17:08:14.000Z | 2022-03-10T07:15:58.000Z | tests/test_monomial.py | E-G-C/algorithms | 5f1a7863818b016a62ff03a45980f7ec8a970a07 | [
"MIT"
] | 18 | 2022-03-01T08:40:32.000Z | 2022-03-05T22:38:43.000Z | tests/test_monomial.py | E-G-C/algorithms | 5f1a7863818b016a62ff03a45980f7ec8a970a07 | [
"MIT"
] | 1 | 2022-03-09T14:43:50.000Z | 2022-03-09T14:43:50.000Z | from algorithms.maths.polynomial import Monomial
from fractions import Fraction
import math
import unittest
class TestSuite(unittest.TestCase):
def setUp(self):
self.m1 = Monomial({})
self.m2 = Monomial({1: 1}, 2)
self.m3 = Monomial({1: 2, 2: -1}, 1.5)
self.m4 = Monomial({1: 1, 2: 2, 3: -2}, 3)
self.m5 = Monomial({2: 1, 3: 0}, Fraction(2, 3))
self.m6 = Monomial({1: 0, 2: 0, 3: 0}, -2.27)
self.m7 = Monomial({1: 2, 7: 2}, -math.pi)
self.m8 = Monomial({150: 5, 170: 2, 10000: 3}, 0)
self.m9 = 2
self.m10 = math.pi
self.m11 = Fraction(3, 8)
self.m12 = 0
self.m13 = Monomial({1: 1}, -2)
self.m14 = Monomial({1: 2}, 3)
self.m15 = Monomial({1: 1}, 3)
self.m16 = Monomial({1: 2, 7: 2}, math.pi)
self.m17 = Monomial({1: -1})
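        # Reading these fixtures (inferred from how they are used below, not
        # from Monomial's documentation): the dict maps a variable index to
        # its power and the second argument is the coefficient, so
        # Monomial({1: 2, 2: -1}, 1.5) stands for 1.5 * x1^2 * x2^(-1), and
        # Monomial({}) plays the role of the zero monomial in these tests.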
def test_monomial_addition(self):
# Monomials with different underlying variables or
# even different power of those variables must not be added!
self.assertRaises(ValueError, lambda x, y: x + y, self.m1, self.m2)
self.assertRaises(ValueError, lambda x, y: x + y, self.m2, self.m3)
self.assertRaises(ValueError, lambda x, y: x + y, self.m2, self.m14)
# Additive inverses of each other should produce the zero monomial.
self.assertEqual(self.m13 + self.m2, self.m1)
# Zero monomial + Zero monomial = Zero monomial
self.assertEqual(self.m1 + self.m1, self.m1)
# Coefficient float.
self.assertEqual(self.m7 + self.m7, Monomial({1: 2, 7: 2},
-2 * math.pi))
# Coefficient 0 so should equal the zero monomial.
self.assertEqual(self.m8, self.m1)
        # A constant term cannot be added to a monomial
        # that has variables.
self.assertRaises(ValueError, lambda x, y: x + y, self.m2, self.m9)
        # A Monomial cannot be added to a literal, but any int, float,
        # Fraction, or Monomial can be added to a Monomial.
        # So 2 + Monomial raises TypeError, but Monomial + 2 may work fine!
self.assertRaises(TypeError, lambda x, y: x + y, self.m9, self.m2)
# Any constant added to a zero monomial produces
# a monomial.
self.assertEqual(self.m1 + self.m9, Monomial({}, 2))
self.assertEqual(self.m1 + self.m12, Monomial({}, 0))
return
def test_monomial_subtraction(self):
# Monomials with different underlying variables or
# even different power of those variables must not be subtracted!
self.assertRaises(ValueError, lambda x, y: x - y, self.m1, self.m2)
self.assertRaises(ValueError, lambda x, y: x - y, self.m2, self.m3)
self.assertRaises(ValueError, lambda x, y: x - y, self.m2, self.m14)
# Additive inverses of each other should produce the zero monomial.
self.assertEqual(self.m2 - self.m2, self.m1)
self.assertEqual(self.m2 - self.m2, Monomial({}, 0))
# Zero monomial - Zero monomial = Zero monomial
self.assertEqual(self.m1 - self.m1, self.m1)
# Coefficient int.
self.assertEqual(self.m2 - self.m15, Monomial({1: 1}, -1))
# Coefficient float.
self.assertEqual(self.m16 - self.m7, Monomial({1: 2, 7: 2},
2 * math.pi))
        # A constant term cannot be subtracted from a monomial
        # that has variables.
self.assertRaises(ValueError, lambda x, y: x - y, self.m2, self.m9)
        # A Monomial cannot be subtracted from a literal, but any int, float,
        # Fraction, or Monomial can be subtracted from a Monomial.
        # So 2 - Monomial raises TypeError, but Monomial - 2 may work fine!
self.assertRaises(TypeError, lambda x, y: x - y, self.m9, self.m2)
        # Subtracting a constant from the zero monomial produces
        # a constant monomial.
self.assertEqual(self.m1 - self.m9, Monomial({}, -2))
self.assertEqual(self.m1 - self.m12, Monomial({}, 0))
return
def test_monomial_multiplication(self):
# Usual multiplication.
# The positive and negative powers of the same variable
# should cancel out.
self.assertEqual(self.m2 * self.m13, Monomial({1: 2}, -4))
self.assertEqual(self.m2 * self.m17, Monomial({}, 2))
# A coefficient of zero should make the product zero.
# Zero monomial * any int, float, Fraction, or Monomial = Zero monomial
self.assertEqual(self.m8 * self.m5, self.m1)
self.assertEqual(self.m1 * self.m2, self.m1)
# Test usual float multiplication.
self.assertEqual(self.m7 * self.m3, Monomial({1: 4, 2: -1, 7: 2},
-1.5*math.pi))
return
def test_monomial_inverse(self):
# The Zero monomial is not invertible.
self.assertRaises(ValueError, lambda x: x.inverse(), self.m1)
self.assertRaises(ValueError, lambda x: x.inverse(), self.m8)
self.assertRaises(ValueError, lambda x: x.inverse(),
Monomial({}, self.m12))
# Check some inverses.
self.assertEqual(self.m7.inverse(), Monomial({1: -2, 7: -2}, -1 / math.pi))
# Doesn't matter if the coefficient is Fraction or float.
# Both should be treated as same.
self.assertEqual(self.m5.inverse(), Monomial({2: -1}, Fraction(3, 2)))
self.assertEqual(self.m5.inverse(), Monomial({2: -1}, 1.5))
# Should work fine without variables too!
self.assertTrue(self.m6.inverse(), Monomial({}, Fraction(-100, 227)))
self.assertEqual(self.m6.inverse(), Monomial({}, -1/2.27))
return
def test_monomial_division(self):
# Any monomial divided by the Zero Monomial should raise a ValueError.
self.assertRaises(ValueError, lambda x, y: x.__truediv__(y),
self.m2, self.m1)
self.assertRaises(ValueError, lambda x, y: x.__truediv__(y),
self.m2, self.m8)
self.assertRaises(ValueError, lambda x, y: x.__truediv__(y),
self.m2, self.m12)
# Test some usual cases.
self.assertEqual(self.m7 / self.m3, Monomial({2: 1, 7: 2},
-2 * math.pi / 3))
self.assertEqual(self.m14 / self.m13, Monomial({1: 1}) * Fraction(-3, 2))
return
def test_monomial_substitution(self):
# Test with int.
self.assertAlmostEqual(self.m7.substitute(2), -16 * math.pi, delta=1e-9)
# Test with float.
self.assertAlmostEqual(self.m7.substitute(1.5), (1.5 ** 4) * -math.pi,
delta=1e-9)
# Test with Fraction.
self.assertAlmostEqual(self.m7.substitute(Fraction(-1, 2)),
(Fraction(-1, 2) ** 4)*-math.pi, delta=1e-9)
# Test with a complete substitution map.
self.assertAlmostEqual(self.m7.substitute({1: 3, 7: 0}),
(3 ** 2) * (0 ** 2) * -math.pi, delta=1e-9)
# Test with a more than complete substitution map.
self.assertAlmostEqual(self.m7.substitute({1: 3, 7: 0, 2: 2}),
(3 ** 2) * (0 ** 2) * -math.pi, delta=1e-9)
# Should raise a ValueError if not enough variables are supplied!
self.assertRaises(ValueError, lambda x, y: x.substitute(y), self.m7,
{1: 3, 2: 2})
self.assertRaises(ValueError, lambda x, y: x.substitute(y), self.m7,
{2: 2})
# The zero monomial always gives zero upon substitution.
self.assertEqual(self.m8.substitute(2), 0)
self.assertEqual(self.m8.substitute({1231: 2, 1: 2}), 0)
return
def test_monomial_all_variables(self):
# Any variable with zero power should not exist in the set
# of variables.
self.assertEqual(self.m5.all_variables(), {2})
self.assertEqual(self.m6.all_variables(), set())
# The zero monomial should output empty set.
self.assertEqual(self.m8.all_variables(), set())
return
def test_monomial_clone(self):
# A monomial should produce its copy
# with same underlying variable dictionary
# and same coefficient.
self.assertEqual(self.m3, self.m3.clone())
# The zero monomial is identified and
# always clones to itself.
self.assertEqual(self.m1, self.m8.clone())
self.assertEqual(self.m1, self.m1.clone())
self.assertEqual(self.m8, self.m1.clone())
self.assertEqual(self.m8, self.m8.clone())
return
if __name__ == '__main__':
unittest.main()
| 35.642534 | 77 | 0.65266 |